Mirror of https://github.com/ivuorinen/gibidify.git, synced 2026-01-26 03:24:05 +00:00
* build: update Go 1.25, CI workflows, and build tooling
  - Upgrade to Go 1.25
  - Add benchmark targets to Makefile
  - Implement parallel gosec execution
  - Lock tool versions for reproducibility
  - Add shellcheck directives to scripts
  - Update CI workflows with improved caching
* refactor: migrate from golangci-lint to revive
  - Replace golangci-lint with revive for linting
  - Configure comprehensive revive rules
  - Fix all EditorConfig violations
  - Add yamllint and yamlfmt support
  - Remove deprecated .golangci.yml
* refactor: rename utils to shared and deduplicate code
  - Rename utils package to shared
  - Add shared constants package
  - Deduplicate constants across packages
  - Address CodeRabbit review feedback
* fix: resolve SonarQube issues and add safety guards
  - Fix all 73 SonarQube OPEN issues
  - Add nil guards for resourceMonitor, backpressure, metricsCollector
  - Implement io.Closer for headerFileReader
  - Propagate errors from processing helpers
  - Add metrics and templates packages
  - Improve error handling across the codebase
* test: improve test infrastructure and coverage
  - Add benchmarks for cli, fileproc, metrics
  - Improve test coverage for cli, fileproc, config
  - Refactor tests with helper functions
  - Add shared test constants
  - Fix test function naming conventions
  - Reduce cognitive complexity in benchmark tests
* docs: update documentation and configuration examples
  - Update CLAUDE.md with current project state
  - Refresh README with new features
  - Add usage and configuration examples
  - Add SonarQube project configuration
  - Consolidate config.example.yaml
* fix: resolve shellcheck warnings in scripts
  - Use ./*.go instead of *.go to prevent dash-prefixed filenames from being interpreted as options (SC2035)
  - Remove unreachable return statement after exit (SC2317)
  - Remove obsolete gibidiutils/ directory reference
* chore(deps): upgrade go dependencies
* chore(lint): megalinter fixes
* fix: improve test coverage and fix file descriptor leaks
  - Add defer r.Close() to fix pipe file descriptor leaks in benchmark tests
  - Refactor TestProcessorConfigureFileTypes with helper functions and assertions
  - Refactor TestProcessorLogFinalStats with output capture and keyword verification
  - Use shared constants instead of literal strings (TestFilePNG, FormatMarkdown, etc.)
  - Reduce cognitive complexity by extracting helper functions
* fix: align test comments with function names
  Remove underscores from test comments to match actual function names:
  - benchmark/benchmark_test.go (2 fixes)
  - fileproc/filetypes_config_test.go (4 fixes)
  - fileproc/filetypes_registry_test.go (6 fixes)
  - fileproc/processor_test.go (6 fixes)
  - fileproc/resource_monitor_types_test.go (4 fixes)
  - fileproc/writer_test.go (3 fixes)
* fix: various test improvements and bug fixes
  - Remove duplicate maxCacheSize check in filetypes_registry_test.go
  - Shorten long comment in processor_test.go to stay under 120 chars
  - Remove flaky time.Sleep in collector_test.go, use >= 0 assertion
  - Close pipe reader in benchmark_test.go to fix file descriptor leak
  - Use ContinueOnError in flags_test.go to match ResetFlags behavior
  - Add nil check for p.ui in processor_workers.go before UpdateProgress
  - Fix resource_monitor_validation_test.go by setting hardMemoryLimitBytes directly
* chore(yaml): add missing document start markers
  Add --- document start markers to YAML files to satisfy yamllint:
  - .github/workflows/codeql.yml
  - .github/workflows/build-test-publish.yml
  - .github/workflows/security.yml
  - .github/actions/setup/action.yml
* fix: guard nil resourceMonitor and fix test deadlock
  - Guard resourceMonitor before CreateFileProcessingContext call
  - Add ui.UpdateProgress on emergency stop and path error returns
  - Fix potential deadlock in TestProcessFile using wg.Go with defer close
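The nil-guard fix in the last commit is the pattern behind several of the bullets above (resourceMonitor, backpressure, metricsCollector, p.ui). The sketch below is not gibidify code: the processor, UI, and monitor types are simplified stand-ins, and only the method names CreateFileProcessingContext and UpdateProgress, plus the idea of treating the monitor and UI as optional, come from the commit notes.

// Minimal sketch of the guard-before-use pattern, with stand-in types.
package main

import (
	"context"
	"fmt"
	"time"
)

// resourceMonitor is a stand-in; the real one also enforces read slots and memory limits.
type resourceMonitor struct{ timeout time.Duration }

// CreateFileProcessingContext derives a per-file context with a deadline.
func (rm *resourceMonitor) CreateFileProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
	return context.WithTimeout(parent, rm.timeout)
}

// progressUI is a stand-in; the real UpdateProgress signature may differ.
type progressUI struct{}

func (u *progressUI) UpdateProgress(done int) { fmt.Printf("processed %d file(s)\n", done) }

type processor struct {
	rm *resourceMonitor // may be nil when resource limits are disabled
	ui *progressUI      // may be nil in non-interactive runs
}

// processFile guards both optional collaborators before using them, so a nil
// monitor or UI no longer panics the worker.
func (p *processor) processFile(parent context.Context, path string, done int) error {
	ctx, cancel := parent, context.CancelFunc(func() {})
	if p.rm != nil {
		ctx, cancel = p.rm.CreateFileProcessingContext(parent)
	}
	defer cancel()

	// ... read and emit path using ctx ...
	_ = ctx
	_ = path

	if p.ui != nil {
		p.ui.UpdateProgress(done) // in the real fix, progress is also reported on early returns
	}
	return nil
}

func main() {
	p := &processor{rm: nil, ui: &progressUI{}} // resource limits disabled
	if err := p.processFile(context.Background(), "main.go", 1); err != nil {
		fmt.Println("error:", err)
	}
}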
96 lines · 2.5 KiB · Go
package fileproc

import (
	"context"
	"testing"
	"time"

	"github.com/spf13/viper"

	"github.com/ivuorinen/gibidify/testutil"
)

func TestResourceMonitorConcurrentReadsLimit(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Set a low concurrent reads limit for testing
	viper.Set("resourceLimits.enabled", true)
	viper.Set("resourceLimits.maxConcurrentReads", 2)

	rm := NewResourceMonitor()
	defer rm.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// First read slot should succeed
	err := rm.AcquireReadSlot(ctx)
	if err != nil {
		t.Errorf("Expected no error for first read slot, got %v", err)
	}

	// Second read slot should succeed
	err = rm.AcquireReadSlot(ctx)
	if err != nil {
		t.Errorf("Expected no error for second read slot, got %v", err)
	}

	// Third read slot should time out (context deadline exceeded)
	err = rm.AcquireReadSlot(ctx)
	if err == nil {
		t.Error("Expected timeout error for third read slot, got nil")
	}

	// Release one slot and try again
	rm.ReleaseReadSlot()

	// Create new context for the next attempt
	ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel2()

	err = rm.AcquireReadSlot(ctx2)
	if err != nil {
		t.Errorf("Expected no error after releasing a slot, got %v", err)
	}

	// Clean up remaining slots
	rm.ReleaseReadSlot()
	rm.ReleaseReadSlot()
}

func TestResourceMonitorTimeoutContexts(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Set short timeouts for testing
	viper.Set("resourceLimits.enabled", true)
	viper.Set("resourceLimits.fileProcessingTimeoutSec", 1) // 1 second
	viper.Set("resourceLimits.overallTimeoutSec", 2)        // 2 seconds

	rm := NewResourceMonitor()
	defer rm.Close()

	parentCtx := context.Background()

	// Test file processing context
	fileCtx, fileCancel := rm.CreateFileProcessingContext(parentCtx)
	defer fileCancel()

	deadline, ok := fileCtx.Deadline()
	if !ok {
		t.Error("Expected file processing context to have a deadline")
	} else if time.Until(deadline) > time.Second+100*time.Millisecond {
		t.Error("File processing timeout appears to be too long")
	}

	// Test overall processing context
	overallCtx, overallCancel := rm.CreateOverallProcessingContext(parentCtx)
	defer overallCancel()

	deadline, ok = overallCtx.Deadline()
	if !ok {
		t.Error("Expected overall processing context to have a deadline")
	} else if time.Until(deadline) > 2*time.Second+100*time.Millisecond {
		t.Error("Overall processing timeout appears to be too long")
	}
}
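The first test treats the read slots as a counting semaphore: AcquireReadSlot blocks until a slot frees or the context deadline expires, and every successful acquire must be paired with a ReleaseReadSlot. A minimal sketch of that calling pattern follows; it is not part of the test file above, readWithSlot is a hypothetical helper, and it assumes NewResourceMonitor returns a *ResourceMonitor with the methods exercised by the tests.

// readWithSlot is a hypothetical same-package helper (not in gibidify) showing
// the intended acquire/release pairing around a single file read.
func readWithSlot(ctx context.Context, rm *ResourceMonitor, read func() error) error {
	if err := rm.AcquireReadSlot(ctx); err != nil {
		return err // ctx was cancelled or timed out while waiting for a slot
	}
	defer rm.ReleaseReadSlot() // always return the slot, even if read fails
	return read()
}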