Mirror of https://github.com/ivuorinen/gibidify.git (synced 2026-01-26 11:34:03 +00:00)
* build: update Go 1.25, CI workflows, and build tooling
  - Upgrade to Go 1.25
  - Add benchmark targets to Makefile
  - Implement parallel gosec execution
  - Lock tool versions for reproducibility
  - Add shellcheck directives to scripts
  - Update CI workflows with improved caching
* refactor: migrate from golangci-lint to revive
  - Replace golangci-lint with revive for linting
  - Configure comprehensive revive rules
  - Fix all EditorConfig violations
  - Add yamllint and yamlfmt support
  - Remove deprecated .golangci.yml
* refactor: rename utils to shared and deduplicate code
  - Rename utils package to shared
  - Add shared constants package
  - Deduplicate constants across packages
  - Address CodeRabbit review feedback
* fix: resolve SonarQube issues and add safety guards
  - Fix all 73 SonarQube OPEN issues
  - Add nil guards for resourceMonitor, backpressure, metricsCollector
  - Implement io.Closer for headerFileReader
  - Propagate errors from processing helpers
  - Add metrics and templates packages
  - Improve error handling across codebase
* test: improve test infrastructure and coverage
  - Add benchmarks for cli, fileproc, metrics
  - Improve test coverage for cli, fileproc, config
  - Refactor tests with helper functions
  - Add shared test constants
  - Fix test function naming conventions
  - Reduce cognitive complexity in benchmark tests
* docs: update documentation and configuration examples
  - Update CLAUDE.md with current project state
  - Refresh README with new features
  - Add usage and configuration examples
  - Add SonarQube project configuration
  - Consolidate config.example.yaml
* fix: resolve shellcheck warnings in scripts
  - Use ./*.go instead of *.go to prevent dash-prefixed filenames from being interpreted as options (SC2035)
  - Remove unreachable return statement after exit (SC2317)
  - Remove obsolete gibidiutils/ directory reference
* chore(deps): upgrade go dependencies
* chore(lint): megalinter fixes
* fix: improve test coverage and fix file descriptor leaks
  - Add defer r.Close() to fix pipe file descriptor leaks in benchmark tests
  - Refactor TestProcessorConfigureFileTypes with helper functions and assertions
  - Refactor TestProcessorLogFinalStats with output capture and keyword verification
  - Use shared constants instead of literal strings (TestFilePNG, FormatMarkdown, etc.)
  - Reduce cognitive complexity by extracting helper functions
* fix: align test comments with function names
  Remove underscores from test comments to match actual function names:
  - benchmark/benchmark_test.go (2 fixes)
  - fileproc/filetypes_config_test.go (4 fixes)
  - fileproc/filetypes_registry_test.go (6 fixes)
  - fileproc/processor_test.go (6 fixes)
  - fileproc/resource_monitor_types_test.go (4 fixes)
  - fileproc/writer_test.go (3 fixes)
* fix: various test improvements and bug fixes
  - Remove duplicate maxCacheSize check in filetypes_registry_test.go
  - Shorten long comment in processor_test.go to stay under 120 chars
  - Remove flaky time.Sleep in collector_test.go, use >= 0 assertion
  - Close pipe reader in benchmark_test.go to fix file descriptor leak
  - Use ContinueOnError in flags_test.go to match ResetFlags behavior
  - Add nil check for p.ui in processor_workers.go before UpdateProgress
  - Fix resource_monitor_validation_test.go by setting hardMemoryLimitBytes directly
* chore(yaml): add missing document start markers
  Add --- document start to YAML files to satisfy yamllint:
  - .github/workflows/codeql.yml
  - .github/workflows/build-test-publish.yml
  - .github/workflows/security.yml
  - .github/actions/setup/action.yml
* fix: guard nil resourceMonitor and fix test deadlock
  - Guard resourceMonitor before CreateFileProcessingContext call
  - Add ui.UpdateProgress on emergency stop and path error returns
  - Fix potential deadlock in TestProcessFile using wg.Go with defer close
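The "defer r.Close()" items above refer to a standard Go pattern for capturing output through an os.Pipe. Below is a minimal, self-contained sketch of that pattern; captureStdout is a hypothetical helper written for illustration, not code from this repository, and it assumes small outputs (a write larger than the pipe buffer would need a concurrent reader).

package main

import (
	"fmt"
	"io"
	"os"
)

// captureStdout (hypothetical) redirects os.Stdout into an os.Pipe while fn
// runs and returns what it printed. The deferred r.Close() is the kind of
// fix the changelog describes: without it, every call leaks the pipe's
// read-end file descriptor.
func captureStdout(fn func()) (string, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	defer r.Close() // release the read end even on early return or panic

	orig := os.Stdout
	os.Stdout = w
	fn()
	os.Stdout = orig
	_ = w.Close() // close the write end so ReadAll sees EOF

	out, err := io.ReadAll(r)
	return string(out), err
}

func main() {
	got, err := captureStdout(func() { fmt.Println("hello") })
	if err != nil {
		panic(err)
	}
	fmt.Printf("captured: %q\n", got)
}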
752 lines
19 KiB
Go
package main

import (
	"errors"
	"flag"
	"io"
	"os"
	"runtime"
	"testing"

	"github.com/ivuorinen/gibidify/shared"
	"github.com/ivuorinen/gibidify/testutil"
)

// Test constants to avoid goconst linting issues.
const (
	testJSON         = "json"
	testMarkdown     = "markdown"
	testConcurrency  = "1,2"
	testAll          = "all"
	testCollection   = "collection"
	testConcurrencyT = "concurrency"
	testNonExistent  = "/nonexistent/path/that/should/not/exist"
	testFile1        = "test1.txt"
	testFile2        = "test2.txt"
	testContent1     = "content1"
	testContent2     = "content2"
)

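// TestParseConcurrencyList exercises parseConcurrencyList across valid inputs, whitespace handling, and error cases.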
func TestParseConcurrencyList(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		want        []int
		wantErr     bool
		errContains string
	}{
		{
			name:    "valid single value",
			input:   "4",
			want:    []int{4},
			wantErr: false,
		},
		{
			name:    "valid multiple values",
			input:   shared.TestConcurrencyList,
			want:    []int{1, 2, 4, 8},
			wantErr: false,
		},
		{
			name:    "valid with whitespace",
			input:   " 1 , 2 , 4 , 8 ",
			want:    []int{1, 2, 4, 8},
			wantErr: false,
		},
		{
			name:    "valid single large value",
			input:   "16",
			want:    []int{16},
			wantErr: false,
		},
		{
			name:        "empty string",
			input:       "",
			wantErr:     true,
			errContains: shared.TestMsgInvalidConcurrencyLevel,
		},
		{
			name:        "invalid number",
			input:       "1,abc,4",
			wantErr:     true,
			errContains: shared.TestMsgInvalidConcurrencyLevel,
		},
		{
			name:        "zero value",
			input:       "1,0,4",
			wantErr:     true,
			errContains: "concurrency level must be positive",
		},
		{
			name:        "negative value",
			input:       "1,-2,4",
			wantErr:     true,
			errContains: "concurrency level must be positive",
		},
		{
			name:        "only whitespace",
			input:       " , , ",
			wantErr:     true,
			errContains: shared.TestMsgInvalidConcurrencyLevel,
		},
		{
			name:    "large value list",
			input:   "1,2,4,8,16",
			want:    []int{1, 2, 4, 8, 16},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseConcurrencyList(tt.input)

			if tt.wantErr {
				testutil.AssertExpectedError(t, err, "parseConcurrencyList")
				if tt.errContains != "" {
					testutil.AssertErrorContains(t, err, tt.errContains, "parseConcurrencyList")
				}

				return
			}

			testutil.AssertNoError(t, err, "parseConcurrencyList")
			if !equalSlices(got, tt.want) {
				t.Errorf("parseConcurrencyList() = %v, want %v", got, tt.want)
			}
		})
	}
}

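// TestParseFormatList verifies that parseFormatList splits comma-separated input, trims whitespace, and drops empty parts.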
func TestParseFormatList(t *testing.T) {
	tests := []struct {
		name  string
		input string
		want  []string
	}{
		{
			name:  "single format",
			input: "json",
			want:  []string{"json"},
		},
		{
			name:  "multiple formats",
			input: shared.TestFormatList,
			want:  []string{"json", "yaml", "markdown"},
		},
		{
			name:  "formats with whitespace",
			input: " json , yaml , markdown ",
			want:  []string{"json", "yaml", "markdown"},
		},
		{
			name:  "empty string",
			input: "",
			want:  []string{},
		},
		{
			name:  "empty parts",
			input: "json,,yaml",
			want:  []string{"json", "yaml"},
		},
		{
			name:  "only whitespace and commas",
			input: " , , ",
			want:  []string{},
		},
		{
			name:  "single format with whitespace",
			input: " markdown ",
			want:  []string{"markdown"},
		},
		{
			name:  "duplicate formats",
			input: "json,json,yaml",
			want:  []string{"json", "json", "yaml"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := parseFormatList(tt.input)
			if !equalSlices(got, tt.want) {
				t.Errorf("parseFormatList() = %v, want %v", got, tt.want)
			}
		})
	}
}

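// TestGetSourceDescription checks the source description for temp-file mode and explicit directories.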
func TestGetSourceDescription(t *testing.T) {
	// Save original flag values and reset after test
	origSourceDir := sourceDir
	origNumFiles := numFiles
	defer func() {
		sourceDir = origSourceDir
		numFiles = origNumFiles
	}()

	tests := []struct {
		name      string
		sourceDir string
		numFiles  int
		want      string
	}{
		{
			name:      "empty source directory with default files",
			sourceDir: "",
			numFiles:  100,
			want:      "temporary files (100 files)",
		},
		{
			name:      "empty source directory with custom files",
			sourceDir: "",
			numFiles:  50,
			want:      "temporary files (50 files)",
		},
		{
			name:      "non-empty source directory",
			sourceDir: "/path/to/source",
			numFiles:  100,
			want:      "/path/to/source",
		},
		{
			name:      "current directory",
			sourceDir: ".",
			numFiles:  100,
			want:      ".",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set flag pointers to test values
			*sourceDir = tt.sourceDir
			*numFiles = tt.numFiles

			got := getSourceDescription()
			if got != tt.want {
				t.Errorf("getSourceDescription() = %v, want %v", got, tt.want)
			}
		})
	}
}

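// TestRunCollectionBenchmark runs the collection benchmark against generated temp files and a real directory.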
func TestRunCollectionBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// Save original flag values
	origSourceDir := sourceDir
	origNumFiles := numFiles
	defer func() {
		sourceDir = origSourceDir
		numFiles = origNumFiles
	}()

	t.Run("success with temp files", func(t *testing.T) {
		*sourceDir = ""
		*numFiles = 10

		err := runCollectionBenchmark()
		testutil.AssertNoError(t, err, "runCollectionBenchmark with temp files")
	})

	t.Run("success with real directory", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
			{Name: testFile2, Content: testContent2},
		})

		*sourceDir = tempDir
		*numFiles = 10

		err := runCollectionBenchmark()
		testutil.AssertNoError(t, err, "runCollectionBenchmark with real directory")
	})
}

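// TestRunProcessingBenchmark runs the processing benchmark with the json and markdown formats.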
func TestRunProcessingBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// Save original flag values
	origSourceDir := sourceDir
	origFormat := format
	origConcurrency := concurrency
	defer func() {
		sourceDir = origSourceDir
		format = origFormat
		concurrency = origConcurrency
	}()

	t.Run("success with json format", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
			{Name: testFile2, Content: testContent2},
		})

		*sourceDir = tempDir
		*format = testJSON
		*concurrency = 2

		err := runProcessingBenchmark()
		testutil.AssertNoError(t, err, "runProcessingBenchmark with json")
	})

	t.Run("success with markdown format", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})

		*sourceDir = tempDir
		*format = testMarkdown
		*concurrency = 1

		err := runProcessingBenchmark()
		testutil.AssertNoError(t, err, "runProcessingBenchmark with markdown")
	})
}

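// TestRunConcurrencyBenchmark covers both valid and invalid concurrency lists.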
func TestRunConcurrencyBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// Save original flag values
	origSourceDir := sourceDir
	origFormat := format
	origConcurrencyList := concurrencyList
	defer func() {
		sourceDir = origSourceDir
		format = origFormat
		concurrencyList = origConcurrencyList
	}()

	t.Run("success with valid concurrency list", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})

		*sourceDir = tempDir
		*format = testJSON
		*concurrencyList = testConcurrency

		err := runConcurrencyBenchmark()
		testutil.AssertNoError(t, err, "runConcurrencyBenchmark")
	})

	t.Run("error with invalid concurrency list", func(t *testing.T) {
		tempDir := t.TempDir()
		*sourceDir = tempDir
		*format = testJSON
		*concurrencyList = "invalid"

		err := runConcurrencyBenchmark()
		testutil.AssertExpectedError(t, err, "runConcurrencyBenchmark with invalid list")
		testutil.AssertErrorContains(t, err, "invalid concurrency list", "runConcurrencyBenchmark")
	})
}

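// TestRunFormatBenchmark runs the format benchmark with multi-format and single-format lists.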
func TestRunFormatBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// Save original flag values
	origSourceDir := sourceDir
	origFormatList := formatList
	defer func() {
		sourceDir = origSourceDir
		formatList = origFormatList
	}()

	t.Run("success with valid format list", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})

		*sourceDir = tempDir
		*formatList = "json,yaml"

		err := runFormatBenchmark()
		testutil.AssertNoError(t, err, "runFormatBenchmark")
	})

	t.Run("success with single format", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})

		*sourceDir = tempDir
		*formatList = testMarkdown

		err := runFormatBenchmark()
		testutil.AssertNoError(t, err, "runFormatBenchmark with single format")
	})
}

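// TestRunBenchmarks is a table-driven test of benchmark dispatch, including the invalid-type error path.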
func TestRunBenchmarks(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// Save original flag values
	origBenchmarkType := benchmarkType
	origSourceDir := sourceDir
	origConcurrencyList := concurrencyList
	origFormatList := formatList
	defer func() {
		benchmarkType = origBenchmarkType
		sourceDir = origSourceDir
		concurrencyList = origConcurrencyList
		formatList = origFormatList
	}()

	tempDir := t.TempDir()
	testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
		{Name: testFile1, Content: testContent1},
	})

	tests := []struct {
		name          string
		benchmarkType string
		wantErr       bool
		errContains   string
	}{
		{
			name:          "all benchmarks",
			benchmarkType: "all",
			wantErr:       false,
		},
		{
			name:          "collection benchmark",
			benchmarkType: "collection",
			wantErr:       false,
		},
		{
			name:          "processing benchmark",
			benchmarkType: "processing",
			wantErr:       false,
		},
		{
			name:          "concurrency benchmark",
			benchmarkType: "concurrency",
			wantErr:       false,
		},
		{
			name:          "format benchmark",
			benchmarkType: "format",
			wantErr:       false,
		},
		{
			name:          "invalid benchmark type",
			benchmarkType: "invalid",
			wantErr:       true,
			errContains:   "invalid benchmark type",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			*benchmarkType = tt.benchmarkType
			*sourceDir = tempDir
			*concurrencyList = testConcurrency
			*formatList = testMarkdown

			err := runBenchmarks()

			if tt.wantErr {
				testutil.AssertExpectedError(t, err, "runBenchmarks")
				if tt.errContains != "" {
					testutil.AssertErrorContains(t, err, tt.errContains, "runBenchmarks")
				}
			} else {
				testutil.AssertNoError(t, err, "runBenchmarks")
			}
		})
	}
}

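// TestMainFunction exercises the main logic path indirectly via runBenchmarks.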
func TestMainFunction(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// We can't easily test main() directly due to os.Exit calls,
	// but we can test runBenchmarks(), which contains the main logic.
	tempDir := t.TempDir()
	testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
		{Name: testFile1, Content: testContent1},
	})

	// Save original flag values
	origBenchmarkType := benchmarkType
	origSourceDir := sourceDir
	defer func() {
		benchmarkType = origBenchmarkType
		sourceDir = origSourceDir
	}()

	*benchmarkType = testCollection
	*sourceDir = tempDir

	err := runBenchmarks()
	testutil.AssertNoError(t, err, "runBenchmarks through main logic path")
}

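// TestFlagInitialization asserts that resetFlags restores every flag to its documented default.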
func TestFlagInitialization(t *testing.T) {
	// Test that flags are properly initialized with expected defaults
	resetFlags()

	if *sourceDir != "" {
		t.Errorf("sourceDir default should be empty, got %v", *sourceDir)
	}
	if *benchmarkType != testAll {
		t.Errorf("benchmarkType default should be 'all', got %v", *benchmarkType)
	}
	if *format != testJSON {
		t.Errorf("format default should be 'json', got %v", *format)
	}
	if *concurrency != runtime.NumCPU() {
		t.Errorf("concurrency default should be %d, got %d", runtime.NumCPU(), *concurrency)
	}
	if *concurrencyList != shared.TestConcurrencyList {
		t.Errorf("concurrencyList default should be '%s', got %v", shared.TestConcurrencyList, *concurrencyList)
	}
	if *formatList != shared.TestFormatList {
		t.Errorf("formatList default should be '%s', got %v", shared.TestFormatList, *formatList)
	}
	if *numFiles != 100 {
		t.Errorf("numFiles default should be 100, got %d", *numFiles)
	}
}

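// TestErrorPropagation verifies that benchmark errors propagate through runBenchmarks with descriptive messages.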
func TestErrorPropagation(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()

	// Save original flag values
	origBenchmarkType := benchmarkType
	origSourceDir := sourceDir
	origConcurrencyList := concurrencyList
	defer func() {
		benchmarkType = origBenchmarkType
		sourceDir = origSourceDir
		concurrencyList = origConcurrencyList
	}()

	tempDir := t.TempDir()

	t.Run("error from concurrency benchmark propagates", func(t *testing.T) {
		*benchmarkType = testConcurrencyT
		*sourceDir = tempDir
		*concurrencyList = "invalid,list"

		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks with invalid concurrency")
		testutil.AssertErrorContains(t, err, "invalid concurrency list", "runBenchmarks error propagation")
	})

	t.Run("validation error contains proper error type", func(t *testing.T) {
		*benchmarkType = "invalid-type"
		*sourceDir = tempDir

		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks with invalid type")

		var validationErr *shared.StructuredError
		if !errors.As(err, &validationErr) {
			t.Errorf("Expected StructuredError, got %T", err)
		} else if validationErr.Code != shared.CodeValidationFormat {
			t.Errorf("Expected validation format error code, got %v", validationErr.Code)
		}
	})

	t.Run("empty levels array returns error", func(t *testing.T) {
		// Test the specific case where all parts are empty after trimming
		_, err := parseConcurrencyList(" , , ")
		testutil.AssertExpectedError(t, err, "parseConcurrencyList with all empty parts")
		testutil.AssertErrorContains(t, err, shared.TestMsgInvalidConcurrencyLevel, "parseConcurrencyList empty levels")
	})

	t.Run("single empty part returns error", func(t *testing.T) {
		// Test case that should never reach the "no valid levels found" condition
		_, err := parseConcurrencyList(" ")
		testutil.AssertExpectedError(t, err, "parseConcurrencyList with single empty part")
		testutil.AssertErrorContains(
			t, err, shared.TestMsgInvalidConcurrencyLevel, "parseConcurrencyList single empty part",
		)
	})

	t.Run("benchmark function error paths", func(t *testing.T) {
		// Test with non-existent source directory to trigger error paths
		nonExistentDir := testNonExistent

		*benchmarkType = testCollection
		*sourceDir = nonExistentDir

		// This should fail as the benchmark package cannot access non-existent directories
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks with non-existent directory")
		testutil.AssertErrorContains(t, err, "file collection benchmark failed",
			"runBenchmarks error contains expected message")
	})

	t.Run("processing benchmark error path", func(t *testing.T) {
		// Test error path for processing benchmark
		nonExistentDir := testNonExistent

		*benchmarkType = "processing"
		*sourceDir = nonExistentDir
		*format = "json"
		*concurrency = 1

		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks processing with non-existent directory")
		testutil.AssertErrorContains(t, err, "file processing benchmark failed", "runBenchmarks processing error")
	})

	t.Run("concurrency benchmark error path", func(t *testing.T) {
		// Test error path for concurrency benchmark
		nonExistentDir := testNonExistent

		*benchmarkType = testConcurrencyT
		*sourceDir = nonExistentDir
		*format = "json"
		*concurrencyList = "1,2"

		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks concurrency with non-existent directory")
		testutil.AssertErrorContains(t, err, "concurrency benchmark failed", "runBenchmarks concurrency error")
	})

	t.Run("format benchmark error path", func(t *testing.T) {
		// Test error path for format benchmark
		nonExistentDir := testNonExistent

		*benchmarkType = "format"
		*sourceDir = nonExistentDir
		*formatList = "json,yaml"

		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks format with non-existent directory")
		testutil.AssertErrorContains(t, err, "format benchmark failed", "runBenchmarks format error")
	})

	t.Run("all benchmarks error path", func(t *testing.T) {
		// Test error path for all benchmarks
		nonExistentDir := testNonExistent

		*benchmarkType = "all"
		*sourceDir = nonExistentDir

		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks all with non-existent directory")
		testutil.AssertErrorContains(t, err, "benchmark failed", "runBenchmarks all error")
	})
}

// Benchmark functions

// BenchmarkParseConcurrencyList benchmarks the parsing of concurrency lists.
func BenchmarkParseConcurrencyList(b *testing.B) {
	benchmarks := []struct {
		name  string
		input string
	}{
		{
			name:  "single value",
			input: "4",
		},
		{
			name:  "multiple values",
			input: "1,2,4,8",
		},
		{
			name:  "values with whitespace",
			input: " 1 , 2 , 4 , 8 , 16 ",
		},
		{
			name:  "large list",
			input: "1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16",
		},
	}

	for _, bm := range benchmarks {
		b.Run(bm.name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_, _ = parseConcurrencyList(bm.input)
			}
		})
	}
}

// BenchmarkParseFormatList benchmarks the parsing of format lists.
func BenchmarkParseFormatList(b *testing.B) {
	benchmarks := []struct {
		name  string
		input string
	}{
		{
			name:  "single format",
			input: "json",
		},
		{
			name:  "multiple formats",
			input: shared.TestFormatList,
		},
		{
			name:  "formats with whitespace",
			input: " json , yaml , markdown , xml , toml ",
		},
		{
			name:  "large list",
			input: "json,yaml,markdown,xml,toml,csv,tsv,html,txt,log",
		},
	}

	for _, bm := range benchmarks {
		b.Run(bm.name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_ = parseFormatList(bm.input)
			}
		})
	}
}

// Helper functions

// equalSlices compares two slices for equality.
func equalSlices[T comparable](a, b []T) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}

	return true
}

// resetFlags resets flag variables to their defaults for testing.
func resetFlags() {
	flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	flag.CommandLine.SetOutput(io.Discard)
	// Reinitialize the flags
	sourceDir = flag.String("source", "", "Source directory to benchmark (uses temp files if empty)")
	benchmarkType = flag.String("type", "all", "Benchmark type: all, collection, processing, concurrency, format")
	format = flag.String("format", "json", "Output format for processing benchmarks")
	concurrency = flag.Int("concurrency", runtime.NumCPU(), "Concurrency level for processing benchmarks")
	concurrencyList = flag.String(
		"concurrency-list", shared.TestConcurrencyList, "Comma-separated list of concurrency levels",
	)
	formatList = flag.String("format-list", shared.TestFormatList, "Comma-separated list of formats")
	numFiles = flag.Int("files", 100, "Number of files to create for benchmarks")
}