chore: modernize workflows, security scanning, and linting configuration (#50)

* build: update Go 1.25, CI workflows, and build tooling

- Upgrade to Go 1.25
- Add benchmark targets to Makefile
- Implement parallel gosec execution
- Lock tool versions for reproducibility
- Add shellcheck directives to scripts
- Update CI workflows with improved caching

* refactor: migrate from golangci-lint to revive

- Replace golangci-lint with revive for linting
- Configure comprehensive revive rules
- Fix all EditorConfig violations
- Add yamllint and yamlfmt support
- Remove deprecated .golangci.yml

* refactor: rename utils to shared and deduplicate code

- Rename utils package to shared
- Add shared constants package
- Deduplicate constants across packages
- Address CodeRabbit review feedback

* fix: resolve SonarQube issues and add safety guards

- Fix all 73 SonarQube OPEN issues
- Add nil guards for resourceMonitor, backpressure, metricsCollector
- Implement io.Closer for headerFileReader
- Propagate errors from processing helpers
- Add metrics and templates packages
- Improve error handling across codebase

* test: improve test infrastructure and coverage

- Add benchmarks for cli, fileproc, metrics
- Improve test coverage for cli, fileproc, config
- Refactor tests with helper functions
- Add shared test constants
- Fix test function naming conventions
- Reduce cognitive complexity in benchmark tests

* docs: update documentation and configuration examples

- Update CLAUDE.md with current project state
- Refresh README with new features
- Add usage and configuration examples
- Add SonarQube project configuration
- Consolidate config.example.yaml

* fix: resolve shellcheck warnings in scripts

- Use ./*.go instead of *.go to prevent dash-prefixed filenames
  from being interpreted as options (SC2035)
- Remove unreachable return statement after exit (SC2317)
- Remove obsolete gibidiutils/ directory reference

* chore(deps): upgrade go dependencies

* chore(lint): megalinter fixes

* fix: improve test coverage and fix file descriptor leaks

- Add defer r.Close() to fix pipe file descriptor leaks in benchmark tests
- Refactor TestProcessorConfigureFileTypes with helper functions and assertions
- Refactor TestProcessorLogFinalStats with output capture and keyword verification
- Use shared constants instead of literal strings (TestFilePNG, FormatMarkdown, etc.)
- Reduce cognitive complexity by extracting helper functions

* fix: align test comments with function names

Remove underscores from test comments to match actual function names:
- benchmark/benchmark_test.go (2 fixes)
- fileproc/filetypes_config_test.go (4 fixes)
- fileproc/filetypes_registry_test.go (6 fixes)
- fileproc/processor_test.go (6 fixes)
- fileproc/resource_monitor_types_test.go (4 fixes)
- fileproc/writer_test.go (3 fixes)

* fix: various test improvements and bug fixes

- Remove duplicate maxCacheSize check in filetypes_registry_test.go
- Shorten long comment in processor_test.go to stay under 120 chars
- Remove flaky time.Sleep in collector_test.go, use >= 0 assertion
- Close pipe reader in benchmark_test.go to fix file descriptor leak
- Use ContinueOnError in flags_test.go to match ResetFlags behavior
- Add nil check for p.ui in processor_workers.go before UpdateProgress
- Fix resource_monitor_validation_test.go by setting hardMemoryLimitBytes directly

* chore(yaml): add missing document start markers

Add --- document start to YAML files to satisfy yamllint:
- .github/workflows/codeql.yml
- .github/workflows/build-test-publish.yml
- .github/workflows/security.yml
- .github/actions/setup/action.yml

* fix: guard nil resourceMonitor and fix test deadlock

- Guard resourceMonitor before CreateFileProcessingContext call
- Add ui.UpdateProgress on emergency stop and path error returns
- Fix potential deadlock in TestProcessFile using wg.Go with defer close
This commit is contained in:
2025-12-10 19:07:11 +02:00
committed by GitHub
parent ea4a39a360
commit 95b7ef6dd3
149 changed files with 22990 additions and 8976 deletions

View File

@@ -12,7 +12,7 @@ import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/gibidiutils"
"github.com/ivuorinen/gibidify/shared"
)
// Result represents the results of a benchmark run.
@@ -48,6 +48,46 @@ type Suite struct {
Results []Result
}
// buildBenchmarkResult constructs a Result with all metrics calculated.
// This eliminates code duplication across benchmark functions.
func buildBenchmarkResult(
	name string,
	files []string,
	totalBytes int64,
	duration time.Duration,
	memBefore, memAfter runtime.MemStats,
) *Result {
	// Rates stay zero when the duration is unmeasurably short,
	// guarding against division by zero.
	var filesRate, bytesRate float64
	if secs := duration.Seconds(); secs != 0 {
		filesRate = float64(len(files)) / secs
		bytesRate = float64(totalBytes) / secs
	}

	return &Result{
		Name:           name,
		Duration:       duration,
		FilesProcessed: len(files),
		BytesProcessed: totalBytes,
		FilesPerSecond: filesRate,
		BytesPerSecond: bytesRate,
		MemoryUsage: MemoryStats{
			AllocMB:      shared.SafeMemoryDiffMB(memAfter.Alloc, memBefore.Alloc),
			SysMB:        shared.SafeMemoryDiffMB(memAfter.Sys, memBefore.Sys),
			NumGC:        memAfter.NumGC - memBefore.NumGC,
			PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
		},
		CPUUsage: CPUStats{
			Goroutines: runtime.NumGoroutine(),
		},
	}
}
// FileCollectionBenchmark benchmarks file collection operations.
func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
// Load configuration to ensure proper file filtering
@@ -58,14 +98,15 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
if sourceDir == "" {
tempDir, cleanupFunc, err := createBenchmarkFiles(numFiles)
if err != nil {
return nil, gibidiutils.WrapError(
return nil, shared.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
"failed to create benchmark files",
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
shared.BenchmarkMsgFailedToCreateFiles,
)
}
cleanup = cleanupFunc
//nolint:errcheck // Benchmark output, errors don't affect results
defer cleanup()
sourceDir = tempDir
}
@@ -79,11 +120,11 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
// Run the file collection benchmark
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
return nil, gibidiutils.WrapError(
return nil, shared.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"benchmark file collection failed",
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgCollectionFailed,
)
}
@@ -101,30 +142,11 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
}
}
result := &Result{
Name: "FileCollection",
Duration: duration,
FilesProcessed: len(files),
BytesProcessed: totalBytes,
FilesPerSecond: float64(len(files)) / duration.Seconds(),
BytesPerSecond: float64(totalBytes) / duration.Seconds(),
MemoryUsage: MemoryStats{
AllocMB: float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024,
SysMB: float64(memAfter.Sys-memBefore.Sys) / 1024 / 1024,
NumGC: memAfter.NumGC - memBefore.NumGC,
PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
},
CPUUsage: CPUStats{
Goroutines: runtime.NumGoroutine(),
},
}
result := buildBenchmarkResult("FileCollection", files, totalBytes, duration, memBefore, memAfter)
return result, nil
}
// FileProcessingBenchmark benchmarks full file processing pipeline.
//
//revive:disable-next-line:function-length
func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (*Result, error) {
// Load configuration to ensure proper file filtering
config.LoadConfig()
@@ -132,16 +154,17 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
var cleanup func()
if sourceDir == "" {
// Create temporary directory with test files
tempDir, cleanupFunc, err := createBenchmarkFiles(100)
tempDir, cleanupFunc, err := createBenchmarkFiles(shared.BenchmarkDefaultFileCount)
if err != nil {
return nil, gibidiutils.WrapError(
return nil, shared.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
"failed to create benchmark files",
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
shared.BenchmarkMsgFailedToCreateFiles,
)
}
cleanup = cleanupFunc
//nolint:errcheck // Benchmark output, errors don't affect results
defer cleanup()
sourceDir = tempDir
}
@@ -149,21 +172,21 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Create temporary output file
outputFile, err := os.CreateTemp("", "benchmark_output_*."+format)
if err != nil {
return nil, gibidiutils.WrapError(
return nil, shared.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOFileCreate,
shared.ErrorTypeIO,
shared.CodeIOFileCreate,
"failed to create benchmark output file",
)
}
defer func() {
if err := outputFile.Close(); err != nil {
// Log error but don't fail the benchmark
fmt.Printf("Warning: failed to close benchmark output file: %v\n", err)
//nolint:errcheck // Warning message in defer, failure doesn't affect benchmark
_, _ = fmt.Printf("Warning: failed to close benchmark output file: %v\n", err)
}
if err := os.Remove(outputFile.Name()); err != nil {
// Log error but don't fail the benchmark
fmt.Printf("Warning: failed to remove benchmark output file: %v\n", err)
//nolint:errcheck // Warning message in defer, failure doesn't affect benchmark
_, _ = fmt.Printf("Warning: failed to remove benchmark output file: %v\n", err)
}
}()
@@ -176,27 +199,21 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Run the full processing pipeline
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
return nil, gibidiutils.WrapError(
return nil, shared.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"benchmark file collection failed",
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgCollectionFailed,
)
}
// Process files with concurrency
err = runProcessingPipeline(context.Background(), processingConfig{
files: files,
outputFile: outputFile,
format: format,
concurrency: concurrency,
sourceDir: sourceDir,
})
err = runProcessingPipeline(context.Background(), files, outputFile, format, concurrency, sourceDir)
if err != nil {
return nil, gibidiutils.WrapError(
return nil, shared.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingFileRead,
shared.ErrorTypeProcessing,
shared.CodeProcessingFileRead,
"benchmark processing pipeline failed",
)
}
@@ -215,24 +232,8 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
}
}
result := &Result{
Name: fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency),
Duration: duration,
FilesProcessed: len(files),
BytesProcessed: totalBytes,
FilesPerSecond: float64(len(files)) / duration.Seconds(),
BytesPerSecond: float64(totalBytes) / duration.Seconds(),
MemoryUsage: MemoryStats{
AllocMB: float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024,
SysMB: float64(memAfter.Sys-memBefore.Sys) / 1024 / 1024,
NumGC: memAfter.NumGC - memBefore.NumGC,
PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
},
CPUUsage: CPUStats{
Goroutines: runtime.NumGoroutine(),
},
}
benchmarkName := fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency)
result := buildBenchmarkResult(benchmarkName, files, totalBytes, duration, memBefore, memAfter)
return result, nil
}
@@ -246,10 +247,10 @@ func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []i
for _, concurrency := range concurrencyLevels {
result, err := FileProcessingBenchmark(sourceDir, format, concurrency)
if err != nil {
return nil, gibidiutils.WrapErrorf(
return nil, shared.WrapErrorf(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
"concurrency benchmark failed for level %d",
concurrency,
)
@@ -270,10 +271,10 @@ func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
for _, format := range formats {
result, err := FileProcessingBenchmark(sourceDir, format, runtime.NumCPU())
if err != nil {
return nil, gibidiutils.WrapErrorf(
return nil, shared.WrapErrorf(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
"format benchmark failed for format %s",
format,
)
@@ -288,18 +289,18 @@ func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
func createBenchmarkFiles(numFiles int) (string, func(), error) {
tempDir, err := os.MkdirTemp("", "gibidify_benchmark_*")
if err != nil {
return "", nil, gibidiutils.WrapError(
return "", nil, shared.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
"failed to create temp directory",
)
}
cleanup := func() {
if err := os.RemoveAll(tempDir); err != nil {
// Log error but don't fail the benchmark
fmt.Printf("Warning: failed to remove benchmark temp directory: %v\n", err)
//nolint:errcheck // Warning message in cleanup, failure doesn't affect benchmark
_, _ = fmt.Printf("Warning: failed to remove benchmark temp directory: %v\n", err)
}
}
@@ -313,12 +314,13 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
{".py", "print('Hello, World!')"},
{
".java",
"public class Hello {\n\tpublic static void main(String[] args) {" +
"\n\t\tSystem.out.println(\"Hello, World!\");\n\t}\n}",
"public class Hello {\n\tpublic static void main(String[] args) {\n\t" +
"\tSystem.out.println(\"Hello, World!\");\n\t}\n}",
},
{
".cpp",
"#include <iostream>\n\nint main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}",
"#include <iostream>\n\n" +
"int main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}",
},
{".rs", "fn main() {\n\tprintln!(\"Hello, World!\");\n}"},
{".rb", "puts 'Hello, World!'"},
@@ -336,10 +338,11 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
subdir := filepath.Join(tempDir, fmt.Sprintf("subdir_%d", i/10))
if err := os.MkdirAll(subdir, 0o750); err != nil {
cleanup()
return "", nil, gibidiutils.WrapError(
return "", nil, shared.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
"failed to create subdirectory",
)
}
@@ -356,11 +359,9 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
if err := os.WriteFile(filename, []byte(content), 0o600); err != nil {
cleanup()
return "", nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOFileWrite,
"failed to write benchmark file",
return "", nil, shared.WrapError(
err, shared.ErrorTypeIO, shared.CodeIOFileWrite, "failed to write benchmark file",
)
}
}
@@ -369,41 +370,40 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
}
// runProcessingPipeline runs the processing pipeline similar to main.go.
// processingConfig holds configuration for processing pipeline.
type processingConfig struct {
files []string
outputFile *os.File
format string
concurrency int
sourceDir string
}
func runProcessingPipeline(
ctx context.Context,
files []string,
outputFile *os.File,
format string,
concurrency int,
sourceDir string,
) error {
// Guard against invalid concurrency to prevent deadlocks
if concurrency < 1 {
concurrency = 1
}
func runProcessingPipeline(ctx context.Context, config processingConfig) error {
fileCh := make(chan string, config.concurrency)
writeCh := make(chan fileproc.WriteRequest, config.concurrency)
fileCh := make(chan string, concurrency)
writeCh := make(chan fileproc.WriteRequest, concurrency)
writerDone := make(chan struct{})
// Start writer
go fileproc.StartWriter(config.outputFile, writeCh, writerDone, fileproc.WriterConfig{
Format: config.format,
Prefix: "",
Suffix: "",
})
go fileproc.StartWriter(outputFile, writeCh, writerDone, format, "", "")
// Get absolute path once
absRoot, err := gibidiutils.GetAbsolutePath(config.sourceDir)
absRoot, err := shared.AbsolutePath(sourceDir)
if err != nil {
return gibidiutils.WrapError(
return shared.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSPathResolution,
shared.ErrorTypeFileSystem,
shared.CodeFSPathResolution,
"failed to get absolute path for source directory",
)
}
// Start workers with proper synchronization
var workersDone sync.WaitGroup
for i := 0; i < config.concurrency; i++ {
for i := 0; i < concurrency; i++ {
workersDone.Add(1)
go func() {
defer workersDone.Done()
@@ -414,14 +414,15 @@ func runProcessingPipeline(ctx context.Context, config processingConfig) error {
}
// Send files to workers
for _, file := range config.files {
for _, file := range files {
select {
case <-ctx.Done():
close(fileCh)
workersDone.Wait() // Wait for workers to finish
close(writeCh)
<-writerDone
return ctx.Err()
return fmt.Errorf("context canceled: %w", ctx.Err())
case fileCh <- file:
}
}
@@ -439,22 +440,38 @@ func runProcessingPipeline(ctx context.Context, config processingConfig) error {
// PrintResult prints a formatted benchmark result.
func PrintResult(result *Result) {
fmt.Printf("=== %s ===\n", result.Name)
fmt.Printf("Duration: %v\n", result.Duration)
fmt.Printf("Files Processed: %d\n", result.FilesProcessed)
fmt.Printf("Bytes Processed: %d (%.2f MB)\n", result.BytesProcessed, float64(result.BytesProcessed)/1024/1024)
fmt.Printf("Files/sec: %.2f\n", result.FilesPerSecond)
fmt.Printf("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/1024/1024)
fmt.Printf("Memory Usage: +%.2f MB (Sys: +%.2f MB)\n", result.MemoryUsage.AllocMB, result.MemoryUsage.SysMB)
pauseDuration := time.Duration(gibidiutils.SafeUint64ToInt64WithDefault(result.MemoryUsage.PauseTotalNs, 0))
fmt.Printf("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, pauseDuration)
fmt.Printf("Goroutines: %d\n", result.CPUUsage.Goroutines)
fmt.Println()
printBenchmarkLine := func(format string, args ...any) {
if _, err := fmt.Printf(format, args...); err != nil {
// Stdout write errors are rare (broken pipe, etc.) - log but continue
shared.LogError("failed to write benchmark output", err)
}
}
printBenchmarkLine(shared.BenchmarkFmtSectionHeader, result.Name)
printBenchmarkLine("Duration: %v\n", result.Duration)
printBenchmarkLine("Files Processed: %d\n", result.FilesProcessed)
printBenchmarkLine("Bytes Processed: %d (%.2f MB)\n", result.BytesProcessed,
float64(result.BytesProcessed)/float64(shared.BytesPerMB))
printBenchmarkLine("Files/sec: %.2f\n", result.FilesPerSecond)
printBenchmarkLine("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/float64(shared.BytesPerMB))
printBenchmarkLine(
"Memory Usage: +%.2f MB (Sys: +%.2f MB)\n",
result.MemoryUsage.AllocMB,
result.MemoryUsage.SysMB,
)
//nolint:errcheck // Overflow unlikely for pause duration, result output only
pauseDuration, _ := shared.SafeUint64ToInt64(result.MemoryUsage.PauseTotalNs)
printBenchmarkLine("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, time.Duration(pauseDuration))
printBenchmarkLine("Goroutines: %d\n", result.CPUUsage.Goroutines)
printBenchmarkLine("\n")
}
// PrintSuite prints all results in a benchmark suite.
func PrintSuite(suite *Suite) {
fmt.Printf("=== %s ===\n", suite.Name)
if _, err := fmt.Printf(shared.BenchmarkFmtSectionHeader, suite.Name); err != nil {
shared.LogError("failed to write benchmark suite header", err)
}
// Iterate by index to avoid taking address of range variable
for i := range suite.Results {
PrintResult(&suite.Results[i])
}
@@ -462,47 +479,54 @@ func PrintSuite(suite *Suite) {
// RunAllBenchmarks runs a comprehensive benchmark suite.
func RunAllBenchmarks(sourceDir string) error {
fmt.Println("Running gibidify benchmark suite...")
printBenchmark := func(msg string) {
if _, err := fmt.Println(msg); err != nil {
shared.LogError("failed to write benchmark message", err)
}
}
printBenchmark("Running gibidify benchmark suite...")
// Load configuration
config.LoadConfig()
// File collection benchmark
fmt.Println("Running file collection benchmark...")
result, err := FileCollectionBenchmark(sourceDir, 1000)
printBenchmark(shared.BenchmarkMsgRunningCollection)
result, err := FileCollectionBenchmark(sourceDir, shared.BenchmarkDefaultFileCount)
if err != nil {
return gibidiutils.WrapError(
return shared.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"file collection benchmark failed",
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgFileCollectionFailed,
)
}
PrintResult(result)
// Format benchmarks
fmt.Println("Running format benchmarks...")
formatSuite, err := FormatBenchmark(sourceDir, []string{"json", "yaml", "markdown"})
printBenchmark("Running format benchmarks...")
formats := []string{shared.FormatJSON, shared.FormatYAML, shared.FormatMarkdown}
formatSuite, err := FormatBenchmark(sourceDir, formats)
if err != nil {
return gibidiutils.WrapError(
return shared.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"format benchmark failed",
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgFormatFailed,
)
}
PrintSuite(formatSuite)
// Concurrency benchmarks
fmt.Println("Running concurrency benchmarks...")
printBenchmark("Running concurrency benchmarks...")
concurrencyLevels := []int{1, 2, 4, 8, runtime.NumCPU()}
concurrencySuite, err := ConcurrencyBenchmark(sourceDir, "json", concurrencyLevels)
concurrencySuite, err := ConcurrencyBenchmark(sourceDir, shared.FormatJSON, concurrencyLevels)
if err != nil {
return gibidiutils.WrapError(
return shared.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"concurrency benchmark failed",
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgConcurrencyFailed,
)
}
PrintSuite(concurrencySuite)

View File

@@ -1,10 +1,54 @@
package benchmark
import (
"bytes"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/ivuorinen/gibidify/shared"
)
// capturedOutput captures stdout output from a function call.
func capturedOutput(t *testing.T, fn func()) string {
	t.Helper()

	saved := os.Stdout
	reader, writer, err := os.Pipe()
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreatePipe, err)
	}
	defer reader.Close()
	defer func() { os.Stdout = saved }()

	// Redirect stdout to the pipe for the duration of fn.
	os.Stdout = writer
	fn()
	if closeErr := writer.Close(); closeErr != nil {
		t.Logf(shared.TestMsgFailedToClose, closeErr)
	}

	var captured bytes.Buffer
	if _, copyErr := io.Copy(&captured, reader); copyErr != nil {
		t.Fatalf(shared.TestMsgFailedToReadOutput, copyErr)
	}

	return captured.String()
}
// verifyOutputContains checks if output contains all expected strings.
func verifyOutputContains(t *testing.T, testName, output string, expected []string) {
	t.Helper()

	for i := range expected {
		want := expected[i]
		if strings.Contains(output, want) {
			continue
		}
		t.Errorf("Test %s: output missing expected content: %q\nFull output:\n%s", testName, want, output)
	}
}
// TestFileCollectionBenchmark tests the file collection benchmark.
func TestFileCollectionBenchmark(t *testing.T) {
result, err := FileCollectionBenchmark("", 10)
@@ -22,7 +66,7 @@ func TestFileCollectionBenchmark(t *testing.T) {
t.Logf("Bytes processed: %d", result.BytesProcessed)
if result.FilesProcessed <= 0 {
t.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
t.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
if result.Duration <= 0 {
@@ -38,7 +82,7 @@ func TestFileProcessingBenchmark(t *testing.T) {
}
if result.FilesProcessed <= 0 {
t.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
t.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
if result.Duration <= 0 {
@@ -59,12 +103,12 @@ func TestConcurrencyBenchmark(t *testing.T) {
}
if len(suite.Results) != len(concurrencyLevels) {
t.Errorf("Expected %d results, got %d", len(concurrencyLevels), len(suite.Results))
t.Errorf(shared.TestFmtExpectedResults, len(concurrencyLevels), len(suite.Results))
}
for i, result := range suite.Results {
if result.FilesProcessed <= 0 {
t.Errorf("Result %d: Expected files processed > 0, got %d", i, result.FilesProcessed)
t.Errorf("Result %d: "+shared.TestFmtExpectedFilesProcessed, i, result.FilesProcessed)
}
}
}
@@ -82,12 +126,12 @@ func TestFormatBenchmark(t *testing.T) {
}
if len(suite.Results) != len(formats) {
t.Errorf("Expected %d results, got %d", len(formats), len(suite.Results))
t.Errorf(shared.TestFmtExpectedResults, len(formats), len(suite.Results))
}
for i, result := range suite.Results {
if result.FilesProcessed <= 0 {
t.Errorf("Result %d: Expected files processed > 0, got %d", i, result.FilesProcessed)
t.Errorf("Result %d: "+shared.TestFmtExpectedFilesProcessed, i, result.FilesProcessed)
}
}
}
@@ -116,7 +160,7 @@ func BenchmarkFileCollection(b *testing.B) {
b.Fatalf("FileCollectionBenchmark failed: %v", err)
}
if result.FilesProcessed <= 0 {
b.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
b.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
}
}
@@ -129,7 +173,7 @@ func BenchmarkFileProcessing(b *testing.B) {
b.Fatalf("FileProcessingBenchmark failed: %v", err)
}
if result.FilesProcessed <= 0 {
b.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
b.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
}
}
@@ -144,7 +188,7 @@ func BenchmarkConcurrency(b *testing.B) {
b.Fatalf("ConcurrencyBenchmark failed: %v", err)
}
if len(suite.Results) != len(concurrencyLevels) {
b.Errorf("Expected %d results, got %d", len(concurrencyLevels), len(suite.Results))
b.Errorf(shared.TestFmtExpectedResults, len(concurrencyLevels), len(suite.Results))
}
}
}
@@ -159,7 +203,315 @@ func BenchmarkFormats(b *testing.B) {
b.Fatalf("FormatBenchmark failed: %v", err)
}
if len(suite.Results) != len(formats) {
b.Errorf("Expected %d results, got %d", len(formats), len(suite.Results))
b.Errorf(shared.TestFmtExpectedResults, len(formats), len(suite.Results))
}
}
}
// TestPrintResult tests the PrintResult function.
//
// Uses the capturedOutput helper instead of duplicating the
// os.Pipe/stdout-swap plumbing inline, matching the edge-case tests.
func TestPrintResult(t *testing.T) {
	// Create a test result
	result := &Result{
		Name:           "Test Benchmark",
		Duration:       1 * time.Second,
		FilesProcessed: 100,
		BytesProcessed: 2048000, // ~2MB for easy calculation
	}

	// Capture stdout while printing the result.
	output := capturedOutput(t, func() { PrintResult(result) })

	// Verify expected content
	verifyOutputContains(t, "TestPrintResult", output, []string{
		"=== Test Benchmark ===",
		"Duration: 1s",
		"Files Processed: 100",
		"Bytes Processed: 2048000",
		"1.95 MB", // 2048000 / 1024 / 1024 ≈ 1.95
	})
}
// TestPrintSuite tests the PrintSuite function.
//
// Uses the capturedOutput helper instead of duplicating the
// os.Pipe/stdout-swap plumbing inline, matching the edge-case tests.
func TestPrintSuite(t *testing.T) {
	// Create a test suite with multiple results
	suite := &Suite{
		Name: "Test Suite",
		Results: []Result{
			{
				Name:           "Benchmark 1",
				Duration:       500 * time.Millisecond,
				FilesProcessed: 50,
				BytesProcessed: 1024000, // 1MB
			},
			{
				Name:           "Benchmark 2",
				Duration:       750 * time.Millisecond,
				FilesProcessed: 75,
				BytesProcessed: 1536000, // 1.5MB
			},
		},
	}

	// Capture stdout while printing the suite.
	output := capturedOutput(t, func() { PrintSuite(suite) })

	// Verify expected content
	verifyOutputContains(t, "TestPrintSuite", output, []string{
		"=== Test Suite ===",
		"=== Benchmark 1 ===",
		"Duration: 500ms",
		"Files Processed: 50",
		"=== Benchmark 2 ===",
		"Duration: 750ms",
		"Files Processed: 75",
	})

	// Verify both results are printed exactly once.
	for _, header := range []string{"=== Benchmark 1 ===", "=== Benchmark 2 ==="} {
		if count := strings.Count(output, header); count != 1 {
			t.Errorf("Expected exactly 1 occurrence of %q, got %d", header, count)
		}
	}
}
// TestPrintResultEdgeCases tests edge cases for PrintResult.
func TestPrintResultEdgeCases(t *testing.T) {
	cases := []struct {
		name   string
		result *Result
		checks []string
	}{
		{
			name: "zero values",
			result: &Result{
				Name:           "Zero Benchmark",
				Duration:       0,
				FilesProcessed: 0,
				BytesProcessed: 0,
			},
			checks: []string{
				"=== Zero Benchmark ===",
				"Duration: 0s",
				"Files Processed: 0",
				"Bytes Processed: 0",
				"0.00 MB",
			},
		},
		{
			name: "large values",
			result: &Result{
				Name:           "Large Benchmark",
				Duration:       1 * time.Hour,
				FilesProcessed: 1000000,
				BytesProcessed: 1073741824, // 1GB
			},
			checks: []string{
				"=== Large Benchmark ===",
				"Duration: 1h0m0s",
				"Files Processed: 1000000",
				"Bytes Processed: 1073741824",
				"1024.00 MB",
			},
		},
		{
			name: "empty name",
			result: &Result{
				Name:           "",
				Duration:       100 * time.Millisecond,
				FilesProcessed: 10,
				BytesProcessed: 1024,
			},
			checks: []string{
				"=== ===", // Header with an empty name between the markers
				"Duration: 100ms",
				"Files Processed: 10",
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := tc.result
			output := capturedOutput(t, func() { PrintResult(res) })
			verifyOutputContains(t, tc.name, output, tc.checks)
		})
	}
}
// TestPrintSuiteEdgeCases tests edge cases for PrintSuite.
func TestPrintSuiteEdgeCases(t *testing.T) {
	cases := []struct {
		name   string
		suite  *Suite
		checks []string
	}{
		{
			name: "empty suite",
			suite: &Suite{
				Name:    "Empty Suite",
				Results: []Result{},
			},
			checks: []string{
				"=== Empty Suite ===",
			},
		},
		{
			name: "suite with empty name",
			suite: &Suite{
				Name: "",
				Results: []Result{
					{
						Name:           "Single Benchmark",
						Duration:       200 * time.Millisecond,
						FilesProcessed: 20,
						BytesProcessed: 2048,
					},
				},
			},
			checks: []string{
				"=== ===", // Header with an empty suite name
				"=== Single Benchmark ===",
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			s := tc.suite
			output := capturedOutput(t, func() { PrintSuite(s) })
			verifyOutputContains(t, tc.name, output, tc.checks)
		})
	}
}
// TestRunAllBenchmarks tests the RunAllBenchmarks function.
//
// Uses the capturedOutput helper instead of duplicating the
// os.Pipe/stdout-swap plumbing inline; the benchmark error is
// recorded via the closure so it can be checked afterwards.
func TestRunAllBenchmarks(t *testing.T) {
	// Create a temporary directory with some test files
	srcDir := t.TempDir()

	// Create a few test files
	testFiles := []struct {
		name    string
		content string
	}{
		{shared.TestFileMainGo, "package main\nfunc main() {}"},
		{shared.TestFile2Name, "Hello World"},
		{shared.TestFile3Name, "# Test Markdown"},
	}
	for _, file := range testFiles {
		filePath := filepath.Join(srcDir, file.name)
		if err := os.WriteFile(filePath, []byte(file.content), 0o644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", file.name, err)
		}
	}

	// Capture stdout to verify output; the closure records the error
	// from RunAllBenchmarks for checking after capture completes.
	var runErr error
	output := capturedOutput(t, func() { runErr = RunAllBenchmarks(srcDir) })

	// Check for error
	if runErr != nil {
		t.Errorf("RunAllBenchmarks failed: %v", runErr)
	}

	// Verify expected output content
	verifyOutputContains(t, "TestRunAllBenchmarks", output, []string{
		"Running gibidify benchmark suite...",
		"Running file collection benchmark...",
		"Running format benchmarks...",
		"Running concurrency benchmarks...",
	})

	// The function should not panic and should complete successfully
	t.Log("RunAllBenchmarks completed successfully with output captured")
}