diff --git a/.checkmake b/.checkmake
index fb06f79..cdbb5bc 100644
--- a/.checkmake
+++ b/.checkmake
@@ -1,14 +1,8 @@
# checkmake configuration
-# See: https://github.com/checkmake/checkmake#configuration
+# See: https://github.com/mrtazz/checkmake#configuration
[rules.timestampexpansion]
disabled = true
[rules.maxbodylength]
disabled = true
-
-[rules.minphony]
-disabled = true
-
-[rules.phonydeclared]
-disabled = true
diff --git a/.editorconfig b/.editorconfig
index 34522aa..ab856a8 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -7,27 +7,31 @@ trim_trailing_whitespace = true
indent_size = 2
indent_style = tab
tab_width = 2
+charset = utf-8
[*.go]
max_line_length = 120
-[*.md]
-trim_trailing_whitespace = false
-
-[*.{yml,yaml,json,toml}]
+[*.{yml,yaml,json,example}]
indent_style = space
max_line_length = 250
-[*.{yaml.example,yml.example}]
-indent_style = space
-
-[.yamllint]
-indent_style = space
-
[LICENSE]
max_line_length = 80
indent_size = 0
indent_style = space
+[*.{sh,md,txt}]
+indent_style = space
+
+[.yamllint]
+indent_style = space
+
[Makefile]
-max_line_length = 80
+indent_style = tab
+indent_size = 0
+max_line_length = 999
+tab_width = 4
+
+[*.md]
+trim_trailing_whitespace = false
diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml
index 0674ad4..44a09f5 100644
--- a/.github/actions/setup/action.yml
+++ b/.github/actions/setup/action.yml
@@ -1,3 +1,4 @@
+---
name: "Setup Go with Runner Hardening"
description: "Reusable action to set up Go"
inputs:
diff --git a/.github/workflows/build-test-publish.yml b/.github/workflows/build-test-publish.yml
index e0ee56c..db7c3ba 100644
--- a/.github/workflows/build-test-publish.yml
+++ b/.github/workflows/build-test-publish.yml
@@ -1,4 +1,5 @@
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
+---
name: Build, Test, Coverage, and Publish
on:
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 7d780f3..57a0dec 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -1,3 +1,4 @@
+---
name: CodeQL Analysis
on:
diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml
index 1cc2285..8ab46a2 100644
--- a/.github/workflows/security.yml
+++ b/.github/workflows/security.yml
@@ -1,3 +1,4 @@
+---
name: Security Scan
on:
diff --git a/.gitignore b/.gitignore
index 61f96d4..1b23659 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,19 +1,19 @@
+*.out
.DS_Store
.idea
+.serena/
+coverage.*
gibidify
+gibidify-benchmark
gibidify.json
gibidify.txt
gibidify.yaml
+megalinter-reports/*
output.json
output.txt
output.yaml
-coverage.out
-megalinter-reports/*
-coverage.*
-*.out
-gibidify-benchmark
gosec-report.json
-gosec-results.sarif
govulncheck-report.json
-govulncheck-errors.log
security-report.md
+gosec*.log
+pr.txt
diff --git a/.golangci.yml b/.golangci.yml
deleted file mode 100644
index f13663c..0000000
--- a/.golangci.yml
+++ /dev/null
@@ -1,256 +0,0 @@
-run:
- timeout: 5m
- tests: true
- go: "1.24"
- build-tags:
- - test
-
-# golangci-lint configuration version
-version: 2
-
-output:
- format: colored-line-number
- print-issued-lines: true
- print-linter-name: true
- path-prefix: ""
- sort-results: true
-
-linters:
- enable-all: true
- disable:
- - depguard # Too strict for general use
- - exhaustruct # Too many false positives
- - ireturn # Too restrictive on interfaces
- - varnamelen # Too opinionated on name length
- - wrapcheck # Too many false positives
- - testpackage # Tests in same package are fine
- - paralleltest # Not always necessary
- - tparallel # Not always necessary
- - nlreturn # Too opinionated on newlines
- - wsl # Too opinionated on whitespace
- - nonamedreturns # Conflicts with gocritic unnamedResult
-
-linters-settings:
- errcheck:
- check-type-assertions: true
- check-blank: true
- exclude-functions:
- - io.Copy
- - fmt.Print
- - fmt.Printf
- - fmt.Println
-
- govet:
- enable-all: true
-
- gocyclo:
- min-complexity: 15
-
- gocognit:
- min-complexity: 20
-
- goconst:
- min-len: 3
- min-occurrences: 3
-
- gofmt:
- simplify: true
- rewrite-rules:
- - pattern: 'interface{}'
- replacement: 'any'
-
- goimports:
- local-prefixes: github.com/ivuorinen/gibidify
-
- golint:
- min-confidence: 0.8
-
- lll:
- line-length: 120
- tab-width: 2 # EditorConfig: tab_width = 2
-
- misspell:
- locale: US
-
- nakedret:
- max-func-lines: 30
-
- prealloc:
- simple: true
- range-loops: true
- for-loops: true
-
- revive:
- enable-all-rules: true
- rules:
- - name: package-comments
- disabled: true
- - name: file-header
- disabled: true
- - name: max-public-structs
- disabled: true
- - name: line-length-limit
- arguments: [120]
- - name: function-length
- arguments: [50, 100]
- - name: cognitive-complexity
- arguments: [20]
- - name: cyclomatic
- arguments: [15]
- - name: add-constant
- arguments:
- - maxLitCount: "3"
- allowStrs: "\"error\",\"\""
- allowInts: "0,1,2"
- - name: argument-limit
- arguments: [6]
- - name: banned-characters
- disabled: true
- - name: function-result-limit
- arguments: [3]
-
- gosec:
- excludes:
- - G104 # Handled by errcheck
- severity: medium
- confidence: medium
- exclude-generated: true
- config:
- G301: "0750"
- G302: "0640"
- G306: "0640"
-
- dupl:
- threshold: 150
-
- gocritic:
- enabled-tags:
- - diagnostic
- - experimental
- - opinionated
- - performance
- - style
- disabled-checks:
- - whyNoLint
- - paramTypeCombine
-
- gofumpt:
- extra-rules: true
-
- # EditorConfig compliance settings
- # These settings enforce .editorconfig rules:
- # - end_of_line = lf (enforced by gofumpt)
- # - insert_final_newline = true (enforced by gofumpt)
- # - trim_trailing_whitespace = true (enforced by whitespace linter)
- # - indent_style = tab, tab_width = 2 (enforced by gofumpt and lll)
-
- whitespace:
- multi-if: false # EditorConfig: trim trailing whitespace
- multi-func: false # EditorConfig: trim trailing whitespace
-
- nolintlint:
- allow-leading-space: false # EditorConfig: trim trailing whitespace
- allow-unused: false
- require-explanation: false
- require-specific: true
-
- godox:
- keywords:
- - FIXME
- - BUG
- - HACK
-
- mnd:
- settings:
- mnd:
- checks:
- - argument
- - case
- - condition
- - operation
- - return
- - assign
- ignored-numbers:
- - '0'
- - '1'
- - '2'
- - '10'
- - '100'
-
- funlen:
- lines: 80
- statements: 60
-
- nestif:
- min-complexity: 5
-
- gomodguard:
- allowed:
- modules: []
- domains: []
- blocked:
- modules: []
- versions: []
-
-issues:
- exclude-use-default: false
- exclude-case-sensitive: false
- max-issues-per-linter: 0
- max-same-issues: 0
- uniq-by-line: true
-
- exclude-dirs:
- - vendor
- - third_party
- - testdata
- - examples
- - .git
-
- exclude-files:
- - ".*\\.pb\\.go$"
- - ".*\\.gen\\.go$"
-
- exclude-rules:
- - path: _test\.go
- linters:
- - dupl
- - gosec
- - goconst
- - funlen
- - gocognit
- - gocyclo
- - errcheck
- - lll
- - nestif
-
- - path: main\.go
- linters:
- - gochecknoglobals
- - gochecknoinits
-
- - path: fileproc/filetypes\.go
- linters:
- - gochecknoglobals # Allow globals for singleton registry pattern
-
- - text: "Using the variable on range scope"
- linters:
- - scopelint
-
- - text: "should have comment or be unexported"
- linters:
- - golint
- - revive
-
- - text: "don't use ALL_CAPS in Go names"
- linters:
- - golint
- - stylecheck
-
- exclude:
- - "Error return value of .* is not checked"
- - "exported (type|method|function) .* should have comment"
- - "ST1000: at least one file in a package should have a package comment"
-
-severity:
- default-severity: error
- case-sensitive: false
diff --git a/.mega-linter.yml b/.mega-linter.yml
index d10e151..c57d96a 100644
--- a/.mega-linter.yml
+++ b/.mega-linter.yml
@@ -15,9 +15,11 @@ PRINT_ALPACA: false # Print Alpaca logo in console
SARIF_REPORTER: true # Generate SARIF report
SHOW_SKIPPED_LINTERS: false # Show skipped linters in MegaLinter log
-GO_REVIVE_CLI_LINT_MODE: project
-
DISABLE_LINTERS:
- REPOSITORY_DEVSKIM
- REPOSITORY_TRIVY
- GO_GOLANGCI_LINT
+ - YAML_PRETTIER
+
+# By default megalinter uses list_of_files, which is wrong.
+GO_REVIVE_CLI_LINT_MODE: project
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4a7fceb..68c897f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,9 +1,13 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/pre-commit-config.json
+# For more hooks, see https://pre-commit.com/hooks.html
repos:
- - repo: https://github.com/golangci/golangci-lint
- rev: v2.7.2
+ - repo: https://github.com/editorconfig-checker/editorconfig-checker.python
+ rev: 3.4.0
hooks:
- - id: golangci-lint
- args: ["--timeout=5m"]
+ - id: editorconfig-checker
+ alias: ec
+
- repo: https://github.com/tekwizely/pre-commit-golang
rev: v1.0.0-rc.2
hooks:
@@ -11,14 +15,13 @@ repos:
alias: build
- id: go-mod-tidy
alias: tidy
+ - id: go-revive
+ alias: revive
+ - id: go-vet-mod
+ alias: vet
+ - id: go-staticcheck-mod
+ alias: static
- id: go-fmt
alias: fmt
- - repo: https://github.com/editorconfig-checker/editorconfig-checker.python
- rev: 3.6.0
- hooks:
- - id: editorconfig-checker
- alias: ec
- - repo: https://github.com/shellcheck-py/shellcheck-py
- rev: v0.11.0.1
- hooks:
- - id: shellcheck
+ - id: go-sec-mod
+ alias: sec
diff --git a/.serena/project.yml b/.serena/project.yml
index 9246931..e0c14b8 100644
--- a/.serena/project.yml
+++ b/.serena/project.yml
@@ -3,6 +3,7 @@
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
+---
language: go
# whether to use the project's gitignore file to ignore files
diff --git a/.yamlfmt.yml b/.yamlfmt.yml
new file mode 100644
index 0000000..d2231d8
--- /dev/null
+++ b/.yamlfmt.yml
@@ -0,0 +1,18 @@
+---
+doublestar: true
+gitignore_excludes: true
+formatter:
+ type: basic
+ include_document_start: true
+ retain_line_breaks_single: true
+ scan_folded_as_literal: false
+ max_line_length: 0
+ trim_trailing_whitespace: true
+ array_indent: 2
+ force_array_style: block
+include:
+ - ./**/*.yml
+ - ./**/*.yaml
+ - .github/**/*.yml
+ - .github/**/*.yaml
+# exclude:
diff --git a/.yamllint b/.yamllint
index 88a729f..a2fe0af 100644
--- a/.yamllint
+++ b/.yamllint
@@ -1,3 +1,4 @@
+---
# yamllint configuration
# See: https://yamllint.readthedocs.io/en/stable/configuration.html
@@ -35,6 +36,3 @@ rules:
# Relax comments formatting
comments:
min-spaces-from-content: 1
-
- # Allow document start marker to be optional
- document-start: disable
diff --git a/CLAUDE.md b/CLAUDE.md
index dc90940..15c2552 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -1,12 +1,15 @@
# CLAUDE.md
-Go CLI aggregating code files into LLM-optimized output. Supports markdown/JSON/YAML with concurrent processing.
+Go CLI aggregating code files into LLM-optimized output.
+Supports markdown/JSON/YAML with concurrent processing.
-## Architecture (42 files, 8.2K lines)
+## Architecture
-**Core**: `main.go` (37), `cli/` (4), `fileproc/` (27), `config/` (3), `utils/` (4), `testutil/` (2)
+**Core**: `main.go`, `cli/`, `fileproc/`, `config/`, `utils/`, `testutil/`, `cmd/`
-**Modules**: Collection, processing, writers, registry (~63ns cache), resource limits
+**Advanced**: `metrics/`, `templates/`, `benchmark/`
+
+**Modules**: Collection, processing, writers, registry (~63ns cache), resource limits, metrics, templating
**Patterns**: Producer-consumer, thread-safe registry, streaming, modular (50-200 lines)
@@ -15,6 +18,7 @@ Go CLI aggregating code files into LLM-optimized output. Supports markdown/JSON/
```bash
make lint-fix && make lint && make test
./gibidify -source
-format markdown --verbose
+./gibidify -source -format json --log-level debug --verbose
```
## Config
@@ -22,29 +26,51 @@ make lint-fix && make lint && make test
`~/.config/gibidify/config.yaml`
Size limit 5MB, ignore dirs, custom types, 100MB memory limit
-## Quality
+## Linting Standards (MANDATORY)
-**CRITICAL**: `make lint-fix && make lint` (0 issues), 120 chars, EditorConfig, 30+ linters
+**Linter**: revive (comprehensive rule set migrated from golangci-lint)
+**Command**: `revive -config revive.toml ./...`
+**Complexity**: cognitive-complexity ≤15, cyclomatic ≤15, max-control-nesting ≤5
+**Security**: unhandled errors, secure coding patterns, credential detection
+**Performance**: optimize-operands-order, string-format, range optimizations
+**Format**: line-length ≤120 chars, EditorConfig (LF, tabs), gofmt/goimports
+**Testing**: error handling best practices, 0 tolerance policy
+
+**CRITICAL**: All rules non-negotiable. `make lint-fix && make lint` must show 0 issues.
## Testing
-**Coverage**: 84%+ (utils 90.9%, fileproc 83.8%), race detection, benchmarks
+**Coverage**: 77.9% overall (utils 90.0%, cli 83.8%, config 77.0%, testutil 73.7%, fileproc 74.5%, metrics 96.0%, templates 87.3%)
+**Patterns**: Table-driven tests, shared testutil helpers, mock objects, error assertions
+**Race detection**, benchmarks, comprehensive integration tests
+
+## Development Patterns
+
+**Logging**: Use `utils.Logger()` for all logging (replaces logrus). Default WARN level, set via `--log-level` flag
+**Error Handling**: Use `utils.WrapError` family for structured errors with context
+**Streaming**: Use `utils.StreamContent/StreamLines` for consistent file processing
+**Context**: Use `utils.CheckContextCancellation` for standardized cancellation
+**Testing**: Use `testutil.*` helpers for directory setup, error assertions
+**Validation**: Centralized in `config/validation.go` with structured error collection
## Standards
-EditorConfig (LF, tabs), semantic commits, testing required
+EditorConfig (LF, tabs), semantic commits, testing required, error wrapping
+
+## revive.toml Restrictions
+
+**AGENTS DO NOT HAVE PERMISSION** to modify `revive.toml` configuration unless user explicitly requests it.
+The linting configuration is carefully tuned and should not be altered during normal development.
## Status
-**Health: 10/10** - Production-ready, 84%+ coverage, modular, memory-optimized
+**Health: 9/10** - Production-ready with systematic deduplication complete
-**Done**: Errors, benchmarks, config, optimization, modularization, CLI (progress/colors), security (path validation, resource limits, scanning)
-
-**Next**: Documentation, output customization
+**Done**: Deduplication, errors, benchmarks, config, optimization, testing (77.9%), modularization, linting (0 issues), metrics system, templating
## Workflow
-1. `make lint-fix` first
-2. >80% coverage
-3. Follow patterns
+1. `make lint-fix` first
+2. >80% coverage
+3. Follow patterns
4. Update docs
diff --git a/Dockerfile b/Dockerfile
index 04dfe5b..b7672eb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,38 +1,17 @@
-# Build stage - builds the binary for the target architecture
-FROM --platform=$BUILDPLATFORM golang:1.25.5-alpine AS builder
+# Use a minimal base image
+FROM alpine:3.22.1
-# Build arguments automatically set by buildx
-ARG TARGETOS
-ARG TARGETARCH
-ARG TARGETVARIANT
+# Add user
+RUN useradd -ms /bin/bash gibidify
-WORKDIR /build
-
-# Copy go mod files first for better layer caching
-COPY go.mod go.sum ./
-RUN go mod download
-
-# Copy source code
-COPY . .
-
-# Build the binary for the target platform
-RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
- go build -ldflags="-s -w" -o gibidify .
-
-# Runtime stage - minimal image with the binary
-FROM alpine:3.23.0
-
-# Install ca-certificates for HTTPS and create non-root user
-# hadolint ignore=DL3018
-# kics-scan ignore-line
-RUN apk add --no-cache ca-certificates && \
- adduser -D -s /bin/sh gibidify
-
-# Copy the binary from builder
-COPY --from=builder /build/gibidify /usr/local/bin/gibidify
-
-# Use non-root user
+# Use the new user
USER gibidify
+# Copy the gibidify binary into the container
+COPY gibidify /usr/local/bin/gibidify
+
+# Ensure the binary is executable
+RUN chmod +x /usr/local/bin/gibidify
+
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/gibidify"]
diff --git a/Makefile b/Makefile
index 7dac462..41ce6a8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,14 +1,10 @@
-.PHONY: all clean test test-coverage build coverage help lint lint-fix \
- lint-verbose install-tools benchmark benchmark-collection \
- benchmark-concurrency benchmark-format benchmark-processing \
- build-benchmark check-all ci-lint ci-test dev-setup security \
- security-full vuln-check deps-update deps-check deps-tidy
+.PHONY: all help install-tools lint lint-fix test coverage build clean all build-benchmark benchmark benchmark-go benchmark-go-cli benchmark-go-fileproc benchmark-go-metrics benchmark-go-shared benchmark-all benchmark-collection benchmark-processing benchmark-concurrency benchmark-format security security-full vuln-check update-deps check-all dev-setup
# Default target shows help
.DEFAULT_GOAL := help
# All target runs full workflow
-all: lint test build
+all: lint lint-fix test build
# Help target
help:
@@ -26,19 +22,11 @@ lint:
lint-fix:
@./scripts/lint-fix.sh
-# Run linters with verbose output
-lint-verbose:
- @./scripts/lint-verbose.sh
-
# Run tests
test:
@echo "Running tests..."
@go test -race -v ./...
-# Run tests with coverage output
-test-coverage:
- @./scripts/test-coverage.sh
-
# Run tests with coverage
coverage:
@echo "Running tests with coverage..."
@@ -55,13 +43,14 @@ build:
# Clean build artifacts
clean:
@echo "Cleaning build artifacts..."
- @rm -f gibidify gibidify-benchmark
- @rm -f coverage.out coverage.html
+ @rm -f gibidify gibidify-benchmark coverage.out coverage.html *.out
@echo "Clean complete"
# CI-specific targets
+.PHONY: ci-lint ci-test
+
ci-lint:
- @golangci-lint run --out-format=github-actions ./...
+ @revive -config revive.toml -formatter friendly -set_exit_status ./...
ci-test:
@go test -race -coverprofile=coverage.out -json ./... > test-results.json
@@ -72,11 +61,36 @@ build-benchmark:
@go build -ldflags="-s -w" -o gibidify-benchmark ./cmd/benchmark
@echo "Build complete: ./gibidify-benchmark"
-# Run benchmarks
+# Run custom benchmark binary
benchmark: build-benchmark
- @echo "Running all benchmarks..."
+ @echo "Running custom benchmarks..."
@./gibidify-benchmark -type=all
+# Run all Go test benchmarks
+benchmark-go:
+ @echo "Running all Go test benchmarks..."
+ @go test -bench=. -benchtime=100ms -run=^$$ ./...
+
+# Run Go test benchmarks for specific packages
+benchmark-go-cli:
+ @echo "Running CLI benchmarks..."
+ @go test -bench=. -benchtime=100ms -run=^$$ ./cli/...
+
+benchmark-go-fileproc:
+ @echo "Running fileproc benchmarks..."
+ @go test -bench=. -benchtime=100ms -run=^$$ ./fileproc/...
+
+benchmark-go-metrics:
+ @echo "Running metrics benchmarks..."
+ @go test -bench=. -benchtime=100ms -run=^$$ ./metrics/...
+
+benchmark-go-shared:
+ @echo "Running shared benchmarks..."
+ @go test -bench=. -benchtime=100ms -run=^$$ ./shared/...
+
+# Run all benchmarks (custom + Go test)
+benchmark-all: benchmark benchmark-go
+
# Run specific benchmark types
benchmark-collection: build-benchmark
@echo "Running file collection benchmarks..."
@@ -99,24 +113,19 @@ security:
@echo "Running comprehensive security scan..."
@./scripts/security-scan.sh
-security-full:
+security-full: install-tools
@echo "Running full security analysis..."
@./scripts/security-scan.sh
+ @echo "Running additional security checks..."
+ @gosec -fmt=json -out=security-report.json ./...
+ @staticcheck -checks=all ./...
vuln-check:
@echo "Checking for dependency vulnerabilities..."
- @go install golang.org/x/vuln/cmd/govulncheck@latest
+ @go install golang.org/x/vuln/cmd/govulncheck@v1.1.4
@govulncheck ./...
-# Dependency management targets
-deps-check:
- @./scripts/deps-check.sh
-
-deps-update:
- @./scripts/deps-update.sh
-
-deps-tidy:
- @echo "Cleaning up dependencies..."
- @go mod tidy
- @go mod verify
- @echo "Dependencies cleaned and verified successfully!"
+# Update dependencies
+update-deps:
+ @echo "Updating Go dependencies..."
+ @./scripts/update-deps.sh
diff --git a/README.md b/README.md
index 4e151ae..d6a78ce 100644
--- a/README.md
+++ b/README.md
@@ -14,9 +14,11 @@ file sections with separators, and a suffix.
- **Concurrent processing** with configurable worker pools
- **Comprehensive configuration** via YAML with validation
- **Production-ready** with structured error handling and benchmarking
-- **Modular architecture** - clean, focused codebase with ~63ns registry lookups
+- **Modular architecture** - clean, focused codebase (92 files, ~21.5K lines) with ~63ns registry lookups
- **Enhanced CLI experience** - progress bars, colored output, helpful error messages
- **Cross-platform** with Docker support
+- **Advanced template system** - 4 built-in templates (default, minimal, detailed, compact) with custom template support, variable substitution, and YAML-based configuration
+- **Comprehensive metrics and profiling** - real-time processing statistics, performance analysis, memory usage tracking, and automated recommendations
## Installation
@@ -32,15 +34,16 @@ go build -o gibidify .
```bash
./gibidify \
- -source \
- -destination \
- -format markdown|json|yaml \
- -concurrency \
- --prefix="..." \
- --suffix="..." \
- --no-colors \
- --no-progress \
- --verbose
+ -source \
+ -destination \
+ -format markdown|json|yaml \
+ -concurrency \
+ --prefix="..." \
+ --suffix="..." \
+ --no-colors \
+ --no-progress \
+ --verbose \
+ --log-level debug
```
Flags:
@@ -53,6 +56,7 @@ Flags:
- `--no-colors`: disable colored terminal output.
- `--no-progress`: disable progress bars.
- `--verbose`: enable verbose output and detailed logging.
+- `--log-level`: set log level (default: warn; accepted values: debug, info, warn, error).
## Docker
@@ -66,13 +70,13 @@ Run the Docker container:
```bash
docker run --rm \
- -v $(pwd):/workspace \
- -v $HOME/.config/gibidify:/config \
- ghcr.io/ivuorinen/gibidify: \
- -source /workspace/your_source_directory \
- -destination /workspace/output.txt \
- --prefix="Your prefix text" \
- --suffix="Your suffix text"
+ -v $(pwd):/workspace \
+ -v $HOME/.config/gibidify:/config \
+ ghcr.io/ivuorinen/gibidify: \
+ -source /workspace/your_source_directory \
+ -destination /workspace/output.txt \
+ --prefix="Your prefix text" \
+ --suffix="Your suffix text"
```
## Configuration
@@ -123,6 +127,33 @@ backpressure:
maxPendingWrites: 100 # Max writes in write channel buffer
maxMemoryUsage: 104857600 # 100MB max memory usage
memoryCheckInterval: 1000 # Check memory every 1000 files
+
+# Output and template customization
+output:
+ # Template selection: default, minimal, detailed, compact, or custom
+ # Templates control output structure and formatting
+ template: "default"
+ # Metadata options
+ metadata:
+ includeStats: true
+ includeTimestamp: true
+ includeFileCount: true
+ includeSourcePath: true
+ includeMetrics: true
+ # Markdown-specific options
+ markdown:
+ useCodeBlocks: true
+ includeLanguage: true
+ headerLevel: 2
+ tableOfContents: false
+ useCollapsible: false
+ syntaxHighlighting: true
+ lineNumbers: false
+ # Custom template variables
+ variables:
+ project_name: "My Project"
+ author: "Developer Name"
+ version: "1.0.0"
```
See `config.example.yaml` for a comprehensive configuration example.
diff --git a/TODO.md b/TODO.md
index 7194a1f..1234651 100644
--- a/TODO.md
+++ b/TODO.md
@@ -4,43 +4,127 @@ Prioritized improvements by impact/effort.
## ✅ Completed
-**Core**: Testing (84%+), config validation, structured errors, benchmarking ✅
-**Architecture**: Modularization (50-200 lines), CLI (progress/colors), security (path validation, resource limits, scanning) ✅
+**Core**: Config validation, structured errors, benchmarking, linting (revive: 0 issues) ✅
+**Architecture**: Modularization (92 files, ~21.5K lines), CLI (progress/colors), security (path validation, resource limits, scanning) ✅
-## 🚀 Current Priorities
+## 🚀 Critical Priorities
-### Metrics & Profiling
-- [ ] Processing stats, timing
+### Testing Coverage (URGENT)
+- [x] **CLI module testing** (0% → 83.8%) - COMPLETED ✅
+ - [x] cli/flags_test.go - Flag parsing and validation ✅
+ - [x] cli/errors_test.go - Error formatting and structured errors ✅
+ - [x] cli/ui_test.go - UI components, colors, progress bars ✅
+ - [x] cli/processor_test.go - Processing workflow integration ✅
+- [x] **Utils module testing** (7.4% → 90.0%) - COMPLETED ✅
+ - [x] utils/writers_test.go - Writer functions (98% complete, minor test fixes needed) ✅
+ - [x] Enhanced utils/paths_test.go - Security and edge cases ✅
+ - [x] Enhanced utils/errors_test.go - StructuredError system ✅
+- [x] **Testutil module testing** (45.1% → 73.7%) - COMPLETED ✅
+ - [x] testutil/utility_test.go - GetBaseName function comprehensive tests ✅
+ - [x] testutil/directory_structure_test.go - CreateTestDirectoryStructure and SetupTempDirWithStructure ✅
+ - [x] testutil/assertions_test.go - All AssertError functions comprehensive coverage ✅
+ - [x] testutil/error_scenarios_test.go - Edge cases and performance benchmarks ✅
+- [x] **Main module testing** (41% → 50.0%) - COMPLETED ✅
+- [x] **Fileproc module improvement** (66% → 74.5%) - COMPLETED ✅
-### Output Customization
-- [ ] Templates, markdown config, metadata
+### ✅ Metrics & Profiling - COMPLETED
+- [x] **Comprehensive metrics collection system** with processing statistics ✅
+ - [x] File processing metrics (processed, skipped, errors) ✅
+ - [x] Size metrics (total, average, largest, smallest file sizes) ✅
+ - [x] Performance metrics (files/sec, bytes/sec, processing time) ✅
+ - [x] Memory and resource tracking (peak memory, current memory, goroutine count) ✅
+ - [x] Format-specific metrics and error breakdown ✅
+ - [x] Phase timing (collection, processing, writing, finalize) ✅
+ - [x] Concurrency tracking and recommendations ✅
+- [x] **Performance measurements and reporting** ✅
+ - [x] Real-time progress reporting in CLI ✅
+ - [x] Verbose mode with detailed statistics ✅
+ - [x] Final comprehensive profiling reports ✅
+ - [x] Performance recommendations based on metrics ✅
+- [x] **Structured logging integration** with centralized logging service ✅
+ - [x] Configurable log levels (debug, info, warn, error) ✅
+ - [x] Context-aware logging with structured data ✅
+ - [x] Metrics data integration in log output ✅
+
+### ✅ Output Customization - COMPLETED
+- [x] **Template system for output formatting** ✅
+ - [x] Builtin templates: default, minimal, detailed, compact ✅
+ - [x] Custom template support with variables ✅
+ - [x] Template functions for formatting (formatSize, basename, etc.) ✅
+ - [x] Header/footer and file header/footer customization ✅
+- [x] **Configurable markdown options** ✅
+ - [x] Code block controls (syntax highlighting, line numbers) ✅
+ - [x] Header levels and table of contents ✅
+ - [x] Collapsible sections for space efficiency ✅
+ - [x] Line length limits and long file folding ✅
+ - [x] Custom CSS support ✅
+- [x] **Metadata integration in outputs** ✅
+ - [x] Configurable metadata inclusion (stats, timestamp, file counts) ✅
+ - [x] Processing metrics in output (performance, memory usage) ✅
+ - [x] File type breakdown and error summaries ✅
+ - [x] Source path and processing time information ✅
+- [x] **Enhanced configuration system** ✅
+ - [x] Template selection and customization options ✅
+ - [x] Metadata control flags ✅
+ - [x] Markdown formatting preferences ✅
+ - [x] Custom template variables support ✅
### Documentation
- [ ] API docs, user guides
-## 🌟 Future
-
-**Plugins**: Custom handlers, formats
-**Git**: Commit filtering, blame
-**Rich output**: HTML, PDF, web UI
-**Monitoring**: Prometheus, structured logging
-
## Guidelines
-**Before**: `make lint-fix && make lint`, >80% coverage
-**Priorities**: Security → UX → Extensions
+**Before**: `make lint-fix && make lint` (0 issues), >80% coverage
+**Priorities**: Testing → Security → UX → Extensions
-## Status (2025-07-19)
+## Status (2025-08-23 - Phase 3 Feature Implementation Complete)
-**Health: 10/10** - Production-ready, 42 files (8.2K lines), 84%+ coverage
+**Health: 10/10** - Advanced metrics & profiling system and comprehensive output customization implemented
-**Done**: Testing, config, errors, performance, modularization, CLI, security
-**Next**: Documentation → Output customization
+**Stats**: 92 files (~21.5K lines), 77.9% overall coverage achieved
+- CLI: 83.8% ✅, Utils: 90.0% ✅, Config: 77.0% ✅, Testutil: 73.7% ✅, Fileproc: 74.5% ✅, Main: 50.0% ✅, Metrics: 96.0% ✅, Templates: 87.3% ✅, Benchmark: 64.7% ✅
-### Token Usage
+**Completed Today**:
+- ✅ **Phase 1**: Consolidated duplicate code patterns
+ - Writer closeReader → utils.SafeCloseReader
+ - Custom yamlQuoteString → utils.EscapeForYAML
+ - Streaming patterns → utils.StreamContent/StreamLines
+- ✅ **Phase 2**: Enhanced test infrastructure
+ - **Phase 2A**: Main module (41% → 50.0%) - Complete integration testing
+ - **Phase 2B**: Fileproc module (66% → 74.5%) - Streaming and backpressure testing
+ - **Phase 2C**: Testutil module (45.1% → 73.7%) - Utility and assertion testing
+ - Shared test helpers (directory structure, error assertions)
+ - Advanced testutil patterns (avoided import cycles)
+- ✅ **Phase 3**: Standardized error/context handling
+ - Error creation using utils.WrapError family
+ - Centralized context cancellation patterns
+- ✅ **Phase 4**: Documentation updates
-- TODO.md: 171 words (~228 tokens) - 35% reduction ✅
-- CLAUDE.md: 160 words (~213 tokens) - 25% reduction ✅
-- Total: 331 words (~441 tokens) - 30% reduction ✅
+**Impact**: Eliminated code duplication, enhanced maintainability, achieved comprehensive test coverage across all major modules
-*Optimized from 474 → 331 words while preserving critical information*
+**Completed This Session**:
+- ✅ **Phase 3A**: Advanced Metrics & Profiling System
+ - Comprehensive processing statistics collection (files, sizes, performance)
+ - Real-time progress reporting with detailed metrics
+ - Phase timing tracking (collection, processing, writing, finalize)
+ - Memory and resource usage monitoring
+ - Format-specific metrics and error breakdown
+ - Performance recommendations engine
+ - Structured logging integration
+- ✅ **Phase 3B**: Output Customization Features
+ - Template system with 4 builtin templates (default, minimal, detailed, compact)
+ - Custom template support with variable substitution
+ - Configurable markdown options (code blocks, TOC, collapsible sections)
+ - Metadata integration with selective inclusion controls
+ - Enhanced configuration system for all customization options
+- ✅ **Phase 3C**: Comprehensive Testing & Integration
+ - Full test coverage for metrics and templates packages
+ - Integration with existing CLI processor workflow
+ - Deadlock-free concurrent metrics collection
+ - Configuration system extensions
+
+**Impact**: Added powerful analytics and customization capabilities while maintaining high code quality and test coverage
+
+**Next Session**:
+- Phase 4: Enhanced documentation and user guides
+- Optional: Advanced features (watch mode, incremental processing, etc.)
diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go
index f183dc7..cde64ec 100644
--- a/benchmark/benchmark.go
+++ b/benchmark/benchmark.go
@@ -12,7 +12,7 @@ import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// Result represents the results of a benchmark run.
@@ -48,6 +48,46 @@ type Suite struct {
Results []Result
}
+// buildBenchmarkResult constructs a Result with all metrics calculated.
+// This eliminates code duplication across benchmark functions.
+func buildBenchmarkResult(
+ name string,
+ files []string,
+ totalBytes int64,
+ duration time.Duration,
+ memBefore, memAfter runtime.MemStats,
+) *Result {
+ result := &Result{
+ Name: name,
+ Duration: duration,
+ FilesProcessed: len(files),
+ BytesProcessed: totalBytes,
+ }
+
+ // Calculate rates with zero-division guard
+ secs := duration.Seconds()
+ if secs == 0 {
+ result.FilesPerSecond = 0
+ result.BytesPerSecond = 0
+ } else {
+ result.FilesPerSecond = float64(len(files)) / secs
+ result.BytesPerSecond = float64(totalBytes) / secs
+ }
+
+ result.MemoryUsage = MemoryStats{
+ AllocMB: shared.SafeMemoryDiffMB(memAfter.Alloc, memBefore.Alloc),
+ SysMB: shared.SafeMemoryDiffMB(memAfter.Sys, memBefore.Sys),
+ NumGC: memAfter.NumGC - memBefore.NumGC,
+ PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
+ }
+
+ result.CPUUsage = CPUStats{
+ Goroutines: runtime.NumGoroutine(),
+ }
+
+ return result
+}
+
// FileCollectionBenchmark benchmarks file collection operations.
func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
// Load configuration to ensure proper file filtering
@@ -58,14 +98,15 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
if sourceDir == "" {
tempDir, cleanupFunc, err := createBenchmarkFiles(numFiles)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSAccess,
- "failed to create benchmark files",
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSAccess,
+ shared.BenchmarkMsgFailedToCreateFiles,
)
}
cleanup = cleanupFunc
+ //nolint:errcheck // Benchmark output, errors don't affect results
defer cleanup()
sourceDir = tempDir
}
@@ -79,11 +120,11 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
// Run the file collection benchmark
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "benchmark file collection failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgCollectionFailed,
)
}
@@ -101,30 +142,11 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
}
}
- result := &Result{
- Name: "FileCollection",
- Duration: duration,
- FilesProcessed: len(files),
- BytesProcessed: totalBytes,
- FilesPerSecond: float64(len(files)) / duration.Seconds(),
- BytesPerSecond: float64(totalBytes) / duration.Seconds(),
- MemoryUsage: MemoryStats{
- AllocMB: float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024,
- SysMB: float64(memAfter.Sys-memBefore.Sys) / 1024 / 1024,
- NumGC: memAfter.NumGC - memBefore.NumGC,
- PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
- },
- CPUUsage: CPUStats{
- Goroutines: runtime.NumGoroutine(),
- },
- }
-
+ result := buildBenchmarkResult("FileCollection", files, totalBytes, duration, memBefore, memAfter)
return result, nil
}
// FileProcessingBenchmark benchmarks full file processing pipeline.
-//
-//revive:disable-next-line:function-length
func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (*Result, error) {
// Load configuration to ensure proper file filtering
config.LoadConfig()
@@ -132,16 +154,17 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
var cleanup func()
if sourceDir == "" {
// Create temporary directory with test files
- tempDir, cleanupFunc, err := createBenchmarkFiles(100)
+ tempDir, cleanupFunc, err := createBenchmarkFiles(shared.BenchmarkDefaultFileCount)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSAccess,
- "failed to create benchmark files",
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSAccess,
+ shared.BenchmarkMsgFailedToCreateFiles,
)
}
cleanup = cleanupFunc
+ //nolint:errcheck // Benchmark output, errors don't affect results
defer cleanup()
sourceDir = tempDir
}
@@ -149,21 +172,21 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Create temporary output file
outputFile, err := os.CreateTemp("", "benchmark_output_*."+format)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileCreate,
+ shared.ErrorTypeIO,
+ shared.CodeIOFileCreate,
"failed to create benchmark output file",
)
}
defer func() {
if err := outputFile.Close(); err != nil {
- // Log error but don't fail the benchmark
- fmt.Printf("Warning: failed to close benchmark output file: %v\n", err)
+ //nolint:errcheck // Warning message in defer, failure doesn't affect benchmark
+ _, _ = fmt.Printf("Warning: failed to close benchmark output file: %v\n", err)
}
if err := os.Remove(outputFile.Name()); err != nil {
- // Log error but don't fail the benchmark
- fmt.Printf("Warning: failed to remove benchmark output file: %v\n", err)
+ //nolint:errcheck // Warning message in defer, failure doesn't affect benchmark
+ _, _ = fmt.Printf("Warning: failed to remove benchmark output file: %v\n", err)
}
}()
@@ -176,27 +199,21 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Run the full processing pipeline
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "benchmark file collection failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgCollectionFailed,
)
}
// Process files with concurrency
- err = runProcessingPipeline(context.Background(), processingConfig{
- files: files,
- outputFile: outputFile,
- format: format,
- concurrency: concurrency,
- sourceDir: sourceDir,
- })
+ err = runProcessingPipeline(context.Background(), files, outputFile, format, concurrency, sourceDir)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingFileRead,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingFileRead,
"benchmark processing pipeline failed",
)
}
@@ -215,24 +232,8 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
}
}
- result := &Result{
- Name: fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency),
- Duration: duration,
- FilesProcessed: len(files),
- BytesProcessed: totalBytes,
- FilesPerSecond: float64(len(files)) / duration.Seconds(),
- BytesPerSecond: float64(totalBytes) / duration.Seconds(),
- MemoryUsage: MemoryStats{
- AllocMB: float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024,
- SysMB: float64(memAfter.Sys-memBefore.Sys) / 1024 / 1024,
- NumGC: memAfter.NumGC - memBefore.NumGC,
- PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
- },
- CPUUsage: CPUStats{
- Goroutines: runtime.NumGoroutine(),
- },
- }
-
+ benchmarkName := fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency)
+ result := buildBenchmarkResult(benchmarkName, files, totalBytes, duration, memBefore, memAfter)
return result, nil
}
@@ -246,10 +247,10 @@ func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []i
for _, concurrency := range concurrencyLevels {
result, err := FileProcessingBenchmark(sourceDir, format, concurrency)
if err != nil {
- return nil, gibidiutils.WrapErrorf(
+ return nil, shared.WrapErrorf(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
"concurrency benchmark failed for level %d",
concurrency,
)
@@ -270,10 +271,10 @@ func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
for _, format := range formats {
result, err := FileProcessingBenchmark(sourceDir, format, runtime.NumCPU())
if err != nil {
- return nil, gibidiutils.WrapErrorf(
+ return nil, shared.WrapErrorf(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
"format benchmark failed for format %s",
format,
)
@@ -288,18 +289,18 @@ func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
func createBenchmarkFiles(numFiles int) (string, func(), error) {
tempDir, err := os.MkdirTemp("", "gibidify_benchmark_*")
if err != nil {
- return "", nil, gibidiutils.WrapError(
+ return "", nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSAccess,
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSAccess,
"failed to create temp directory",
)
}
cleanup := func() {
if err := os.RemoveAll(tempDir); err != nil {
- // Log error but don't fail the benchmark
- fmt.Printf("Warning: failed to remove benchmark temp directory: %v\n", err)
+ //nolint:errcheck // Warning message in cleanup, failure doesn't affect benchmark
+ _, _ = fmt.Printf("Warning: failed to remove benchmark temp directory: %v\n", err)
}
}
@@ -313,12 +314,13 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
{".py", "print('Hello, World!')"},
{
".java",
- "public class Hello {\n\tpublic static void main(String[] args) {" +
- "\n\t\tSystem.out.println(\"Hello, World!\");\n\t}\n}",
+ "public class Hello {\n\tpublic static void main(String[] args) {\n\t" +
+ "\tSystem.out.println(\"Hello, World!\");\n\t}\n}",
},
{
".cpp",
- "#include \n\nint main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}",
+ "#include \n\n" +
+ "int main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}",
},
{".rs", "fn main() {\n\tprintln!(\"Hello, World!\");\n}"},
{".rb", "puts 'Hello, World!'"},
@@ -336,10 +338,11 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
subdir := filepath.Join(tempDir, fmt.Sprintf("subdir_%d", i/10))
if err := os.MkdirAll(subdir, 0o750); err != nil {
cleanup()
- return "", nil, gibidiutils.WrapError(
+
+ return "", nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSAccess,
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSAccess,
"failed to create subdirectory",
)
}
@@ -356,11 +359,9 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
if err := os.WriteFile(filename, []byte(content), 0o600); err != nil {
cleanup()
- return "", nil, gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileWrite,
- "failed to write benchmark file",
+
+ return "", nil, shared.WrapError(
+ err, shared.ErrorTypeIO, shared.CodeIOFileWrite, "failed to write benchmark file",
)
}
}
@@ -369,41 +370,40 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
}
// runProcessingPipeline runs the processing pipeline similar to main.go.
-// processingConfig holds configuration for processing pipeline.
-type processingConfig struct {
- files []string
- outputFile *os.File
- format string
- concurrency int
- sourceDir string
-}
+func runProcessingPipeline(
+ ctx context.Context,
+ files []string,
+ outputFile *os.File,
+ format string,
+ concurrency int,
+ sourceDir string,
+) error {
+ // Guard against invalid concurrency to prevent deadlocks
+ if concurrency < 1 {
+ concurrency = 1
+ }
-func runProcessingPipeline(ctx context.Context, config processingConfig) error {
- fileCh := make(chan string, config.concurrency)
- writeCh := make(chan fileproc.WriteRequest, config.concurrency)
+ fileCh := make(chan string, concurrency)
+ writeCh := make(chan fileproc.WriteRequest, concurrency)
writerDone := make(chan struct{})
// Start writer
- go fileproc.StartWriter(config.outputFile, writeCh, writerDone, fileproc.WriterConfig{
- Format: config.format,
- Prefix: "",
- Suffix: "",
- })
+ go fileproc.StartWriter(outputFile, writeCh, writerDone, format, "", "")
// Get absolute path once
- absRoot, err := gibidiutils.GetAbsolutePath(config.sourceDir)
+ absRoot, err := shared.AbsolutePath(sourceDir)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSPathResolution,
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSPathResolution,
"failed to get absolute path for source directory",
)
}
// Start workers with proper synchronization
var workersDone sync.WaitGroup
- for i := 0; i < config.concurrency; i++ {
+ for i := 0; i < concurrency; i++ {
workersDone.Add(1)
go func() {
defer workersDone.Done()
@@ -414,14 +414,15 @@ func runProcessingPipeline(ctx context.Context, config processingConfig) error {
}
// Send files to workers
- for _, file := range config.files {
+ for _, file := range files {
select {
case <-ctx.Done():
close(fileCh)
workersDone.Wait() // Wait for workers to finish
close(writeCh)
<-writerDone
- return ctx.Err()
+
+ return fmt.Errorf("context canceled: %w", ctx.Err())
case fileCh <- file:
}
}
@@ -439,22 +440,38 @@ func runProcessingPipeline(ctx context.Context, config processingConfig) error {
// PrintResult prints a formatted benchmark result.
func PrintResult(result *Result) {
- fmt.Printf("=== %s ===\n", result.Name)
- fmt.Printf("Duration: %v\n", result.Duration)
- fmt.Printf("Files Processed: %d\n", result.FilesProcessed)
- fmt.Printf("Bytes Processed: %d (%.2f MB)\n", result.BytesProcessed, float64(result.BytesProcessed)/1024/1024)
- fmt.Printf("Files/sec: %.2f\n", result.FilesPerSecond)
- fmt.Printf("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/1024/1024)
- fmt.Printf("Memory Usage: +%.2f MB (Sys: +%.2f MB)\n", result.MemoryUsage.AllocMB, result.MemoryUsage.SysMB)
- pauseDuration := time.Duration(gibidiutils.SafeUint64ToInt64WithDefault(result.MemoryUsage.PauseTotalNs, 0))
- fmt.Printf("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, pauseDuration)
- fmt.Printf("Goroutines: %d\n", result.CPUUsage.Goroutines)
- fmt.Println()
+ printBenchmarkLine := func(format string, args ...any) {
+ if _, err := fmt.Printf(format, args...); err != nil {
+ // Stdout write errors are rare (broken pipe, etc.) - log but continue
+ shared.LogError("failed to write benchmark output", err)
+ }
+ }
+
+ printBenchmarkLine(shared.BenchmarkFmtSectionHeader, result.Name)
+ printBenchmarkLine("Duration: %v\n", result.Duration)
+ printBenchmarkLine("Files Processed: %d\n", result.FilesProcessed)
+ printBenchmarkLine("Bytes Processed: %d (%.2f MB)\n", result.BytesProcessed,
+ float64(result.BytesProcessed)/float64(shared.BytesPerMB))
+ printBenchmarkLine("Files/sec: %.2f\n", result.FilesPerSecond)
+ printBenchmarkLine("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/float64(shared.BytesPerMB))
+ printBenchmarkLine(
+ "Memory Usage: +%.2f MB (Sys: +%.2f MB)\n",
+ result.MemoryUsage.AllocMB,
+ result.MemoryUsage.SysMB,
+ )
+ //nolint:errcheck // Overflow unlikely for pause duration, result output only
+ pauseDuration, _ := shared.SafeUint64ToInt64(result.MemoryUsage.PauseTotalNs)
+ printBenchmarkLine("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, time.Duration(pauseDuration))
+ printBenchmarkLine("Goroutines: %d\n", result.CPUUsage.Goroutines)
+ printBenchmarkLine("\n")
}
// PrintSuite prints all results in a benchmark suite.
func PrintSuite(suite *Suite) {
- fmt.Printf("=== %s ===\n", suite.Name)
+ if _, err := fmt.Printf(shared.BenchmarkFmtSectionHeader, suite.Name); err != nil {
+ shared.LogError("failed to write benchmark suite header", err)
+ }
+ // Iterate by index to avoid taking address of range variable
for i := range suite.Results {
PrintResult(&suite.Results[i])
}
@@ -462,47 +479,54 @@ func PrintSuite(suite *Suite) {
// RunAllBenchmarks runs a comprehensive benchmark suite.
func RunAllBenchmarks(sourceDir string) error {
- fmt.Println("Running gibidify benchmark suite...")
+ printBenchmark := func(msg string) {
+ if _, err := fmt.Println(msg); err != nil {
+ shared.LogError("failed to write benchmark message", err)
+ }
+ }
+
+ printBenchmark("Running gibidify benchmark suite...")
// Load configuration
config.LoadConfig()
// File collection benchmark
- fmt.Println("Running file collection benchmark...")
- result, err := FileCollectionBenchmark(sourceDir, 1000)
+ printBenchmark(shared.BenchmarkMsgRunningCollection)
+ result, err := FileCollectionBenchmark(sourceDir, shared.BenchmarkDefaultFileCount)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "file collection benchmark failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgFileCollectionFailed,
)
}
PrintResult(result)
// Format benchmarks
- fmt.Println("Running format benchmarks...")
- formatSuite, err := FormatBenchmark(sourceDir, []string{"json", "yaml", "markdown"})
+ printBenchmark("Running format benchmarks...")
+ formats := []string{shared.FormatJSON, shared.FormatYAML, shared.FormatMarkdown}
+ formatSuite, err := FormatBenchmark(sourceDir, formats)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "format benchmark failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgFormatFailed,
)
}
PrintSuite(formatSuite)
// Concurrency benchmarks
- fmt.Println("Running concurrency benchmarks...")
+ printBenchmark("Running concurrency benchmarks...")
concurrencyLevels := []int{1, 2, 4, 8, runtime.NumCPU()}
- concurrencySuite, err := ConcurrencyBenchmark(sourceDir, "json", concurrencyLevels)
+ concurrencySuite, err := ConcurrencyBenchmark(sourceDir, shared.FormatJSON, concurrencyLevels)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "concurrency benchmark failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgConcurrencyFailed,
)
}
PrintSuite(concurrencySuite)
diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go
index 870ff42..4e851da 100644
--- a/benchmark/benchmark_test.go
+++ b/benchmark/benchmark_test.go
@@ -1,10 +1,54 @@
package benchmark
import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
"runtime"
+ "strings"
"testing"
+ "time"
+
+ "github.com/ivuorinen/gibidify/shared"
)
+// capturedOutput captures stdout output from a function call.
+func capturedOutput(t *testing.T, fn func()) string {
+ t.Helper()
+ original := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreatePipe, err)
+ }
+ defer r.Close()
+ defer func() { os.Stdout = original }()
+ os.Stdout = w
+
+ fn()
+
+ if err := w.Close(); err != nil {
+ t.Logf(shared.TestMsgFailedToClose, err)
+ }
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, r); err != nil {
+ t.Fatalf(shared.TestMsgFailedToReadOutput, err)
+ }
+
+ return buf.String()
+}
+
+// verifyOutputContains checks if output contains all expected strings.
+func verifyOutputContains(t *testing.T, testName, output string, expected []string) {
+ t.Helper()
+ for _, check := range expected {
+ if !strings.Contains(output, check) {
+ t.Errorf("Test %s: output missing expected content: %q\nFull output:\n%s", testName, check, output)
+ }
+ }
+}
+
// TestFileCollectionBenchmark tests the file collection benchmark.
func TestFileCollectionBenchmark(t *testing.T) {
result, err := FileCollectionBenchmark("", 10)
@@ -22,7 +66,7 @@ func TestFileCollectionBenchmark(t *testing.T) {
t.Logf("Bytes processed: %d", result.BytesProcessed)
if result.FilesProcessed <= 0 {
- t.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
+ t.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
if result.Duration <= 0 {
@@ -38,7 +82,7 @@ func TestFileProcessingBenchmark(t *testing.T) {
}
if result.FilesProcessed <= 0 {
- t.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
+ t.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
if result.Duration <= 0 {
@@ -59,12 +103,12 @@ func TestConcurrencyBenchmark(t *testing.T) {
}
if len(suite.Results) != len(concurrencyLevels) {
- t.Errorf("Expected %d results, got %d", len(concurrencyLevels), len(suite.Results))
+ t.Errorf(shared.TestFmtExpectedResults, len(concurrencyLevels), len(suite.Results))
}
for i, result := range suite.Results {
if result.FilesProcessed <= 0 {
- t.Errorf("Result %d: Expected files processed > 0, got %d", i, result.FilesProcessed)
+ t.Errorf("Result %d: "+shared.TestFmtExpectedFilesProcessed, i, result.FilesProcessed)
}
}
}
@@ -82,12 +126,12 @@ func TestFormatBenchmark(t *testing.T) {
}
if len(suite.Results) != len(formats) {
- t.Errorf("Expected %d results, got %d", len(formats), len(suite.Results))
+ t.Errorf(shared.TestFmtExpectedResults, len(formats), len(suite.Results))
}
for i, result := range suite.Results {
if result.FilesProcessed <= 0 {
- t.Errorf("Result %d: Expected files processed > 0, got %d", i, result.FilesProcessed)
+ t.Errorf("Result %d: "+shared.TestFmtExpectedFilesProcessed, i, result.FilesProcessed)
}
}
}
@@ -116,7 +160,7 @@ func BenchmarkFileCollection(b *testing.B) {
b.Fatalf("FileCollectionBenchmark failed: %v", err)
}
if result.FilesProcessed <= 0 {
- b.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
+ b.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
}
}
@@ -129,7 +173,7 @@ func BenchmarkFileProcessing(b *testing.B) {
b.Fatalf("FileProcessingBenchmark failed: %v", err)
}
if result.FilesProcessed <= 0 {
- b.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
+ b.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
}
}
}
@@ -144,7 +188,7 @@ func BenchmarkConcurrency(b *testing.B) {
b.Fatalf("ConcurrencyBenchmark failed: %v", err)
}
if len(suite.Results) != len(concurrencyLevels) {
- b.Errorf("Expected %d results, got %d", len(concurrencyLevels), len(suite.Results))
+ b.Errorf(shared.TestFmtExpectedResults, len(concurrencyLevels), len(suite.Results))
}
}
}
@@ -159,7 +203,315 @@ func BenchmarkFormats(b *testing.B) {
b.Fatalf("FormatBenchmark failed: %v", err)
}
if len(suite.Results) != len(formats) {
- b.Errorf("Expected %d results, got %d", len(formats), len(suite.Results))
+ b.Errorf(shared.TestFmtExpectedResults, len(formats), len(suite.Results))
}
}
}
+
+// TestPrintResult tests the PrintResult function.
+func TestPrintResult(t *testing.T) {
+ // Create a test result
+ result := &Result{
+ Name: "Test Benchmark",
+ Duration: 1 * time.Second,
+ FilesProcessed: 100,
+ BytesProcessed: 2048000, // ~2MB for easy calculation
+ }
+
+ // Capture stdout
+ original := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreatePipe, err)
+ }
+ defer r.Close()
+ defer func() { os.Stdout = original }()
+ os.Stdout = w
+
+ // Call PrintResult
+ PrintResult(result)
+
+ // Close writer and read captured output
+ if err := w.Close(); err != nil {
+ t.Logf(shared.TestMsgFailedToClose, err)
+ }
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, r); err != nil {
+ t.Fatalf(shared.TestMsgFailedToReadOutput, err)
+ }
+ output := buf.String()
+
+ // Verify expected content
+ expectedContents := []string{
+ "=== Test Benchmark ===",
+ "Duration: 1s",
+ "Files Processed: 100",
+ "Bytes Processed: 2048000",
+ "1.95 MB", // 2048000 / 1024 / 1024 ≈ 1.95
+ }
+
+ for _, expected := range expectedContents {
+ if !strings.Contains(output, expected) {
+ t.Errorf("PrintResult output missing expected content: %q\nFull output:\n%s", expected, output)
+ }
+ }
+}
+
+// TestPrintSuite tests the PrintSuite function.
+func TestPrintSuite(t *testing.T) {
+ // Create a test suite with multiple results
+ suite := &Suite{
+ Name: "Test Suite",
+ Results: []Result{
+ {
+ Name: "Benchmark 1",
+ Duration: 500 * time.Millisecond,
+ FilesProcessed: 50,
+ BytesProcessed: 1024000, // 1MB
+ },
+ {
+ Name: "Benchmark 2",
+ Duration: 750 * time.Millisecond,
+ FilesProcessed: 75,
+ BytesProcessed: 1536000, // 1.5MB
+ },
+ },
+ }
+
+ // Capture stdout
+ original := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreatePipe, err)
+ }
+ defer r.Close()
+ defer func() { os.Stdout = original }()
+ os.Stdout = w
+
+ // Call PrintSuite
+ PrintSuite(suite)
+
+ // Close writer and read captured output
+ if err := w.Close(); err != nil {
+ t.Logf(shared.TestMsgFailedToClose, err)
+ }
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, r); err != nil {
+ t.Fatalf(shared.TestMsgFailedToReadOutput, err)
+ }
+ output := buf.String()
+
+ // Verify expected content
+ expectedContents := []string{
+ "=== Test Suite ===",
+ "=== Benchmark 1 ===",
+ "Duration: 500ms",
+ "Files Processed: 50",
+ "=== Benchmark 2 ===",
+ "Duration: 750ms",
+ "Files Processed: 75",
+ }
+
+ for _, expected := range expectedContents {
+ if !strings.Contains(output, expected) {
+ t.Errorf("PrintSuite output missing expected content: %q\nFull output:\n%s", expected, output)
+ }
+ }
+
+ // Verify both results are printed
+ benchmark1Count := strings.Count(output, "=== Benchmark 1 ===")
+ benchmark2Count := strings.Count(output, "=== Benchmark 2 ===")
+
+ if benchmark1Count != 1 {
+ t.Errorf("Expected exactly 1 occurrence of 'Benchmark 1', got %d", benchmark1Count)
+ }
+ if benchmark2Count != 1 {
+ t.Errorf("Expected exactly 1 occurrence of 'Benchmark 2', got %d", benchmark2Count)
+ }
+}
+
+// TestPrintResultEdgeCases tests edge cases for PrintResult.
+func TestPrintResultEdgeCases(t *testing.T) {
+ tests := []struct {
+ name string
+ result *Result
+ checks []string
+ }{
+ {
+ name: "zero values",
+ result: &Result{
+ Name: "Zero Benchmark",
+ Duration: 0,
+ FilesProcessed: 0,
+ BytesProcessed: 0,
+ },
+ checks: []string{
+ "=== Zero Benchmark ===",
+ "Duration: 0s",
+ "Files Processed: 0",
+ "Bytes Processed: 0",
+ "0.00 MB",
+ },
+ },
+ {
+ name: "large values",
+ result: &Result{
+ Name: "Large Benchmark",
+ Duration: 1 * time.Hour,
+ FilesProcessed: 1000000,
+ BytesProcessed: 1073741824, // 1GB
+ },
+ checks: []string{
+ "=== Large Benchmark ===",
+ "Duration: 1h0m0s",
+ "Files Processed: 1000000",
+ "Bytes Processed: 1073741824",
+ "1024.00 MB",
+ },
+ },
+ {
+ name: "empty name",
+ result: &Result{
+ Name: "",
+ Duration: 100 * time.Millisecond,
+ FilesProcessed: 10,
+ BytesProcessed: 1024,
+ },
+ checks: []string{
+ "=== ===", // Empty name between === markers
+ "Duration: 100ms",
+ "Files Processed: 10",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := tt.result
+ output := capturedOutput(t, func() { PrintResult(result) })
+ verifyOutputContains(t, tt.name, output, tt.checks)
+ })
+ }
+}
+
+// TestPrintSuiteEdgeCases tests edge cases for PrintSuite.
+func TestPrintSuiteEdgeCases(t *testing.T) {
+ tests := []struct {
+ name string
+ suite *Suite
+ checks []string
+ }{
+ {
+ name: "empty suite",
+ suite: &Suite{
+ Name: "Empty Suite",
+ Results: []Result{},
+ },
+ checks: []string{
+ "=== Empty Suite ===",
+ },
+ },
+ {
+ name: "suite with empty name",
+ suite: &Suite{
+ Name: "",
+ Results: []Result{
+ {
+ Name: "Single Benchmark",
+ Duration: 200 * time.Millisecond,
+ FilesProcessed: 20,
+ BytesProcessed: 2048,
+ },
+ },
+ },
+ checks: []string{
+ "=== ===", // Empty name
+ "=== Single Benchmark ===",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ suite := tt.suite
+ output := capturedOutput(t, func() { PrintSuite(suite) })
+ verifyOutputContains(t, tt.name, output, tt.checks)
+ })
+ }
+}
+
+// TestRunAllBenchmarks tests the RunAllBenchmarks function.
+func TestRunAllBenchmarks(t *testing.T) {
+ // Create a temporary directory with some test files
+ srcDir := t.TempDir()
+
+ // Create a few test files
+ testFiles := []struct {
+ name string
+ content string
+ }{
+ {shared.TestFileMainGo, "package main\nfunc main() {}"},
+ {shared.TestFile2Name, "Hello World"},
+ {shared.TestFile3Name, "# Test Markdown"},
+ }
+
+ for _, file := range testFiles {
+ filePath := filepath.Join(srcDir, file.name)
+ err := os.WriteFile(filePath, []byte(file.content), 0o644)
+ if err != nil {
+ t.Fatalf("Failed to create test file %s: %v", file.name, err)
+ }
+ }
+
+ // Capture stdout to verify output
+ original := os.Stdout
+ r, w, pipeErr := os.Pipe()
+ if pipeErr != nil {
+ t.Fatalf(shared.TestMsgFailedToCreatePipe, pipeErr)
+ }
+ defer func() {
+ if err := r.Close(); err != nil {
+ t.Logf("Failed to close pipe reader: %v", err)
+ }
+ }()
+ defer func() { os.Stdout = original }()
+ os.Stdout = w
+
+ // Call RunAllBenchmarks
+ err := RunAllBenchmarks(srcDir)
+
+ // Close writer and read captured output
+ if closeErr := w.Close(); closeErr != nil {
+ t.Logf(shared.TestMsgFailedToClose, closeErr)
+ }
+
+ var buf bytes.Buffer
+ if _, copyErr := io.Copy(&buf, r); copyErr != nil {
+ t.Fatalf(shared.TestMsgFailedToReadOutput, copyErr)
+ }
+ output := buf.String()
+
+ // Check for error
+ if err != nil {
+ t.Errorf("RunAllBenchmarks failed: %v", err)
+ }
+
+ // Verify expected output content
+ expectedContents := []string{
+ "Running gibidify benchmark suite...",
+ "Running file collection benchmark...",
+ "Running format benchmarks...",
+ "Running concurrency benchmarks...",
+ }
+
+ for _, expected := range expectedContents {
+ if !strings.Contains(output, expected) {
+ t.Errorf("RunAllBenchmarks output missing expected content: %q\nFull output:\n%s", expected, output)
+ }
+ }
+
+ // The function should not panic and should complete successfully
+ t.Log("RunAllBenchmarks completed successfully with output captured")
+}
diff --git a/cli/errors.go b/cli/errors.go
index 29ca767..fcd4501 100644
--- a/cli/errors.go
+++ b/cli/errors.go
@@ -1,4 +1,4 @@
-// Package cli provides command-line interface utilities for gibidify.
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
@@ -7,10 +7,11 @@ import (
"path/filepath"
"strings"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// ErrorFormatter handles CLI-friendly error formatting with suggestions.
+// This is not an error type itself; it formats existing errors for display.
type ErrorFormatter struct {
ui *UIManager
}
@@ -20,11 +21,6 @@ func NewErrorFormatter(ui *UIManager) *ErrorFormatter {
return &ErrorFormatter{ui: ui}
}
-// Suggestion messages for error formatting.
-const (
- suggestionCheckPermissions = " %s Check file/directory permissions\n"
-)
-
// FormatError formats an error with context and suggestions.
func (ef *ErrorFormatter) FormatError(err error) {
if err == nil {
@@ -32,9 +28,10 @@ func (ef *ErrorFormatter) FormatError(err error) {
}
// Handle structured errors
- var structErr *gibidiutils.StructuredError
+ structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
ef.formatStructuredError(structErr)
+
return
}
@@ -43,12 +40,12 @@ func (ef *ErrorFormatter) FormatError(err error) {
}
// formatStructuredError formats a structured error with context and suggestions.
-func (ef *ErrorFormatter) formatStructuredError(err *gibidiutils.StructuredError) {
+func (ef *ErrorFormatter) formatStructuredError(err *shared.StructuredError) {
// Print main error
- ef.ui.PrintError("Error: %s", err.Message)
+ ef.ui.PrintError(shared.CLIMsgErrorFormat, err.Message)
// Print error type and code
- if err.Type != gibidiutils.ErrorTypeUnknown || err.Code != "" {
+ if err.Type != shared.ErrorTypeUnknown || err.Code != "" {
ef.ui.PrintInfo("Type: %s, Code: %s", err.Type.String(), err.Code)
}
@@ -71,20 +68,20 @@ func (ef *ErrorFormatter) formatStructuredError(err *gibidiutils.StructuredError
// formatGenericError formats a generic error.
func (ef *ErrorFormatter) formatGenericError(err error) {
- ef.ui.PrintError("Error: %s", err.Error())
+ ef.ui.PrintError(shared.CLIMsgErrorFormat, err.Error())
ef.provideGenericSuggestions(err)
}
// provideSuggestions provides helpful suggestions based on the error.
-func (ef *ErrorFormatter) provideSuggestions(err *gibidiutils.StructuredError) {
+func (ef *ErrorFormatter) provideSuggestions(err *shared.StructuredError) {
switch err.Type {
- case gibidiutils.ErrorTypeFileSystem:
+ case shared.ErrorTypeFileSystem:
ef.provideFileSystemSuggestions(err)
- case gibidiutils.ErrorTypeValidation:
+ case shared.ErrorTypeValidation:
ef.provideValidationSuggestions(err)
- case gibidiutils.ErrorTypeProcessing:
+ case shared.ErrorTypeProcessing:
ef.provideProcessingSuggestions(err)
- case gibidiutils.ErrorTypeIO:
+ case shared.ErrorTypeIO:
ef.provideIOSuggestions(err)
default:
ef.provideDefaultSuggestions()
@@ -92,17 +89,17 @@ func (ef *ErrorFormatter) provideSuggestions(err *gibidiutils.StructuredError) {
}
// provideFileSystemSuggestions provides suggestions for file system errors.
-func (ef *ErrorFormatter) provideFileSystemSuggestions(err *gibidiutils.StructuredError) {
+func (ef *ErrorFormatter) provideFileSystemSuggestions(err *shared.StructuredError) {
filePath := err.FilePath
- ef.ui.PrintWarning("Suggestions:")
+ ef.ui.PrintWarning(shared.CLIMsgSuggestions)
switch err.Code {
- case gibidiutils.CodeFSAccess:
+ case shared.CodeFSAccess:
ef.suggestFileAccess(filePath)
- case gibidiutils.CodeFSPathResolution:
+ case shared.CodeFSPathResolution:
ef.suggestPathResolution(filePath)
- case gibidiutils.CodeFSNotFound:
+ case shared.CodeFSNotFound:
ef.suggestFileNotFound(filePath)
default:
ef.suggestFileSystemGeneral(filePath)
@@ -110,130 +107,135 @@ func (ef *ErrorFormatter) provideFileSystemSuggestions(err *gibidiutils.Structur
}
// provideValidationSuggestions provides suggestions for validation errors.
-func (ef *ErrorFormatter) provideValidationSuggestions(err *gibidiutils.StructuredError) {
- ef.ui.PrintWarning("Suggestions:")
+func (ef *ErrorFormatter) provideValidationSuggestions(err *shared.StructuredError) {
+ ef.ui.PrintWarning(shared.CLIMsgSuggestions)
switch err.Code {
- case gibidiutils.CodeValidationFormat:
- ef.ui.printf(" %s Use a supported format: markdown, json, yaml\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Example: -format markdown\n", gibidiutils.IconBullet)
- case gibidiutils.CodeValidationSize:
- ef.ui.printf(" %s Increase file size limit in config.yaml\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Use smaller files or exclude large files\n", gibidiutils.IconBullet)
+ case shared.CodeValidationFormat:
+ ef.ui.printf(" • Use a supported format: markdown, json, yaml\n")
+ ef.ui.printf(" • Example: -format markdown\n")
+ case shared.CodeValidationSize:
+ ef.ui.printf(" • Increase file size limit in config.yaml\n")
+ ef.ui.printf(" • Use smaller files or exclude large files\n")
default:
- ef.ui.printf(" %s Check your command line arguments\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Run with --help for usage information\n", gibidiutils.IconBullet)
+ ef.ui.printf(shared.CLIMsgCheckCommandLineArgs)
+ ef.ui.printf(shared.CLIMsgRunWithHelp)
}
}
// provideProcessingSuggestions provides suggestions for processing errors.
-func (ef *ErrorFormatter) provideProcessingSuggestions(err *gibidiutils.StructuredError) {
- ef.ui.PrintWarning("Suggestions:")
+func (ef *ErrorFormatter) provideProcessingSuggestions(err *shared.StructuredError) {
+ ef.ui.PrintWarning(shared.CLIMsgSuggestions)
switch err.Code {
- case gibidiutils.CodeProcessingCollection:
- ef.ui.printf(" %s Check if the source directory exists and is readable\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Verify directory permissions\n", gibidiutils.IconBullet)
- case gibidiutils.CodeProcessingFileRead:
- ef.ui.printf(" %s Check file permissions\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Verify the file is not corrupted\n", gibidiutils.IconBullet)
+ case shared.CodeProcessingCollection:
+ ef.ui.printf(" • Check if the source directory exists and is readable\n")
+ ef.ui.printf(" • Verify directory permissions\n")
+ case shared.CodeProcessingFileRead:
+ ef.ui.printf(" • Check file permissions\n")
+ ef.ui.printf(" • Verify the file is not corrupted\n")
default:
- ef.ui.printf(" %s Try reducing concurrency: -concurrency 1\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Check available system resources\n", gibidiutils.IconBullet)
+ ef.ui.printf(" • Try reducing concurrency: -concurrency 1\n")
+ ef.ui.printf(" • Check available system resources\n")
}
}
// provideIOSuggestions provides suggestions for I/O errors.
-func (ef *ErrorFormatter) provideIOSuggestions(err *gibidiutils.StructuredError) {
- ef.ui.PrintWarning("Suggestions:")
+func (ef *ErrorFormatter) provideIOSuggestions(err *shared.StructuredError) {
+ ef.ui.PrintWarning(shared.CLIMsgSuggestions)
switch err.Code {
- case gibidiutils.CodeIOFileCreate:
- ef.ui.printf(" %s Check if the destination directory exists\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Verify write permissions for the output file\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Ensure sufficient disk space\n", gibidiutils.IconBullet)
- case gibidiutils.CodeIOWrite:
- ef.ui.printf(" %s Check available disk space\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Verify write permissions\n", gibidiutils.IconBullet)
+ case shared.CodeIOFileCreate:
+ ef.ui.printf(" • Check if the destination directory exists\n")
+ ef.ui.printf(" • Verify write permissions for the output file\n")
+ ef.ui.printf(" • Ensure sufficient disk space\n")
+ case shared.CodeIOWrite:
+ ef.ui.printf(" • Check available disk space\n")
+ ef.ui.printf(" • Verify write permissions\n")
default:
- ef.ui.printf(suggestionCheckPermissions, gibidiutils.IconBullet)
- ef.ui.printf(" %s Verify available disk space\n", gibidiutils.IconBullet)
+ ef.ui.printf(shared.CLIMsgCheckFilePermissions)
+ ef.ui.printf(" • Verify available disk space\n")
}
}
-// Helper methods for specific suggestions
+// Helper methods for specific suggestions.
func (ef *ErrorFormatter) suggestFileAccess(filePath string) {
- ef.ui.printf(" %s Check if the path exists: %s\n", gibidiutils.IconBullet, filePath)
- ef.ui.printf(" %s Verify read permissions\n", gibidiutils.IconBullet)
+ ef.ui.printf(" • Check if the path exists: %s\n", filePath)
+ ef.ui.printf(" • Verify read permissions\n")
if filePath != "" {
if stat, err := os.Stat(filePath); err == nil {
- ef.ui.printf(" %s Path exists but may not be accessible\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Mode: %s\n", gibidiutils.IconBullet, stat.Mode())
+ ef.ui.printf(" • Path exists but may not be accessible\n")
+ ef.ui.printf(" • Mode: %s\n", stat.Mode())
}
}
}
func (ef *ErrorFormatter) suggestPathResolution(filePath string) {
- ef.ui.printf(" %s Use an absolute path instead of relative\n", gibidiutils.IconBullet)
+ ef.ui.printf(" • Use an absolute path instead of relative\n")
if filePath != "" {
if abs, err := filepath.Abs(filePath); err == nil {
- ef.ui.printf(" %s Try: %s\n", gibidiutils.IconBullet, abs)
+ ef.ui.printf(" • Try: %s\n", abs)
}
}
}
func (ef *ErrorFormatter) suggestFileNotFound(filePath string) {
- ef.ui.printf(" %s Check if the file/directory exists: %s\n", gibidiutils.IconBullet, filePath)
- if filePath != "" {
- dir := filepath.Dir(filePath)
- if entries, err := os.ReadDir(dir); err == nil {
- ef.ui.printf(" %s Similar files in %s:\n", gibidiutils.IconBullet, dir)
- count := 0
- for _, entry := range entries {
- if count >= 3 {
- break
- }
- if strings.Contains(entry.Name(), filepath.Base(filePath)) {
- ef.ui.printf(" %s %s\n", gibidiutils.IconBullet, entry.Name())
- count++
- }
- }
+ ef.ui.printf(" • Check if the file/directory exists: %s\n", filePath)
+ if filePath == "" {
+ return
+ }
+
+ dir := filepath.Dir(filePath)
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ return
+ }
+
+ ef.ui.printf(" • Similar files in %s:\n", dir)
+ count := 0
+ for _, entry := range entries {
+ if count >= 3 {
+ break
+ }
+ if strings.Contains(entry.Name(), filepath.Base(filePath)) {
+ ef.ui.printf(" - %s\n", entry.Name())
+ count++
}
}
}
func (ef *ErrorFormatter) suggestFileSystemGeneral(filePath string) {
- ef.ui.printf(suggestionCheckPermissions, gibidiutils.IconBullet)
- ef.ui.printf(" %s Verify the path is correct\n", gibidiutils.IconBullet)
+ ef.ui.printf(shared.CLIMsgCheckFilePermissions)
+ ef.ui.printf(" • Verify the path is correct\n")
if filePath != "" {
- ef.ui.printf(" %s Path: %s\n", gibidiutils.IconBullet, filePath)
+ ef.ui.printf(" • Path: %s\n", filePath)
}
}
// provideDefaultSuggestions provides general suggestions.
func (ef *ErrorFormatter) provideDefaultSuggestions() {
- ef.ui.printf(" %s Check your command line arguments\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Run with --help for usage information\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Try with -concurrency 1 to reduce resource usage\n", gibidiutils.IconBullet)
+ ef.ui.printf(shared.CLIMsgCheckCommandLineArgs)
+ ef.ui.printf(shared.CLIMsgRunWithHelp)
+ ef.ui.printf(" • Try with -concurrency 1 to reduce resource usage\n")
}
// provideGenericSuggestions provides suggestions for generic errors.
func (ef *ErrorFormatter) provideGenericSuggestions(err error) {
errorMsg := err.Error()
- ef.ui.PrintWarning("Suggestions:")
+ ef.ui.PrintWarning(shared.CLIMsgSuggestions)
// Pattern matching for common errors
switch {
case strings.Contains(errorMsg, "permission denied"):
- ef.ui.printf(suggestionCheckPermissions, gibidiutils.IconBullet)
- ef.ui.printf(" %s Try running with appropriate privileges\n", gibidiutils.IconBullet)
+ ef.ui.printf(shared.CLIMsgCheckFilePermissions)
+ ef.ui.printf(" • Try running with appropriate privileges\n")
case strings.Contains(errorMsg, "no such file or directory"):
- ef.ui.printf(" %s Verify the file/directory path is correct\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Check if the file exists\n", gibidiutils.IconBullet)
+ ef.ui.printf(" • Verify the file/directory path is correct\n")
+ ef.ui.printf(" • Check if the file exists\n")
case strings.Contains(errorMsg, "flag") && strings.Contains(errorMsg, "redefined"):
- ef.ui.printf(" %s This is likely a test environment issue\n", gibidiutils.IconBullet)
- ef.ui.printf(" %s Try running the command directly instead of in tests\n", gibidiutils.IconBullet)
+ ef.ui.printf(" • This is likely a test environment issue\n")
+ ef.ui.printf(" • Try running the command directly instead of in tests\n")
default:
ef.provideDefaultSuggestions()
}
@@ -248,8 +250,8 @@ func (e MissingSourceError) Error() string {
return "source directory is required"
}
-// NewMissingSourceError creates a new CLI missing source error with suggestions.
-func NewMissingSourceError() error {
+// NewCLIMissingSourceError creates a new CLI missing source error with suggestions.
+func NewCLIMissingSourceError() error {
return &MissingSourceError{}
}
@@ -266,11 +268,11 @@ func IsUserError(err error) bool {
}
// Check for structured errors that are user-facing
- var structErr *gibidiutils.StructuredError
+ structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
- return structErr.Type == gibidiutils.ErrorTypeValidation ||
- structErr.Code == gibidiutils.CodeValidationFormat ||
- structErr.Code == gibidiutils.CodeValidationSize
+ return structErr.Type == shared.ErrorTypeValidation ||
+ structErr.Code == shared.CodeValidationFormat ||
+ structErr.Code == shared.CodeValidationSize
}
// Check error message patterns
diff --git a/cli/errors_test.go b/cli/errors_test.go
index 7a2b9e9..eaefe80 100644
--- a/cli/errors_test.go
+++ b/cli/errors_test.go
@@ -3,399 +3,665 @@ package cli
import (
"bytes"
"errors"
+ "os"
+ "path/filepath"
"strings"
"testing"
- "github.com/fatih/color"
- "github.com/stretchr/testify/assert"
-
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
func TestNewErrorFormatter(t *testing.T) {
- ui := &UIManager{
- output: &bytes.Buffer{},
+ ui := NewUIManager()
+ formatter := NewErrorFormatter(ui)
+
+ if formatter == nil {
+ t.Error("NewErrorFormatter() returned nil")
+
+ return
+ }
+ if formatter.ui != ui {
+ t.Error("NewErrorFormatter() did not set ui manager correctly")
}
-
- ef := NewErrorFormatter(ui)
-
- assert.NotNil(t, ef)
- assert.Equal(t, ui, ef.ui)
}
-func TestFormatError(t *testing.T) {
+func TestErrorFormatterFormatError(t *testing.T) {
tests := []struct {
name string
err error
- expectedOutput []string
- notExpected []string
+ expectedOutput []string // Substrings that should be present in output
}{
{
name: "nil error",
err: nil,
- expectedOutput: []string{},
+ expectedOutput: []string{}, // Should produce no output
},
{
- name: "structured error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSNotFound,
- testErrFileNotFound,
- "/test/file.txt",
- map[string]interface{}{"size": 1024},
- ),
- expectedOutput: []string{
- gibidiutils.IconError + testErrorSuffix,
- "FileSystem",
- testErrFileNotFound,
- "/test/file.txt",
- "NOT_FOUND",
+ name: "structured error with context",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: shared.CodeFSAccess,
+ Message: shared.TestErrCannotAccessFile,
+ FilePath: shared.TestPathBase,
+ Context: map[string]any{
+ "permission": "0000",
+ "owner": "root",
+ },
},
- },
- {
- name: "generic error",
- err: errors.New("something went wrong"),
- expectedOutput: []string{gibidiutils.IconError + testErrorSuffix, "something went wrong"},
- },
- {
- name: "wrapped structured error",
- err: gibidiutils.WrapError(
- errors.New("inner error"),
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationRequired,
- "validation failed",
- ),
expectedOutput: []string{
- gibidiutils.IconError + testErrorSuffix,
- "validation failed",
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
- prev := color.NoColor
- color.NoColor = true
- t.Cleanup(func() { color.NoColor = prev })
-
- ef := NewErrorFormatter(ui)
- ef.FormatError(tt.err)
-
- output := buf.String()
- for _, expected := range tt.expectedOutput {
- assert.Contains(t, output, expected)
- }
- for _, notExpected := range tt.notExpected {
- assert.NotContains(t, output, notExpected)
- }
- })
- }
-}
-
-func TestFormatStructuredError(t *testing.T) {
- tests := []struct {
- name string
- err *gibidiutils.StructuredError
- expectedOutput []string
- }{
- {
- name: "filesystem error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSPermission,
- testErrPermissionDenied,
- "/etc/shadow",
- nil,
- ),
- expectedOutput: []string{
- "FileSystem",
- testErrPermissionDenied,
- "/etc/shadow",
- "PERMISSION_DENIED",
- testSuggestionsHeader,
+ "✗ Error: " + shared.TestErrCannotAccessFile,
+ "Type: FileSystem, Code: ACCESS_DENIED",
+ "File: " + shared.TestPathBase,
+ "Context:",
+ "permission: 0000",
+ "owner: root",
+ shared.TestSuggestionsWarning,
+ "Check if the path exists",
},
},
{
name: "validation error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- testErrInvalidFormat,
- "",
- map[string]interface{}{"format": "xml"},
- ),
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeValidation,
+ Code: shared.CodeValidationFormat,
+ Message: "invalid output format",
+ },
expectedOutput: []string{
- "Validation",
- testErrInvalidFormat,
- "FORMAT",
- testSuggestionsHeader,
+ "✗ Error: invalid output format",
+ "Type: Validation, Code: FORMAT",
+ shared.TestSuggestionsWarning,
+ "Use a supported format: markdown, json, yaml",
},
},
{
name: "processing error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingFileRead,
- "failed to read file",
- "large.bin",
- nil,
- ),
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeProcessing,
+ Code: shared.CodeProcessingCollection,
+ Message: "failed to collect files",
+ },
expectedOutput: []string{
- "Processing",
- "failed to read file",
- "large.bin",
- "FILE_READ",
- testSuggestionsHeader,
+ "✗ Error: failed to collect files",
+ "Type: Processing, Code: COLLECTION",
+ shared.TestSuggestionsWarning,
+ "Check if the source directory exists",
},
},
{
- name: "IO error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileWrite,
- "disk full",
- "/output/result.txt",
- nil,
- ),
+ name: "I/O error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeIO,
+ Code: shared.CodeIOFileCreate,
+ Message: "cannot create output file",
+ },
expectedOutput: []string{
- "IO",
- "disk full",
- "/output/result.txt",
- "FILE_WRITE",
- testSuggestionsHeader,
+ "✗ Error: cannot create output file",
+ "Type: IO, Code: FILE_CREATE",
+ shared.TestSuggestionsWarning,
+ "Check if the destination directory exists",
+ },
+ },
+ {
+ name: "generic error with permission denied",
+ err: errors.New("permission denied: access to /secret/file"),
+ expectedOutput: []string{
+ "✗ Error: permission denied: access to /secret/file",
+ shared.TestSuggestionsWarning,
+ shared.TestSuggestCheckPermissions,
+ "Try running with appropriate privileges",
+ },
+ },
+ {
+ name: "generic error with file not found",
+ err: errors.New("no such file or directory"),
+ expectedOutput: []string{
+ "✗ Error: no such file or directory",
+ shared.TestSuggestionsWarning,
+ "Verify the file/directory path is correct",
+ "Check if the file exists",
+ },
+ },
+ {
+ name: "generic error with flag redefined",
+ err: errors.New("flag provided but not defined: -invalid"),
+ expectedOutput: []string{
+ "✗ Error: flag provided but not defined: -invalid",
+ shared.TestSuggestionsWarning,
+ shared.TestSuggestCheckArguments,
+ "Run with --help for usage information",
+ },
+ },
+ {
+ name: "unknown generic error",
+ err: errors.New("some unknown error"),
+ expectedOutput: []string{
+ "✗ Error: some unknown error",
+ shared.TestSuggestionsWarning,
+ shared.TestSuggestCheckArguments,
+ "Run with --help for usage information",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
+ // Capture output
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
+
+ formatter.FormatError(tt.err)
+
+ outputStr := output.String()
+
+ // For nil error, output should be empty
+ if tt.err == nil {
+ if outputStr != "" {
+ t.Errorf("Expected no output for nil error, got: %s", outputStr)
+ }
+
+ return
}
- prev := color.NoColor
- color.NoColor = true
- t.Cleanup(func() { color.NoColor = prev })
- ef := &ErrorFormatter{ui: ui}
- ef.formatStructuredError(tt.err)
-
- output := buf.String()
+ // Check that all expected substrings are present
for _, expected := range tt.expectedOutput {
- assert.Contains(t, output, expected)
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
+ }
}
})
}
}
-func TestFormatGenericError(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
+func TestErrorFormatterSuggestFileAccess(t *testing.T) {
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
+
+ // Create a temporary file to test with existing file
+ tempDir := t.TempDir()
+ tempFile, err := os.Create(filepath.Join(tempDir, "testfile"))
+ if err != nil {
+ t.Fatalf("Failed to create temp file: %v", err)
+ }
+ if err := tempFile.Close(); err != nil {
+ t.Errorf("Failed to close temp file: %v", err)
}
- prev := color.NoColor
- color.NoColor = true
- t.Cleanup(func() { color.NoColor = prev })
- ef := &ErrorFormatter{ui: ui}
- ef.formatGenericError(errors.New("generic error message"))
-
- output := buf.String()
- assert.Contains(t, output, gibidiutils.IconError+testErrorSuffix)
- assert.Contains(t, output, "generic error message")
-}
-
-func TestProvideSuggestions(t *testing.T) {
tests := []struct {
name string
- err *gibidiutils.StructuredError
- expectedSugges []string
+ filePath string
+ expectedOutput []string
}{
{
- name: "filesystem permission error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSPermission,
- testErrPermissionDenied,
- "/root/file",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestVerifyPath,
+ name: shared.TestErrEmptyFilePath,
+ filePath: "",
+ expectedOutput: []string{
+ shared.TestSuggestCheckExists,
+ "Verify read permissions",
},
},
{
- name: "filesystem not found error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSNotFound,
- testErrFileNotFound,
- "/missing/file",
- nil,
- ),
- expectedSugges: []string{
- "Check if the file/directory exists: /missing/file",
+ name: "existing file",
+ filePath: tempFile.Name(),
+ expectedOutput: []string{
+ shared.TestSuggestCheckExists,
+ "Path exists but may not be accessible",
+ "Mode:",
},
},
{
- name: "validation format error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- "unsupported format",
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestFormat,
- testSuggestFormatEx,
+ name: "nonexistent file",
+ filePath: "/nonexistent/file",
+ expectedOutput: []string{
+ shared.TestSuggestCheckExists,
+ "Verify read permissions",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ output.Reset()
+ formatter.suggestFileAccess(tt.filePath)
+
+ outputStr := output.String()
+ for _, expected := range tt.expectedOutput {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
+ }
+ }
+ })
+ }
+}
+
+func TestErrorFormatterSuggestFileNotFound(t *testing.T) {
+ // Create a test directory with some files
+ tempDir := t.TempDir()
+ testFiles := []string{"similar-file.txt", "another-similar.go", "different.md"}
+ for _, filename := range testFiles {
+ file, err := os.Create(filepath.Join(tempDir, filename))
+ if err != nil {
+ t.Fatalf("Failed to create test file %s: %v", filename, err)
+ }
+ if err := file.Close(); err != nil {
+ t.Errorf("Failed to close test file %s: %v", filename, err)
+ }
+ }
+
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
+
+ tests := []struct {
+ name string
+ filePath string
+ expectedOutput []string
+ }{
+ {
+ name: shared.TestErrEmptyFilePath,
+ filePath: "",
+ expectedOutput: []string{
+ shared.TestSuggestCheckFileExists,
},
},
{
- name: "validation path error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "invalid path",
- "../../etc",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
+ name: "file with similar matches",
+ filePath: tempDir + "/similar",
+ expectedOutput: []string{
+ shared.TestSuggestCheckFileExists,
+ "Similar files in",
+ "similar-file.txt",
},
},
{
- name: "processing file read error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingFileRead,
- "read error",
- "corrupted.dat",
- nil,
- ),
- expectedSugges: []string{
- "Check file permissions",
- "Verify the file is not corrupted",
+ name: "nonexistent directory",
+ filePath: "/nonexistent/dir/file.txt",
+ expectedOutput: []string{
+ shared.TestSuggestCheckFileExists,
},
},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ output.Reset()
+ formatter.suggestFileNotFound(tt.filePath)
+
+ outputStr := output.String()
+ for _, expected := range tt.expectedOutput {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
+ }
+ }
+ })
+ }
+}
+
+func TestErrorFormatterProvideSuggestions(t *testing.T) {
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
+
+ tests := []struct {
+ name string
+ err *shared.StructuredError
+ expectSuggestions []string
+ }{
{
- name: "IO file write error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileWrite,
- "write failed",
- "/output.txt",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestDiskSpace,
+ name: "filesystem error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: shared.CodeFSAccess,
},
+ expectSuggestions: []string{shared.TestSuggestionsPlain, "Check if the path exists"},
+ },
+ {
+ name: "validation error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeValidation,
+ Code: shared.CodeValidationFormat,
+ },
+ expectSuggestions: []string{shared.TestSuggestionsPlain, "Use a supported format"},
+ },
+ {
+ name: "processing error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeProcessing,
+ Code: shared.CodeProcessingCollection,
+ },
+ expectSuggestions: []string{shared.TestSuggestionsPlain, "Check if the source directory exists"},
+ },
+ {
+ name: "I/O error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeIO,
+ Code: shared.CodeIOWrite,
+ },
+ expectSuggestions: []string{shared.TestSuggestionsPlain, "Check available disk space"},
},
{
name: "unknown error type",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeUnknown,
- "UNKNOWN",
- "unknown error",
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckArgs,
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeUnknown,
+ },
+ expectSuggestions: []string{"Check your command line arguments"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ output.Reset()
+ formatter.provideSuggestions(tt.err)
+
+ outputStr := output.String()
+ for _, expected := range tt.expectSuggestions {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
+ }
+ }
+ })
+ }
+}
+
+func TestMissingSourceError(t *testing.T) {
+ err := NewCLIMissingSourceError()
+
+ if err == nil {
+ t.Error("NewCLIMissingSourceError() returned nil")
+
+ return
+ }
+
+ expectedMsg := "source directory is required"
+ if err.Error() != expectedMsg {
+ t.Errorf("MissingSourceError.Error() = %v, want %v", err.Error(), expectedMsg)
+ }
+
+ // Test type assertion
+ var cliErr *MissingSourceError
+ if !errors.As(err, &cliErr) {
+ t.Error("NewCLIMissingSourceError() did not return *MissingSourceError type")
+ }
+}
+
+func TestIsUserError(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ expected bool
+ }{
+ {
+ name: "nil error",
+ err: nil,
+ expected: false,
+ },
+ {
+ name: "CLI missing source error",
+ err: NewCLIMissingSourceError(),
+ expected: true,
+ },
+ {
+ name: "validation structured error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeValidation,
+ },
+ expected: true,
+ },
+ {
+ name: "validation format structured error",
+ err: &shared.StructuredError{
+ Code: shared.CodeValidationFormat,
+ },
+ expected: true,
+ },
+ {
+ name: "validation size structured error",
+ err: &shared.StructuredError{
+ Code: shared.CodeValidationSize,
+ },
+ expected: true,
+ },
+ {
+ name: "non-validation structured error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ },
+ expected: false,
+ },
+ {
+ name: "generic error with flag keyword",
+ err: errors.New("flag provided but not defined"),
+ expected: true,
+ },
+ {
+ name: "generic error with usage keyword",
+ err: errors.New("usage: command [options]"),
+ expected: true,
+ },
+ {
+ name: "generic error with invalid argument",
+ err: errors.New("invalid argument provided"),
+ expected: true,
+ },
+ {
+ name: "generic error with file not found",
+ err: errors.New("file not found"),
+ expected: true,
+ },
+ {
+ name: "generic error with permission denied",
+ err: errors.New("permission denied"),
+ expected: true,
+ },
+ {
+ name: "system error not user-facing",
+ err: errors.New("internal system error"),
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := IsUserError(tt.err)
+ if result != tt.expected {
+ t.Errorf("IsUserError(%v) = %v, want %v", tt.err, result, tt.expected)
+ }
+ })
+ }
+}
+
+// Helper functions for testing
+
+// createTestUI creates a UIManager with captured output for testing.
+func createTestUI() (*UIManager, *bytes.Buffer) {
+ output := &bytes.Buffer{}
+ ui := &UIManager{
+ enableColors: false, // Disable colors for consistent testing
+ enableProgress: false, // Disable progress for testing
+ output: output,
+ }
+
+ return ui, output
+}
+
+// TestErrorFormatterIntegration tests the complete error formatting workflow.
+func TestErrorFormatterIntegration(t *testing.T) {
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
+
+ // Test a complete workflow with a complex structured error
+ structuredErr := &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: shared.CodeFSNotFound,
+ Message: "source directory not found",
+ FilePath: "/missing/directory",
+ Context: map[string]any{
+ "attempted_path": "/missing/directory",
+ "current_dir": "/working/dir",
+ },
+ }
+
+ formatter.FormatError(structuredErr)
+ outputStr := output.String()
+
+ // Verify all components are present
+ expectedComponents := []string{
+ "✗ Error: source directory not found",
+ "Type: FileSystem, Code: NOT_FOUND",
+ "File: /missing/directory",
+ "Context:",
+ "attempted_path: /missing/directory",
+ "current_dir: /working/dir",
+ shared.TestSuggestionsWarning,
+ "Check if the file/directory exists",
+ }
+
+ for _, expected := range expectedComponents {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf("Integration test output missing expected component: %q\nFull output:\n%s", expected, outputStr)
+ }
+ }
+}
+
+// TestErrorFormatter_SuggestPathResolution tests the suggestPathResolution function.
+func TestErrorFormatterSuggestPathResolution(t *testing.T) {
+ tests := []struct {
+ name string
+ filePath string
+ expectedOutput []string
+ }{
+ {
+ name: "with file path",
+ filePath: "relative/path/file.txt",
+ expectedOutput: []string{
+ shared.TestSuggestUseAbsolutePath,
+ "Try:",
+ },
+ },
+ {
+ name: shared.TestErrEmptyFilePath,
+ filePath: "",
+ expectedOutput: []string{
+ shared.TestSuggestUseAbsolutePath,
+ },
+ },
+ {
+ name: "current directory reference",
+ filePath: "./file.txt",
+ expectedOutput: []string{
+ shared.TestSuggestUseAbsolutePath,
+ "Try:",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
- prev := color.NoColor
- color.NoColor = true
- t.Cleanup(func() { color.NoColor = prev })
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
- ef := &ErrorFormatter{ui: ui}
- ef.provideSuggestions(tt.err)
+ // Call the method
+ formatter.suggestPathResolution(tt.filePath)
- output := buf.String()
- for _, suggestion := range tt.expectedSugges {
- assert.Contains(t, output, suggestion)
+ // Check output
+ outputStr := output.String()
+ for _, expected := range tt.expectedOutput {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf("suggestPathResolution output missing: %q\nFull output: %q", expected, outputStr)
+ }
}
})
}
}
-func TestProvideFileSystemSuggestions(t *testing.T) {
+// TestErrorFormatter_SuggestFileSystemGeneral tests the suggestFileSystemGeneral function.
+func TestErrorFormatterSuggestFileSystemGeneral(t *testing.T) {
tests := []struct {
name string
- err *gibidiutils.StructuredError
- expectedSugges []string
+ filePath string
+ expectedOutput []string
}{
{
- name: testErrPermissionDenied,
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSPermission,
- testErrPermissionDenied,
- "/root/secret",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestVerifyPath,
+ name: "with file path",
+ filePath: "/path/to/file.txt",
+ expectedOutput: []string{
+ shared.TestSuggestCheckPermissions,
+ shared.TestSuggestVerifyPath,
+ "Path: /path/to/file.txt",
},
},
{
- name: "path resolution error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSPathResolution,
- "path error",
- "../../../etc",
- nil,
- ),
- expectedSugges: []string{
- "Use an absolute path instead of relative",
+ name: shared.TestErrEmptyFilePath,
+ filePath: "",
+ expectedOutput: []string{
+ shared.TestSuggestCheckPermissions,
+ shared.TestSuggestVerifyPath,
},
},
{
- name: testErrFileNotFound,
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.CodeFSNotFound,
- "not found",
- "/missing.txt",
- nil,
- ),
- expectedSugges: []string{
- "Check if the file/directory exists: /missing.txt",
+ name: "relative path",
+ filePath: "../parent/file.txt",
+ expectedOutput: []string{
+ shared.TestSuggestCheckPermissions,
+ shared.TestSuggestVerifyPath,
+ "Path: ../parent/file.txt",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
+
+ // Call the method
+ formatter.suggestFileSystemGeneral(tt.filePath)
+
+ // Check output
+ outputStr := output.String()
+ for _, expected := range tt.expectedOutput {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf("suggestFileSystemGeneral output missing: %q\nFull output: %q", expected, outputStr)
+ }
+ }
+
+ // When no file path is provided, should not contain "Path:" line
+ if tt.filePath == "" && strings.Contains(outputStr, "Path:") {
+ t.Error("suggestFileSystemGeneral should not include Path line when filePath is empty")
+ }
+ })
+ }
+}
+
+// TestErrorFormatter_SuggestionFunctions_Integration tests the integration of suggestion functions.
+func TestErrorFormatterSuggestionFunctionsIntegration(t *testing.T) {
+ // Test that suggestion functions work as part of the full error formatting workflow
+ tests := []struct {
+ name string
+ err *shared.StructuredError
+ expectedSuggestions []string
+ }{
+ {
+ name: "filesystem path resolution error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: shared.CodeFSPathResolution,
+ Message: "path resolution failed",
+ FilePath: "relative/path",
+ },
+ expectedSuggestions: []string{
+ shared.TestSuggestUseAbsolutePath,
+ "Try:",
},
},
{
- name: "default filesystem error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeFileSystem,
- "OTHER_FS_ERROR",
- testErrOther,
- "/some/path",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestVerifyPath,
+ name: "filesystem unknown error",
+ err: &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: "UNKNOWN_FS_ERROR", // This will trigger default case
+ Message: "unknown filesystem error",
+ FilePath: "/some/path",
+ },
+ expectedSuggestions: []string{
+ shared.TestSuggestCheckPermissions,
+ shared.TestSuggestVerifyPath,
"Path: /some/path",
},
},
@@ -403,561 +669,76 @@ func TestProvideFileSystemSuggestions(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
+ ui, output := createTestUI()
+ formatter := NewErrorFormatter(ui)
- ef := &ErrorFormatter{ui: ui}
- ef.provideFileSystemSuggestions(tt.err)
+ // Format the error (which should include suggestions)
+ formatter.FormatError(tt.err)
- output := buf.String()
- for _, suggestion := range tt.expectedSugges {
- assert.Contains(t, output, suggestion)
+ // Check that expected suggestions are present
+ outputStr := output.String()
+ for _, expected := range tt.expectedSuggestions {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf("Integrated suggestion missing: %q\nFull output: %q", expected, outputStr)
+ }
}
})
}
}
-func TestProvideValidationSuggestions(t *testing.T) {
- tests := []struct {
- name string
- err *gibidiutils.StructuredError
- expectedSugges []string
- }{
- {
- name: "format validation",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- testErrInvalidFormat,
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestFormat,
- testSuggestFormatEx,
- },
- },
- {
- name: "path validation",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "invalid path",
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- },
- },
- {
- name: "size validation",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationSize,
- "size error",
- "",
- nil,
- ),
- expectedSugges: []string{
- "Increase file size limit in config.yaml",
- "Use smaller files or exclude large files",
- },
- },
- {
- name: "required validation",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationRequired,
- "required",
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- },
- },
- {
- name: "default validation",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- "OTHER_VALIDATION",
- "other",
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- },
- },
+// Benchmarks for error formatting performance
+
+// BenchmarkErrorFormatterFormatError benchmarks the FormatError method.
+func BenchmarkErrorFormatterFormatError(b *testing.B) {
+ ui, _ := createTestUI()
+ formatter := NewErrorFormatter(ui)
+ err := &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: shared.CodeFSAccess,
+ Message: shared.TestErrCannotAccessFile,
+ FilePath: shared.TestPathBase,
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
-
- ef := &ErrorFormatter{ui: ui}
- ef.provideValidationSuggestions(tt.err)
-
- output := buf.String()
- for _, suggestion := range tt.expectedSugges {
- assert.Contains(t, output, suggestion)
- }
- })
+ b.ResetTimer()
+ for b.Loop() {
+ formatter.FormatError(err)
}
}
-func TestProvideProcessingSuggestions(t *testing.T) {
- tests := []struct {
- name string
- err *gibidiutils.StructuredError
- expectedSugges []string
- }{
- {
- name: "file read error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingFileRead,
- "read error",
- "",
- nil,
- ),
- expectedSugges: []string{
- "Check file permissions",
- "Verify the file is not corrupted",
- },
- },
- {
- name: "collection error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "collection error",
- "",
- nil,
- ),
- expectedSugges: []string{
- "Check if the source directory exists and is readable",
- "Verify directory permissions",
- },
- },
- {
- name: testErrEncoding,
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingEncode,
- testErrEncoding,
- "",
- nil,
- ),
- expectedSugges: []string{
- "Try reducing concurrency: -concurrency 1",
- "Check available system resources",
- },
- },
- {
- name: "default processing",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeProcessing,
- "OTHER",
- testErrOther,
- "",
- nil,
- ),
- expectedSugges: []string{
- "Try reducing concurrency: -concurrency 1",
- "Check available system resources",
- },
+// BenchmarkErrorFormatterFormatErrorWithContext benchmarks error formatting with context.
+func BenchmarkErrorFormatterFormatErrorWithContext(b *testing.B) {
+ ui, _ := createTestUI()
+ formatter := NewErrorFormatter(ui)
+ err := &shared.StructuredError{
+ Type: shared.ErrorTypeValidation,
+ Code: shared.CodeValidationFormat,
+ Message: "validation failed",
+ FilePath: shared.TestPathBase,
+ Context: map[string]any{
+ "field": "format",
+ "value": "invalid",
},
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
-
- ef := &ErrorFormatter{ui: ui}
- ef.provideProcessingSuggestions(tt.err)
-
- output := buf.String()
- for _, suggestion := range tt.expectedSugges {
- assert.Contains(t, output, suggestion)
- }
- })
+ b.ResetTimer()
+ for b.Loop() {
+ formatter.FormatError(err)
}
}
-func TestProvideIOSuggestions(t *testing.T) {
- tests := []struct {
- name string
- err *gibidiutils.StructuredError
- expectedSugges []string
- }{
- {
- name: "file create error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileCreate,
- "create error",
- "",
- nil,
- ),
- expectedSugges: []string{
- "Check if the destination directory exists",
- "Verify write permissions for the output file",
- "Ensure sufficient disk space",
- },
- },
- {
- name: "file write error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileWrite,
- "write error",
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestDiskSpace,
- },
- },
- {
- name: testErrEncoding,
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOEncoding,
- testErrEncoding,
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestDiskSpace,
- },
- },
- {
- name: "default IO error",
- err: gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- "OTHER",
- testErrOther,
- "",
- nil,
- ),
- expectedSugges: []string{
- testSuggestCheckPerms,
- testSuggestDiskSpace,
- },
- },
+// BenchmarkErrorFormatterProvideSuggestions benchmarks suggestion generation.
+func BenchmarkErrorFormatterProvideSuggestions(b *testing.B) {
+ ui, _ := createTestUI()
+ formatter := NewErrorFormatter(ui)
+ err := &shared.StructuredError{
+ Type: shared.ErrorTypeFileSystem,
+ Code: shared.CodeFSAccess,
+ Message: shared.TestErrCannotAccessFile,
+ FilePath: shared.TestPathBase,
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
-
- ef := &ErrorFormatter{ui: ui}
- ef.provideIOSuggestions(tt.err)
-
- output := buf.String()
- for _, suggestion := range tt.expectedSugges {
- assert.Contains(t, output, suggestion)
- }
- })
- }
-}
-
-func TestProvideGenericSuggestions(t *testing.T) {
- tests := []struct {
- name string
- err error
- expectedSugges []string
- }{
- {
- name: "permission error",
- err: errors.New("permission denied accessing file"),
- expectedSugges: []string{
- testSuggestCheckPerms,
- "Try running with appropriate privileges",
- },
- },
- {
- name: "not found error",
- err: errors.New("no such file or directory"),
- expectedSugges: []string{
- "Verify the file/directory path is correct",
- "Check if the file exists",
- },
- },
- {
- name: "memory error",
- err: errors.New("out of memory"),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- testSuggestReduceConcur,
- },
- },
- {
- name: "timeout error",
- err: errors.New("operation timed out"),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- testSuggestReduceConcur,
- },
- },
- {
- name: "connection error",
- err: errors.New("connection refused"),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- testSuggestReduceConcur,
- },
- },
- {
- name: "default error",
- err: errors.New("unknown error occurred"),
- expectedSugges: []string{
- testSuggestCheckArgs,
- testSuggestHelp,
- testSuggestReduceConcur,
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
-
- ef := &ErrorFormatter{ui: ui}
- ef.provideGenericSuggestions(tt.err)
-
- output := buf.String()
- for _, suggestion := range tt.expectedSugges {
- assert.Contains(t, output, suggestion)
- }
- })
- }
-}
-
-func TestMissingSourceError(t *testing.T) {
- err := &MissingSourceError{}
-
- assert.Equal(t, "source directory is required", err.Error())
-}
-
-func TestNewMissingSourceErrorType(t *testing.T) {
- err := NewMissingSourceError()
-
- assert.NotNil(t, err)
- assert.Equal(t, "source directory is required", err.Error())
-
- var msErr *MissingSourceError
- ok := errors.As(err, &msErr)
- assert.True(t, ok)
- assert.NotNil(t, msErr)
-}
-
-// Test error formatting with colors enabled
-func TestFormatErrorWithColors(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: true,
- output: buf,
- }
- prev := color.NoColor
- color.NoColor = false
- t.Cleanup(func() { color.NoColor = prev })
-
- ef := NewErrorFormatter(ui)
- err := gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- testErrInvalidFormat,
- "",
- nil,
- )
-
- ef.FormatError(err)
-
- output := buf.String()
- // When colors are enabled, some output may go directly to stdout
- // Check for suggestions that are captured in the buffer
- assert.Contains(t, output, testSuggestFormat)
- assert.Contains(t, output, testSuggestFormatEx)
-}
-
-// Test wrapped error handling
-func TestFormatWrappedError(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
-
- ef := NewErrorFormatter(ui)
-
- innerErr := errors.New("inner error")
- wrappedErr := gibidiutils.WrapError(
- innerErr,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingFileRead,
- "wrapper message",
- )
-
- ef.FormatError(wrappedErr)
-
- output := buf.String()
- assert.Contains(t, output, "wrapper message")
-}
-
-// Test all suggestion paths get called
-func TestSuggestionPathCoverage(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
- ef := &ErrorFormatter{ui: ui}
-
- // Test all error types
- errorTypes := []gibidiutils.ErrorType{
- gibidiutils.ErrorTypeFileSystem,
- gibidiutils.ErrorTypeValidation,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.ErrorTypeIO,
- gibidiutils.ErrorTypeConfiguration,
- gibidiutils.ErrorTypeUnknown,
- }
-
- for _, errType := range errorTypes {
- t.Run(errType.String(), func(t *testing.T) {
- buf.Reset()
- err := gibidiutils.NewStructuredError(
- errType,
- "TEST_CODE",
- "test error",
- "/test/path",
- nil,
- )
- ef.provideSuggestions(err)
-
- output := buf.String()
- // Should have some suggestion output
- assert.NotEmpty(t, output)
- })
- }
-}
-
-// Test suggestion helper functions with various inputs
-func TestSuggestHelpers(t *testing.T) {
- tests := []struct {
- name string
- testFunc func(*ErrorFormatter)
- }{
- {
- name: "suggestFileAccess",
- testFunc: func(ef *ErrorFormatter) {
- ef.suggestFileAccess("/root/file")
- },
- },
- {
- name: "suggestPathResolution",
- testFunc: func(ef *ErrorFormatter) {
- ef.suggestPathResolution("../../../etc")
- },
- },
- {
- name: "suggestFileNotFound",
- testFunc: func(ef *ErrorFormatter) {
- ef.suggestFileNotFound("/missing")
- },
- },
- {
- name: "suggestFileSystemGeneral",
- testFunc: func(ef *ErrorFormatter) {
- ef.suggestFileSystemGeneral("/path")
- },
- },
- {
- name: "provideDefaultSuggestions",
- testFunc: func(ef *ErrorFormatter) {
- ef.provideDefaultSuggestions()
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
- ef := &ErrorFormatter{ui: ui}
-
- tt.testFunc(ef)
-
- output := buf.String()
- // Each should produce some output
- assert.NotEmpty(t, output)
- // Should contain bullet point
- assert.Contains(t, output, gibidiutils.IconBullet)
- })
- }
-}
-
-// Test edge cases in error message analysis
-func TestGenericSuggestionsEdgeCases(t *testing.T) {
- tests := []struct {
- name string
- err error
- }{
- {"empty message", errors.New("")},
- {"very long message", errors.New(strings.Repeat("error ", 100))},
- {"special characters", errors.New("error!@#$%^&*()")},
- {"newlines", errors.New("error\nwith\nnewlines")},
- {"unicode", errors.New("error with 中文 characters")},
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
- ef := &ErrorFormatter{ui: ui}
-
- // Should not panic
- ef.provideGenericSuggestions(tt.err)
-
- output := buf.String()
- // Should have some output
- assert.NotEmpty(t, output)
- })
+ b.ResetTimer()
+ for b.Loop() {
+ formatter.provideSuggestions(err)
}
}
diff --git a/cli/flags.go b/cli/flags.go
index 6f366dc..916f80c 100644
--- a/cli/flags.go
+++ b/cli/flags.go
@@ -1,11 +1,14 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"flag"
+ "fmt"
+ "os"
"runtime"
"github.com/ivuorinen/gibidify/config"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// Flags holds CLI flags values.
@@ -18,7 +21,9 @@ type Flags struct {
Format string
NoColors bool
NoProgress bool
+ NoUI bool
Verbose bool
+ LogLevel string
}
var (
@@ -26,6 +31,15 @@ var (
globalFlags *Flags
)
+// ResetFlags resets the global flag parsing state for testing.
+// This function should only be used in tests to ensure proper isolation.
+func ResetFlags() {
+ flagsParsed = false
+ globalFlags = nil
+ // Reset default FlagSet to avoid duplicate flag registration across tests
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
+}
+
// ParseFlags parses and validates CLI flags.
func ParseFlags() (*Flags, error) {
if flagsParsed {
@@ -34,18 +48,20 @@ func ParseFlags() (*Flags, error) {
flags := &Flags{}
- flag.StringVar(&flags.SourceDir, "source", "", "Source directory to scan recursively")
+ flag.StringVar(&flags.SourceDir, shared.CLIArgSource, "", "Source directory to scan recursively")
flag.StringVar(&flags.Destination, "destination", "", "Output file to write aggregated code")
flag.StringVar(&flags.Prefix, "prefix", "", "Text to add at the beginning of the output file")
flag.StringVar(&flags.Suffix, "suffix", "", "Text to add at the end of the output file")
- flag.StringVar(&flags.Format, "format", "markdown", "Output format (json, markdown, yaml)")
- flag.IntVar(
- &flags.Concurrency, "concurrency", runtime.NumCPU(),
- "Number of concurrent workers (default: number of CPU cores)",
- )
+ flag.StringVar(&flags.Format, shared.CLIArgFormat, shared.FormatJSON, "Output format (json, markdown, yaml)")
+ flag.IntVar(&flags.Concurrency, shared.CLIArgConcurrency, runtime.NumCPU(),
+ "Number of concurrent workers (default: number of CPU cores)")
flag.BoolVar(&flags.NoColors, "no-colors", false, "Disable colored output")
flag.BoolVar(&flags.NoProgress, "no-progress", false, "Disable progress bars")
+ flag.BoolVar(&flags.NoUI, "no-ui", false, "Disable all UI output (implies no-colors and no-progress)")
flag.BoolVar(&flags.Verbose, "verbose", false, "Enable verbose output")
+ flag.StringVar(
+ &flags.LogLevel, "log-level", string(shared.LogLevelWarn), "Set log level (debug, info, warn, error)",
+ )
flag.Parse()
@@ -59,40 +75,54 @@ func ParseFlags() (*Flags, error) {
flagsParsed = true
globalFlags = flags
+
return flags, nil
}
// validate validates the CLI flags.
func (f *Flags) validate() error {
if f.SourceDir == "" {
- return NewMissingSourceError()
+ return NewCLIMissingSourceError()
}
// Validate source path for security
- if err := gibidiutils.ValidateSourcePath(f.SourceDir); err != nil {
- return err
+ if err := shared.ValidateSourcePath(f.SourceDir); err != nil {
+ return fmt.Errorf("validating source path: %w", err)
}
// Validate output format
if err := config.ValidateOutputFormat(f.Format); err != nil {
- return err
+ return fmt.Errorf("validating output format: %w", err)
}
// Validate concurrency
- return config.ValidateConcurrency(f.Concurrency)
+ if err := config.ValidateConcurrency(f.Concurrency); err != nil {
+ return fmt.Errorf("validating concurrency: %w", err)
+ }
+
+ // Validate log level
+ if !shared.ValidateLogLevel(f.LogLevel) {
+ return fmt.Errorf("invalid log level: %s (must be: debug, info, warn, error)", f.LogLevel)
+ }
+
+ return nil
}
// setDefaultDestination sets the default destination if not provided.
func (f *Flags) setDefaultDestination() error {
if f.Destination == "" {
- absRoot, err := gibidiutils.GetAbsolutePath(f.SourceDir)
+ absRoot, err := shared.AbsolutePath(f.SourceDir)
if err != nil {
- return err
+ return fmt.Errorf("getting absolute path: %w", err)
}
- baseName := gibidiutils.GetBaseName(absRoot)
+ baseName := shared.BaseName(absRoot)
f.Destination = baseName + "." + f.Format
}
// Validate destination path for security
- return gibidiutils.ValidateDestinationPath(f.Destination)
+ if err := shared.ValidateDestinationPath(f.Destination); err != nil {
+ return fmt.Errorf("validating destination path: %w", err)
+ }
+
+ return nil
}
diff --git a/cli/flags_test.go b/cli/flags_test.go
index 3c9fd70..53d022b 100644
--- a/cli/flags_test.go
+++ b/cli/flags_test.go
@@ -1,366 +1,664 @@
package cli
import (
- "errors"
"flag"
"os"
"runtime"
"strings"
"testing"
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
)
-func TestParseFlags(t *testing.T) {
- // Save original command line args and restore after test
- oldArgs := os.Args
- oldFlagsParsed := flagsParsed
- defer func() {
- os.Args = oldArgs
- flagsParsed = oldFlagsParsed
- flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
- }()
+const testDirPlaceholder = "testdir"
+// setupTestArgs prepares test arguments by replacing testdir with actual temp directory.
+func setupTestArgs(t *testing.T, args []string, want *Flags) ([]string, *Flags) {
+ t.Helper()
+
+ if !containsFlag(args, shared.TestCLIFlagSource) {
+ return args, want
+ }
+
+ tempDir := t.TempDir()
+ modifiedArgs := replaceTestDirInArgs(args, tempDir)
+
+ // Handle nil want parameter (used for error test cases)
+ if want == nil {
+ return modifiedArgs, nil
+ }
+
+ modifiedWant := updateWantFlags(*want, tempDir)
+
+ return modifiedArgs, &modifiedWant
+}
+
+// replaceTestDirInArgs replaces testdir placeholder with actual temp directory in args.
+func replaceTestDirInArgs(args []string, tempDir string) []string {
+ modifiedArgs := make([]string, len(args))
+ copy(modifiedArgs, args)
+
+ for i, arg := range modifiedArgs {
+ if arg == testDirPlaceholder {
+ modifiedArgs[i] = tempDir
+
+ break
+ }
+ }
+
+ return modifiedArgs
+}
+
+// updateWantFlags updates the want flags with temp directory replacements.
+func updateWantFlags(want Flags, tempDir string) Flags {
+ modifiedWant := want
+
+ if want.SourceDir == testDirPlaceholder {
+ modifiedWant.SourceDir = tempDir
+ if strings.HasPrefix(want.Destination, testDirPlaceholder+".") {
+ baseName := testutil.BaseName(tempDir)
+ modifiedWant.Destination = baseName + "." + want.Format
+ }
+ }
+
+ return modifiedWant
+}
+
+// runParseFlagsTest runs a single parse flags test.
+func runParseFlagsTest(t *testing.T, args []string, want *Flags, wantErr bool, errContains string) {
+ t.Helper()
+
+ // Capture and restore original os.Args
+ origArgs := os.Args
+ defer func() { os.Args = origArgs }()
+
+ resetFlagsState()
+ modifiedArgs, modifiedWant := setupTestArgs(t, args, want)
+ setupCommandLineArgs(modifiedArgs)
+
+ got, err := ParseFlags()
+
+ if wantErr {
+ if err == nil {
+ t.Error("ParseFlags() expected error, got nil")
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("ParseFlags() error = %v, want error containing %v", err, errContains)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("ParseFlags() unexpected error = %v", err)
+
+ return
+ }
+
+ verifyFlags(t, got, modifiedWant)
+}
+
+func TestParseFlags(t *testing.T) {
tests := []struct {
- name string
- args []string
- expectedError string
- validate func(t *testing.T, f *Flags)
- setup func(t *testing.T)
+ name string
+ args []string
+ want *Flags
+ wantErr bool
+ errContains string
}{
{
- name: "valid flags with all options",
+ name: "valid basic flags",
+ args: []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagFormat, "markdown"},
+ want: &Flags{
+ SourceDir: "testdir",
+ Format: "markdown",
+ Concurrency: runtime.NumCPU(),
+ Destination: "testdir.markdown",
+ LogLevel: string(shared.LogLevelWarn),
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid with all flags",
args: []string{
- "gibidify",
- testFlagSource, "", // will set to tempDir in test body
- "-destination", "output.md",
- "-format", "json",
- testFlagConcurrency, "4",
- "-prefix", "prefix",
- "-suffix", "suffix",
+ shared.TestCLIFlagSource, "testdir",
+ shared.TestCLIFlagDestination, shared.TestOutputMD,
+ "-prefix", "# Header",
+ "-suffix", "# Footer",
+ shared.TestCLIFlagFormat, "json",
+ shared.TestCLIFlagConcurrency, "4",
+ "-verbose",
"-no-colors",
"-no-progress",
- "-verbose",
},
- validate: nil, // set in test body using closure
+ want: &Flags{
+ SourceDir: "testdir",
+ Destination: shared.TestOutputMD,
+ Prefix: "# Header",
+ Suffix: "# Footer",
+ Format: "json",
+ Concurrency: 4,
+ Verbose: true,
+ NoColors: true,
+ NoProgress: true,
+ LogLevel: string(shared.LogLevelWarn),
+ },
+ wantErr: false,
},
{
- name: "missing source directory",
- args: []string{"gibidify"},
- expectedError: testErrSourceRequired,
+ name: "missing source directory",
+ args: []string{shared.TestCLIFlagFormat, "markdown"},
+ wantErr: true,
+ errContains: "source directory is required",
},
{
- name: "invalid format",
- args: []string{
- "gibidify",
- testFlagSource, "", // will set to tempDir in test body
- "-format", "invalid",
- },
- expectedError: "unsupported output format: invalid",
+ name: "invalid format",
+ args: []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagFormat, "invalid"},
+ wantErr: true,
+ errContains: "validating output format",
},
{
- name: "invalid concurrency (zero)",
- args: []string{
- "gibidify",
- testFlagSource, "", // will set to tempDir in test body
- testFlagConcurrency, "0",
- },
- expectedError: "concurrency (0) must be at least 1",
+ name: "invalid concurrency zero",
+ args: []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagConcurrency, "0"},
+ wantErr: true,
+ errContains: shared.TestOpValidatingConcurrency,
},
{
- name: "invalid concurrency (too high)",
- args: []string{
- "gibidify",
- testFlagSource, "", // will set to tempDir in test body
- testFlagConcurrency, "200",
- },
- // Set maxConcurrency so the upper bound is enforced
- expectedError: "concurrency (200) exceeds maximum (128)",
- setup: func(t *testing.T) {
- orig := viper.Get("maxConcurrency")
- viper.Set("maxConcurrency", 128)
- t.Cleanup(func() { viper.Set("maxConcurrency", orig) })
- },
- },
- {
- name: "path traversal in source",
- args: []string{
- "gibidify",
- testFlagSource, testPathTraversalPath,
- },
- expectedError: testErrPathTraversal,
- },
- {
- name: "default values",
- args: []string{
- "gibidify",
- testFlagSource, "", // will set to tempDir in test body
- },
- validate: nil, // set in test body using closure
+ name: "negative concurrency",
+ args: []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagConcurrency, "-1"},
+ wantErr: true,
+ errContains: shared.TestOpValidatingConcurrency,
},
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- // Reset flags for each test
- flagsParsed = false
- globalFlags = nil
- flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
-
- // Create a local copy of args to avoid corrupting shared test data
- args := append([]string{}, tt.args...)
-
- // Use t.TempDir for source directory if needed
- tempDir := ""
- for i := range args {
- if i > 0 && args[i-1] == testFlagSource && args[i] == "" {
- tempDir = t.TempDir()
- args[i] = tempDir
- }
- }
- os.Args = args
-
- // Set validate closure if needed (for tempDir)
- if tt.name == "valid flags with all options" {
- tt.validate = func(t *testing.T, f *Flags) {
- assert.Equal(t, tempDir, f.SourceDir)
- assert.Equal(t, "output.md", f.Destination)
- assert.Equal(t, "json", f.Format)
- assert.Equal(t, 4, f.Concurrency)
- assert.Equal(t, "prefix", f.Prefix)
- assert.Equal(t, "suffix", f.Suffix)
- assert.True(t, f.NoColors)
- assert.True(t, f.NoProgress)
- assert.True(t, f.Verbose)
- }
- }
- if tt.name == "default values" {
- tt.validate = func(t *testing.T, f *Flags) {
- assert.Equal(t, tempDir, f.SourceDir)
- assert.Equal(t, "markdown", f.Format)
- assert.Equal(t, runtime.NumCPU(), f.Concurrency)
- assert.Equal(t, "", f.Prefix)
- assert.Equal(t, "", f.Suffix)
- assert.False(t, f.NoColors)
- assert.False(t, f.NoProgress)
- assert.False(t, f.Verbose)
- // Destination should be set by setDefaultDestination
- assert.NotEmpty(t, f.Destination)
- }
- }
-
- // Call setup if present (e.g. for maxConcurrency)
- if tt.setup != nil {
- tt.setup(t)
- }
-
- flags, err := ParseFlags()
-
- if tt.expectedError != "" {
- if assert.Error(t, err) {
- assert.Contains(t, err.Error(), tt.expectedError)
- }
- assert.Nil(t, flags)
- } else {
- assert.NoError(t, err)
- assert.NotNil(t, flags)
- if tt.validate != nil {
- tt.validate(t, flags)
- }
- }
- })
+ t.Run(
+ tt.name, func(t *testing.T) {
+ runParseFlagsTest(t, tt.args, tt.want, tt.wantErr, tt.errContains)
+ },
+ )
}
}
-func TestFlagsValidate(t *testing.T) {
+// validateFlagsValidationResult validates flag validation test results.
+func validateFlagsValidationResult(t *testing.T, err error, wantErr bool, errContains string) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error("Flags.validate() expected error, got nil")
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("Flags.validate() error = %v, want error containing %v", err, errContains)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("Flags.validate() unexpected error = %v", err)
+ }
+}
+
+func TestFlagsvalidate(t *testing.T) {
+ tempDir := t.TempDir()
+
tests := []struct {
- name string
- flags *Flags
- setupFunc func(t *testing.T, f *Flags)
- expectedError string
+ name string
+ flags *Flags
+ wantErr bool
+ errContains string
}{
- {
- name: "missing source directory",
- flags: &Flags{},
- expectedError: testErrSourceRequired,
- },
- {
- name: "invalid format",
- flags: &Flags{
- Format: "invalid",
- },
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
- },
- expectedError: "unsupported output format: invalid",
- },
- {
- name: "invalid concurrency",
- flags: &Flags{
- Format: "markdown",
- Concurrency: 0,
- },
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
- },
- expectedError: "concurrency (0) must be at least 1",
- },
- {
- name: "path traversal attempt",
- flags: &Flags{
- SourceDir: testPathTraversalPath,
- Format: "markdown",
- },
- expectedError: testErrPathTraversal,
- },
{
name: "valid flags",
flags: &Flags{
+ SourceDir: tempDir,
+ Format: "markdown",
+ Concurrency: 4,
+ LogLevel: "warn",
+ },
+ wantErr: false,
+ },
+ {
+ name: "empty source directory",
+ flags: &Flags{
+ Format: "markdown",
+ Concurrency: 4,
+ LogLevel: "warn",
+ },
+ wantErr: true,
+ errContains: "source directory is required",
+ },
+ {
+ name: "invalid format",
+ flags: &Flags{
+ SourceDir: tempDir,
+ Format: "invalid",
+ Concurrency: 4,
+ LogLevel: "warn",
+ },
+ wantErr: true,
+ errContains: "validating output format",
+ },
+ {
+ name: "zero concurrency",
+ flags: &Flags{
+ SourceDir: tempDir,
+ Format: "markdown",
+ Concurrency: 0,
+ LogLevel: "warn",
+ },
+ wantErr: true,
+ errContains: shared.TestOpValidatingConcurrency,
+ },
+ {
+ name: "negative concurrency",
+ flags: &Flags{
+ SourceDir: tempDir,
+ Format: "json",
+ Concurrency: -1,
+ LogLevel: "warn",
+ },
+ wantErr: true,
+ errContains: shared.TestOpValidatingConcurrency,
+ },
+ {
+ name: "invalid log level",
+ flags: &Flags{
+ SourceDir: tempDir,
Format: "json",
Concurrency: 4,
+ LogLevel: "invalid",
},
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
- },
+ wantErr: true,
+ errContains: "invalid log level",
},
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.setupFunc != nil {
- tt.setupFunc(t, tt.flags)
- }
-
- err := tt.flags.validate()
-
- if tt.expectedError != "" {
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tt.expectedError)
- } else {
- assert.NoError(t, err)
- }
- })
+ t.Run(
+ tt.name, func(t *testing.T) {
+ err := tt.flags.validate()
+ validateFlagsValidationResult(t, err, tt.wantErr, tt.errContains)
+ },
+ )
}
}
-func TestSetDefaultDestination(t *testing.T) {
+// validateDefaultDestinationResult validates default destination test results.
+func validateDefaultDestinationResult(
+ t *testing.T,
+ flags *Flags,
+ err error,
+ wantDestination string,
+ wantErr bool,
+ errContains string,
+) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error("Flags.setDefaultDestination() expected error, got nil")
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("Flags.setDefaultDestination() error = %v, want error containing %v", err, errContains)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("Flags.setDefaultDestination() unexpected error = %v", err)
+
+ return
+ }
+
+ if flags.Destination != wantDestination {
+ t.Errorf("Flags.Destination = %v, want %v", flags.Destination, wantDestination)
+ }
+}
+
+func TestFlagssetDefaultDestination(t *testing.T) {
+ tempDir := t.TempDir()
+ baseName := testutil.BaseName(tempDir)
+
tests := []struct {
- name string
- flags *Flags
- setupFunc func(t *testing.T, f *Flags)
- expectedDest string
- expectedError string
+ name string
+ flags *Flags
+ wantDestination string
+ wantErr bool
+ errContains string
}{
{
- name: "default destination for directory",
+ name: "set default destination markdown",
flags: &Flags{
- Format: "markdown",
+ SourceDir: tempDir,
+ Format: "markdown",
+ LogLevel: "warn",
},
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
- },
- expectedDest: "", // will check suffix below
+ wantDestination: baseName + ".markdown",
+ wantErr: false,
},
{
- name: "default destination for json format",
+ name: "set default destination json",
flags: &Flags{
- Format: "json",
+ SourceDir: tempDir,
+ Format: "json",
+ LogLevel: "warn",
},
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
- },
- expectedDest: "", // will check suffix below
+ wantDestination: baseName + ".json",
+ wantErr: false,
},
{
- name: "provided destination unchanged",
+ name: "set default destination yaml",
flags: &Flags{
- Format: "markdown",
- Destination: "custom-output.txt",
+ SourceDir: tempDir,
+ Format: "yaml",
+ LogLevel: "warn",
},
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
- },
- expectedDest: "custom-output.txt",
+ wantDestination: baseName + ".yaml",
+ wantErr: false,
},
{
- name: "path traversal in destination",
+ name: "preserve existing destination",
flags: &Flags{
- Format: "markdown",
- Destination: testPathTraversalPath,
+ SourceDir: tempDir,
+ Format: "yaml",
+ Destination: "custom-output.yaml",
+ LogLevel: "warn",
},
- setupFunc: func(t *testing.T, f *Flags) {
- f.SourceDir = t.TempDir()
+ wantDestination: "custom-output.yaml",
+ wantErr: false,
+ },
+ {
+ name: "nonexistent source path still generates destination",
+ flags: &Flags{
+ SourceDir: "/nonexistent/path/that/should/not/exist",
+ Format: "markdown",
+ LogLevel: "warn",
},
- expectedError: testErrPathTraversal,
+ wantDestination: "exist.markdown", // Based on filepath.Base of the path
+ wantErr: false, // AbsolutePath doesn't validate existence, only converts to absolute
},
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.setupFunc != nil {
- tt.setupFunc(t, tt.flags)
+ t.Run(
+ tt.name, func(t *testing.T) {
+ err := tt.flags.setDefaultDestination()
+ validateDefaultDestinationResult(t, tt.flags, err, tt.wantDestination, tt.wantErr, tt.errContains)
+ },
+ )
+ }
+}
+
+func TestParseFlagsSingleton(t *testing.T) {
+ // Capture and restore original os.Args
+ origArgs := os.Args
+ defer func() { os.Args = origArgs }()
+
+ resetFlagsState()
+ tempDir := t.TempDir()
+
+ // First call
+ setupCommandLineArgs([]string{shared.TestCLIFlagSource, tempDir, shared.TestCLIFlagFormat, "markdown"})
+ flags1, err := ParseFlags()
+ if err != nil {
+ t.Fatalf("First ParseFlags() failed: %v", err)
+ }
+
+ // Second call should return the same instance
+ flags2, err := ParseFlags()
+ if err != nil {
+ t.Fatalf("Second ParseFlags() failed: %v", err)
+ }
+
+ if flags1 != flags2 {
+ t.Error("ParseFlags() should return singleton instance, got different pointers")
+ }
+}
+
+// Helper functions
+
+// resetFlagsState resets the global flags state for testing.
+func resetFlagsState() {
+ flagsParsed = false
+ globalFlags = nil
+ // Reset the flag.CommandLine for clean testing (use ContinueOnError to match ResetFlags)
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
+}
+
+// setupCommandLineArgs sets up command line arguments for testing.
+func setupCommandLineArgs(args []string) {
+ os.Args = append([]string{"gibidify"}, args...)
+}
+
+// containsFlag checks if a flag is present in the arguments.
+func containsFlag(args []string, flagName string) bool {
+ for _, arg := range args {
+ if arg == flagName {
+ return true
+ }
+ }
+
+ return false
+}
+
+// verifyFlags compares two Flags structs for testing.
+func verifyFlags(t *testing.T, got, want *Flags) {
+ t.Helper()
+
+ if got.SourceDir != want.SourceDir {
+ t.Errorf("SourceDir = %v, want %v", got.SourceDir, want.SourceDir)
+ }
+ if got.Destination != want.Destination {
+ t.Errorf("Destination = %v, want %v", got.Destination, want.Destination)
+ }
+ if got.Prefix != want.Prefix {
+ t.Errorf("Prefix = %v, want %v", got.Prefix, want.Prefix)
+ }
+ if got.Suffix != want.Suffix {
+ t.Errorf("Suffix = %v, want %v", got.Suffix, want.Suffix)
+ }
+ if got.Format != want.Format {
+ t.Errorf("Format = %v, want %v", got.Format, want.Format)
+ }
+ if got.Concurrency != want.Concurrency {
+ t.Errorf("Concurrency = %v, want %v", got.Concurrency, want.Concurrency)
+ }
+ if got.NoColors != want.NoColors {
+ t.Errorf("NoColors = %v, want %v", got.NoColors, want.NoColors)
+ }
+ if got.NoProgress != want.NoProgress {
+ t.Errorf("NoProgress = %v, want %v", got.NoProgress, want.NoProgress)
+ }
+ if got.Verbose != want.Verbose {
+ t.Errorf("Verbose = %v, want %v", got.Verbose, want.Verbose)
+ }
+ if got.LogLevel != want.LogLevel {
+ t.Errorf("LogLevel = %v, want %v", got.LogLevel, want.LogLevel)
+ }
+ if got.NoUI != want.NoUI {
+ t.Errorf("NoUI = %v, want %v", got.NoUI, want.NoUI)
+ }
+}
+
+// TestResetFlags tests the ResetFlags function.
+func TestResetFlags(t *testing.T) {
+ // Save original state
+ originalArgs := os.Args
+ originalFlagsParsed := flagsParsed
+ originalGlobalFlags := globalFlags
+ originalCommandLine := flag.CommandLine
+
+ defer func() {
+ // Restore original state
+ os.Args = originalArgs
+ flagsParsed = originalFlagsParsed
+ globalFlags = originalGlobalFlags
+ flag.CommandLine = originalCommandLine
+ }()
+
+ // Simplified test cases to reduce complexity
+ testCases := map[string]func(t *testing.T){
+ "reset after flags have been parsed": func(t *testing.T) {
+ srcDir := t.TempDir()
+ testutil.CreateTestFile(t, srcDir, "test.txt", []byte("test"))
+ os.Args = []string{"test", "-source", srcDir, "-destination", "out.json"}
+
+ // Parse flags first
+ if _, err := ParseFlags(); err != nil {
+ t.Fatalf("Setup failed: %v", err)
}
+ },
+ "reset with clean state": func(t *testing.T) {
+ if flagsParsed {
+ t.Log("Note: flagsParsed was already true at start")
+ }
+ },
+ "multiple resets": func(t *testing.T) {
+ srcDir := t.TempDir()
+ testutil.CreateTestFile(t, srcDir, "test.txt", []byte("test"))
+ os.Args = []string{"test", "-source", srcDir, "-destination", "out.json"}
- err := tt.flags.setDefaultDestination()
+ if _, err := ParseFlags(); err != nil {
+ t.Fatalf("Setup failed: %v", err)
+ }
+ },
+ }
- if tt.expectedError != "" {
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tt.expectedError)
- } else {
- assert.NoError(t, err)
- switch {
- case tt.expectedDest != "":
- assert.Equal(t, tt.expectedDest, tt.flags.Destination)
- case tt.flags.Format == "json":
- assert.True(
- t, strings.HasSuffix(tt.flags.Destination, ".json"),
- "expected %q to have suffix .json", tt.flags.Destination,
- )
- case tt.flags.Format == "markdown":
- assert.True(
- t, strings.HasSuffix(tt.flags.Destination, ".markdown"),
- "expected %q to have suffix .markdown", tt.flags.Destination,
- )
- }
+ for name, setup := range testCases {
+ t.Run(name, func(t *testing.T) {
+ // Setup test scenario
+ setup(t)
+
+ // Call ResetFlags
+ ResetFlags()
+
+ // Basic verification that reset worked
+ if flagsParsed {
+ t.Error("flagsParsed should be false after ResetFlags()")
+ }
+ if globalFlags != nil {
+ t.Error("globalFlags should be nil after ResetFlags()")
}
})
}
}
-func TestFlagsSingleton(t *testing.T) {
- // Save original state
- oldFlagsParsed := flagsParsed
- oldGlobalFlags := globalFlags
- defer func() {
- flagsParsed = oldFlagsParsed
- globalFlags = oldGlobalFlags
- }()
+// TestResetFlags_Integration tests ResetFlags in integration scenarios.
+func TestResetFlagsIntegration(t *testing.T) {
+ // This test verifies that ResetFlags properly resets the internal state
+ // to allow multiple calls to ParseFlags in test scenarios.
- // Test singleton behavior
- flagsParsed = true
- expectedFlags := &Flags{
- SourceDir: "/test",
- Format: "json",
- Concurrency: 2,
+ // Note: This test documents the expected behavior of ResetFlags
+ // The actual integration with ParseFlags is already tested in main tests
+ // where ResetFlags is used to enable proper test isolation.
+
+ t.Run("state_reset_behavior", func(t *testing.T) {
+ // Test behavior is already covered in TestResetFlags
+ // This is mainly for documentation of the integration pattern
+
+ t.Log("ResetFlags integration behavior:")
+ t.Log("1. Resets flagsParsed to false")
+ t.Log("2. Sets globalFlags to nil")
+ t.Log("3. Creates new flag.CommandLine FlagSet")
+ t.Log("4. Allows subsequent ParseFlags calls")
+
+ // The actual mechanics are tested in TestResetFlags
+ // This test serves to document the integration contract
+
+ // Reset state (this should not panic)
+ ResetFlags()
+
+ // Verify basic state expectations
+ if flagsParsed {
+ t.Error("flagsParsed should be false after ResetFlags")
+ }
+ if globalFlags != nil {
+ t.Error("globalFlags should be nil after ResetFlags")
+ }
+ if flag.CommandLine == nil {
+ t.Error("flag.CommandLine should not be nil after ResetFlags")
+ }
+ })
+}
+
+// Benchmarks for flag-related operations.
+// While flag parsing is a one-time startup operation, these benchmarks
+// document baseline performance and catch regressions if parsing logic becomes more complex.
+//
+// Note: ParseFlags benchmarks are omitted because resetFlagsState() interferes with
+// Go's testing framework flags. The core operations (setDefaultDestination, validate)
+// are benchmarked instead.
+
+// BenchmarkSetDefaultDestination measures the setDefaultDestination operation.
+func BenchmarkSetDefaultDestination(b *testing.B) {
+ tempDir := b.TempDir()
+
+ for b.Loop() {
+ flags := &Flags{
+ SourceDir: tempDir,
+ Format: "markdown",
+ LogLevel: "warn",
+ }
+ _ = flags.setDefaultDestination()
}
- globalFlags = expectedFlags
-
- // Should return cached flags without parsing
- flags, err := ParseFlags()
- assert.NoError(t, err)
- assert.Equal(t, expectedFlags, flags)
- assert.Same(t, globalFlags, flags)
}
-func TestNewMissingSourceError(t *testing.T) {
- err := NewMissingSourceError()
+// BenchmarkSetDefaultDestinationAllFormats measures setDefaultDestination across all formats.
+func BenchmarkSetDefaultDestinationAllFormats(b *testing.B) {
+ tempDir := b.TempDir()
+ formats := []string{"markdown", "json", "yaml"}
- assert.Error(t, err)
- assert.Equal(t, testErrSourceRequired, err.Error())
-
- // Check if it's the right type
- var missingSourceError *MissingSourceError
- ok := errors.As(err, &missingSourceError)
- assert.True(t, ok)
+ for b.Loop() {
+ for _, format := range formats {
+ flags := &Flags{
+ SourceDir: tempDir,
+ Format: format,
+ LogLevel: "warn",
+ }
+ _ = flags.setDefaultDestination()
+ }
+ }
+}
+
+// BenchmarkFlagsValidate measures the validate operation.
+func BenchmarkFlagsValidate(b *testing.B) {
+ tempDir := b.TempDir()
+ flags := &Flags{
+ SourceDir: tempDir,
+ Destination: "output.md",
+ Format: "markdown",
+ LogLevel: "warn",
+ }
+
+ for b.Loop() {
+ _ = flags.validate()
+ }
+}
+
+// BenchmarkFlagsValidateAllFormats measures validate across all formats.
+func BenchmarkFlagsValidateAllFormats(b *testing.B) {
+ tempDir := b.TempDir()
+ formats := []string{"markdown", "json", "yaml"}
+
+ for b.Loop() {
+ for _, format := range formats {
+ flags := &Flags{
+ SourceDir: tempDir,
+ Destination: "output." + format,
+ Format: format,
+ LogLevel: "warn",
+ }
+ _ = flags.validate()
+ }
+ }
}
diff --git a/cli/processor_collection.go b/cli/processor_collection.go
index 5471440..d07c55f 100644
--- a/cli/processor_collection.go
+++ b/cli/processor_collection.go
@@ -1,46 +1,48 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"fmt"
"os"
- "github.com/sirupsen/logrus"
-
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// collectFiles collects all files to be processed.
func (p *Processor) collectFiles() ([]string, error) {
files, err := fileproc.CollectFiles(p.flags.SourceDir)
if err != nil {
- return nil, gibidiutils.WrapError(
+ return nil, shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
"error collecting files",
)
}
- logrus.Infof("Found %d files to process", len(files))
+
+ logger := shared.GetLogger()
+ logger.Infof(shared.CLIMsgFoundFilesToProcess, len(files))
+
return files, nil
}
// validateFileCollection validates the collected files against resource limits.
func (p *Processor) validateFileCollection(files []string) error {
- if !config.GetResourceLimitsEnabled() {
+ if !config.ResourceLimitsEnabled() {
return nil
}
// Check file count limit
- maxFiles := config.GetMaxFiles()
+ maxFiles := config.MaxFiles()
if len(files) > maxFiles {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitFiles,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitFiles,
fmt.Sprintf("file count (%d) exceeds maximum limit (%d)", len(files), maxFiles),
"",
- map[string]interface{}{
+ map[string]any{
"file_count": len(files),
"max_files": maxFiles,
},
@@ -48,7 +50,7 @@ func (p *Processor) validateFileCollection(files []string) error {
}
// Check total size limit (estimate)
- maxTotalSize := config.GetMaxTotalSize()
+ maxTotalSize := config.MaxTotalSize()
totalSize := int64(0)
oversizedFiles := 0
@@ -56,16 +58,14 @@ func (p *Processor) validateFileCollection(files []string) error {
if fileInfo, err := os.Stat(filePath); err == nil {
totalSize += fileInfo.Size()
if totalSize > maxTotalSize {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitTotalSize,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTotalSize,
fmt.Sprintf(
- "total file size (%d bytes) would exceed maximum limit (%d bytes)",
- totalSize,
- maxTotalSize,
+ "total file size (%d bytes) would exceed maximum limit (%d bytes)", totalSize, maxTotalSize,
),
"",
- map[string]interface{}{
+ map[string]any{
"total_size": totalSize,
"max_total_size": maxTotalSize,
"files_checked": len(files),
@@ -77,10 +77,12 @@ func (p *Processor) validateFileCollection(files []string) error {
}
}
+ logger := shared.GetLogger()
if oversizedFiles > 0 {
- logrus.Warnf("Could not stat %d files during pre-validation", oversizedFiles)
+ logger.Warnf("Could not stat %d files during pre-validation", oversizedFiles)
}
- logrus.Infof("Pre-validation passed: %d files, %d MB total", len(files), totalSize/1024/1024)
+ logger.Infof("Pre-validation passed: %d files, %d MB total", len(files), totalSize/int64(shared.BytesPerMB))
+
return nil
}
diff --git a/cli/processor_processing.go b/cli/processor_processing.go
index 0e9c183..d3c465e 100644
--- a/cli/processor_processing.go
+++ b/cli/processor_processing.go
@@ -1,12 +1,14 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"context"
"os"
"sync"
+ "time"
"github.com/ivuorinen/gibidify/fileproc"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// Process executes the main file processing workflow.
@@ -16,9 +18,7 @@ func (p *Processor) Process(ctx context.Context) error {
defer overallCancel()
// Configure file type registry
- if err := p.configureFileTypes(); err != nil {
- return err
- }
+ p.configureFileTypes()
// Print startup info with colors
p.ui.PrintHeader("🚀 Starting gibidify")
@@ -31,23 +31,32 @@ func (p *Processor) Process(ctx context.Context) error {
p.resourceMonitor.LogResourceInfo()
p.backpressure.LogBackpressureInfo()
- // Collect files with progress indication
+ // Collect files with progress indication and timing
p.ui.PrintInfo("📁 Collecting files...")
+ collectionStart := time.Now()
files, err := p.collectFiles()
+ collectionTime := time.Since(collectionStart)
+ p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseCollection, collectionTime)
+
if err != nil {
return err
}
// Show collection results
- p.ui.PrintSuccess("Found %d files to process", len(files))
+ p.ui.PrintSuccess(shared.CLIMsgFoundFilesToProcess, len(files))
// Pre-validate file collection against resource limits
if err := p.validateFileCollection(files); err != nil {
return err
}
- // Process files with overall timeout
- return p.processFiles(overallCtx, files)
+ // Process files with overall timeout and timing
+ processingStart := time.Now()
+ err = p.processFiles(overallCtx, files)
+ processingTime := time.Since(processingStart)
+ p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseProcessing, processingTime)
+
+ return err
}
// processFiles processes the collected files.
@@ -57,7 +66,7 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
return err
}
defer func() {
- gibidiutils.LogError("Error closing output file", outFile.Close())
+ shared.LogError("Error closing output file", outFile.Close())
}()
// Initialize back-pressure and channels
@@ -67,11 +76,7 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
writerDone := make(chan struct{})
// Start writer
- go fileproc.StartWriter(outFile, writeCh, writerDone, fileproc.WriterConfig{
- Format: p.flags.Format,
- Prefix: p.flags.Prefix,
- Suffix: p.flags.Suffix,
- })
+ go fileproc.StartWriter(outFile, writeCh, writerDone, p.flags.Format, p.flags.Prefix, p.flags.Suffix)
// Start workers
var wg sync.WaitGroup
@@ -83,28 +88,41 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
// Send files to workers
if err := p.sendFiles(ctx, files, fileCh); err != nil {
p.ui.FinishProgress()
+
return err
}
- // Wait for completion
+ // Wait for completion with timing
+ writingStart := time.Now()
p.waitForCompletion(&wg, writeCh, writerDone)
+ writingTime := time.Since(writingStart)
+ p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseWriting, writingTime)
+
p.ui.FinishProgress()
+ // Final cleanup with timing
+ finalizeStart := time.Now()
p.logFinalStats()
+ finalizeTime := time.Since(finalizeStart)
+ p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseFinalize, finalizeTime)
+
p.ui.PrintSuccess("Processing completed. Output saved to %s", p.flags.Destination)
+
return nil
}
// createOutputFile creates the output file.
func (p *Processor) createOutputFile() (*os.File, error) {
// Destination path has been validated in CLI flags validation for path traversal attempts
- // #nosec G304 - destination is validated in flags.validate()
- outFile, err := os.Create(p.flags.Destination)
+ outFile, err := os.Create(p.flags.Destination) // #nosec G304 - destination is validated in flags.validate()
if err != nil {
- return nil, gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOFileCreate,
+ return nil, shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOFileCreate,
"failed to create output file",
).WithFilePath(p.flags.Destination)
}
+
return outFile, nil
}
diff --git a/cli/processor_simple_test.go b/cli/processor_simple_test.go
deleted file mode 100644
index 7a5ec44..0000000
--- a/cli/processor_simple_test.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package cli
-
-import (
- "context"
- "os"
- "path/filepath"
- "sync"
- "testing"
-
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/ivuorinen/gibidify/fileproc"
-)
-
-func TestProcessorSimple(t *testing.T) {
- t.Run("NewProcessor", func(t *testing.T) {
- flags := &Flags{
- SourceDir: "/tmp/test",
- Destination: "output.md",
- Format: "markdown",
- Concurrency: 2,
- NoColors: true,
- NoProgress: true,
- Verbose: false,
- }
-
- p := NewProcessor(flags)
-
- assert.NotNil(t, p)
- assert.Equal(t, flags, p.flags)
- assert.NotNil(t, p.ui)
- assert.NotNil(t, p.backpressure)
- assert.NotNil(t, p.resourceMonitor)
- assert.False(t, p.ui.enableColors)
- assert.False(t, p.ui.enableProgress)
- })
-
- t.Run("ConfigureFileTypes", func(t *testing.T) {
- p := &Processor{
- flags: &Flags{},
- ui: NewUIManager(),
- }
-
- // Should not panic or error
- err := p.configureFileTypes()
- assert.NoError(t, err)
- assert.NotNil(t, p)
- })
-
- t.Run("CreateOutputFile", func(t *testing.T) {
- // Create temp file path
- tempDir := t.TempDir()
- outputPath := filepath.Join(tempDir, "output.txt")
-
- p := &Processor{
- flags: &Flags{
- Destination: outputPath,
- },
- ui: NewUIManager(),
- }
-
- file, err := p.createOutputFile()
- assert.NoError(t, err)
- assert.NotNil(t, file)
-
- // Clean up
- err = file.Close()
- require.NoError(t, err)
- err = os.Remove(outputPath)
- require.NoError(t, err)
- })
-
- t.Run("ValidateFileCollection", func(t *testing.T) {
- p := &Processor{
- ui: NewUIManager(),
- }
-
- // Empty collection should be valid (just checks limits)
- err := p.validateFileCollection([]string{})
- assert.NoError(t, err)
-
- // Small collection should be valid
- err = p.validateFileCollection([]string{
- testFilePath1,
- testFilePath2,
- })
- assert.NoError(t, err)
- })
-
- t.Run("CollectFiles_EmptyDir", func(t *testing.T) {
- tempDir := t.TempDir()
-
- p := &Processor{
- flags: &Flags{
- SourceDir: tempDir,
- },
- ui: NewUIManager(),
- }
-
- files, err := p.collectFiles()
- assert.NoError(t, err)
- assert.Empty(t, files)
- })
-
- t.Run("CollectFiles_WithFiles", func(t *testing.T) {
- tempDir := t.TempDir()
-
- // Create test files
- require.NoError(t, os.WriteFile(filepath.Join(tempDir, "test1.go"), []byte("package main"), 0o600))
- require.NoError(t, os.WriteFile(filepath.Join(tempDir, "test2.go"), []byte("package test"), 0o600))
-
- // Set config so no files are ignored, and restore after test
- origIgnoreDirs := viper.Get("ignoreDirectories")
- origFileSizeLimit := viper.Get("fileSizeLimit")
- viper.Set("ignoreDirectories", []string{})
- viper.Set("fileSizeLimit", 1024*1024*10) // 10MB
- t.Cleanup(func() {
- viper.Set("ignoreDirectories", origIgnoreDirs)
- viper.Set("fileSizeLimit", origFileSizeLimit)
- })
-
- p := &Processor{
- flags: &Flags{
- SourceDir: tempDir,
- },
- ui: NewUIManager(),
- }
-
- files, err := p.collectFiles()
- assert.NoError(t, err)
- assert.Len(t, files, 2)
- })
-
- t.Run("SendFiles", func(t *testing.T) {
- p := &Processor{
- backpressure: fileproc.NewBackpressureManager(),
- ui: NewUIManager(),
- }
-
- ctx := context.Background()
- fileCh := make(chan string, 3)
- files := []string{
- testFilePath1,
- testFilePath2,
- }
-
- var wg sync.WaitGroup
- wg.Add(1)
- // Send files in a goroutine since it might block
- go func() {
- defer wg.Done()
- err := p.sendFiles(ctx, files, fileCh)
- assert.NoError(t, err)
- }()
-
- // Read all files from channel
- var received []string
- for i := 0; i < len(files); i++ {
- file := <-fileCh
- received = append(received, file)
- }
-
- assert.Equal(t, len(files), len(received))
-
- // Wait for sendFiles goroutine to finish (and close fileCh)
- wg.Wait()
-
- // Now channel should be closed
- _, ok := <-fileCh
- assert.False(t, ok, "channel should be closed")
- })
-
- t.Run("WaitForCompletion", func(t *testing.T) {
- p := &Processor{
- ui: NewUIManager(),
- }
-
- writeCh := make(chan fileproc.WriteRequest)
- writerDone := make(chan struct{})
-
- // Simulate writer finishing
- go func() {
- <-writeCh // Wait for close
- close(writerDone)
- }()
-
- var wg sync.WaitGroup
- // Start and finish immediately
- wg.Add(1)
- wg.Done()
-
- // Should complete without hanging
- p.waitForCompletion(&wg, writeCh, writerDone)
- assert.NotNil(t, p)
- })
-
- t.Run("LogFinalStats", func(t *testing.T) {
- p := &Processor{
- flags: &Flags{
- Verbose: true,
- },
- ui: NewUIManager(),
- resourceMonitor: fileproc.NewResourceMonitor(),
- backpressure: fileproc.NewBackpressureManager(),
- }
-
- // Should not panic
- p.logFinalStats()
- assert.NotNil(t, p)
- })
-}
-
-// Test error handling scenarios
-func TestProcessorErrors(t *testing.T) {
- t.Run("CreateOutputFile_InvalidPath", func(t *testing.T) {
- p := &Processor{
- flags: &Flags{
- Destination: "/root/cannot-write-here.txt",
- },
- ui: NewUIManager(),
- }
-
- file, err := p.createOutputFile()
- assert.Error(t, err)
- assert.Nil(t, file)
- })
-
- t.Run("CollectFiles_NonExistentDir", func(t *testing.T) {
- p := &Processor{
- flags: &Flags{
- SourceDir: "/non/existent/path",
- },
- ui: NewUIManager(),
- }
-
- files, err := p.collectFiles()
- assert.Error(t, err)
- assert.Nil(t, files)
- })
-
- t.Run("SendFiles_WithCancellation", func(t *testing.T) {
- p := &Processor{
- backpressure: fileproc.NewBackpressureManager(),
- ui: NewUIManager(),
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- fileCh := make(chan string) // Unbuffered to force blocking
-
- files := []string{
- testFilePath1,
- testFilePath2,
- "/test/file3.go",
- }
-
- // Cancel immediately
- cancel()
-
- err := p.sendFiles(ctx, files, fileCh)
- assert.Error(t, err)
- assert.Equal(t, context.Canceled, err)
- })
-}
diff --git a/cli/processor_stats.go b/cli/processor_stats.go
index c55fbd7..60e247d 100644
--- a/cli/processor_stats.go
+++ b/cli/processor_stats.go
@@ -1,44 +1,108 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
- "github.com/sirupsen/logrus"
+ "strings"
"github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
)
-// logFinalStats logs the final back-pressure and resource monitoring statistics.
+// logFinalStats logs back-pressure, resource usage, and processing statistics.
func (p *Processor) logFinalStats() {
- // Log back-pressure stats
- backpressureStats := p.backpressure.GetStats()
+ p.logBackpressureStats()
+ p.logResourceStats()
+ p.finalizeAndReportMetrics()
+ p.logVerboseStats()
+ if p.resourceMonitor != nil {
+ p.resourceMonitor.Close()
+ }
+}
+
+// logBackpressureStats logs back-pressure statistics.
+func (p *Processor) logBackpressureStats() {
+ // Check backpressure is non-nil before dereferencing
+ if p.backpressure == nil {
+ return
+ }
+
+ logger := shared.GetLogger()
+ backpressureStats := p.backpressure.Stats()
if backpressureStats.Enabled {
- logrus.Infof(
+ logger.Infof(
"Back-pressure stats: processed=%d files, memory=%dMB/%dMB",
backpressureStats.FilesProcessed,
- backpressureStats.CurrentMemoryUsage/1024/1024,
- backpressureStats.MaxMemoryUsage/1024/1024,
+ backpressureStats.CurrentMemoryUsage/int64(shared.BytesPerMB),
+ backpressureStats.MaxMemoryUsage/int64(shared.BytesPerMB),
)
}
+}
- // Log resource monitoring stats
- resourceStats := p.resourceMonitor.GetMetrics()
- if config.GetResourceLimitsEnabled() {
- logrus.Infof("Resource stats: processed=%d files, totalSize=%dMB, avgFileSize=%.2fKB, rate=%.2f files/sec",
- resourceStats.FilesProcessed, resourceStats.TotalSizeProcessed/1024/1024,
- resourceStats.AverageFileSize/1024, resourceStats.ProcessingRate)
-
- if len(resourceStats.ViolationsDetected) > 0 {
- logrus.Warnf("Resource violations detected: %v", resourceStats.ViolationsDetected)
- }
-
- if resourceStats.DegradationActive {
- logrus.Warnf("Processing completed with degradation mode active")
- }
-
- if resourceStats.EmergencyStopActive {
- logrus.Errorf("Processing completed with emergency stop active")
- }
+// logResourceStats logs resource monitoring statistics.
+func (p *Processor) logResourceStats() {
+ // Check resource monitoring is enabled and monitor is non-nil before dereferencing
+ if !config.ResourceLimitsEnabled() {
+ return
}
- // Clean up resource monitor
- p.resourceMonitor.Close()
+ if p.resourceMonitor == nil {
+ return
+ }
+
+ logger := shared.GetLogger()
+ resourceStats := p.resourceMonitor.Metrics()
+
+ logger.Infof(
+ "Resource stats: processed=%d files, totalSize=%dMB, avgFileSize=%.2fKB, rate=%.2f files/sec",
+ resourceStats.FilesProcessed, resourceStats.TotalSizeProcessed/int64(shared.BytesPerMB),
+ resourceStats.AverageFileSize/float64(shared.BytesPerKB), resourceStats.ProcessingRate,
+ )
+
+ if len(resourceStats.ViolationsDetected) > 0 {
+ logger.Warnf("Resource violations detected: %v", resourceStats.ViolationsDetected)
+ }
+
+ if resourceStats.DegradationActive {
+ logger.Warnf("Processing completed with degradation mode active")
+ }
+
+ if resourceStats.EmergencyStopActive {
+ logger.Errorf("Processing completed with emergency stop active")
+ }
+}
+
+// finalizeAndReportMetrics finalizes metrics collection and displays the final report.
+func (p *Processor) finalizeAndReportMetrics() {
+ if p.metricsCollector != nil {
+ p.metricsCollector.Finish()
+ }
+
+ if p.metricsReporter != nil {
+ finalReport := p.metricsReporter.ReportFinal()
+ if finalReport != "" && p.ui != nil {
+ // Use UI manager to respect NoUI flag - remove trailing newline if present
+ p.ui.PrintInfo("%s", strings.TrimSuffix(finalReport, "\n"))
+ }
+ }
+}
+
+// logVerboseStats logs detailed structured statistics when verbose mode is enabled.
+func (p *Processor) logVerboseStats() {
+ if !p.flags.Verbose || p.metricsCollector == nil {
+ return
+ }
+
+ logger := shared.GetLogger()
+ report := p.metricsCollector.GenerateReport()
+ fields := map[string]any{
+ "total_files": report.Summary.TotalFiles,
+ "processed_files": report.Summary.ProcessedFiles,
+ "skipped_files": report.Summary.SkippedFiles,
+ "error_files": report.Summary.ErrorFiles,
+ "processing_time": report.Summary.ProcessingTime,
+ "files_per_second": report.Summary.FilesPerSecond,
+ "bytes_per_second": report.Summary.BytesPerSecond,
+ "memory_usage_mb": report.Summary.CurrentMemoryMB,
+ }
+ logger.WithFields(fields).Info("Processing completed with comprehensive metrics")
}
diff --git a/cli/processor_test.go b/cli/processor_test.go
new file mode 100644
index 0000000..1287c66
--- /dev/null
+++ b/cli/processor_test.go
@@ -0,0 +1,1025 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/spf13/viper"
+
+ "github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/fileproc"
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
+)
+
+// TestNewProcessor tests the processor constructor.
+func TestNewProcessor(t *testing.T) {
+ tests := []struct {
+ name string
+ flags *Flags
+ want processorValidation
+ }{
+ {
+ name: "basic processor creation",
+ flags: &Flags{
+ SourceDir: shared.TestSourcePath,
+ Format: "markdown",
+ Concurrency: 2,
+ Destination: shared.TestOutputMarkdown,
+ NoColors: false,
+ NoProgress: false,
+ },
+ want: processorValidation{
+ hasBackpressure: true,
+ hasResourceMonitor: true,
+ hasUI: true,
+ colorsEnabled: true,
+ progressEnabled: true,
+ },
+ },
+ {
+ name: "processor with colors and progress disabled",
+ flags: &Flags{
+ SourceDir: shared.TestSourcePath,
+ Format: "json",
+ Concurrency: 4,
+ Destination: "/test/output.json",
+ NoColors: true,
+ NoProgress: true,
+ },
+ want: processorValidation{
+ hasBackpressure: true,
+ hasResourceMonitor: true,
+ hasUI: true,
+ colorsEnabled: false,
+ progressEnabled: false,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ processor := NewProcessor(tt.flags)
+
+ validateProcessor(t, processor, tt.want)
+ validateProcessorFlags(t, processor, tt.flags)
+ },
+ )
+ }
+}
+
+// configureFileTypesTestCase holds test case data for file types configuration.
+type configureFileTypesTestCase struct {
+ name string
+ fileTypesEnabled bool
+ customExtensions []string
+ wantCustom bool
+}
+
+// setupFileTypesConfig initializes viper config for file types test.
+func setupFileTypesConfig(t *testing.T, tt configureFileTypesTestCase) {
+ t.Helper()
+ viper.Reset()
+ config.SetDefaultConfig()
+ viper.Set(shared.ConfigKeyFileTypesEnabled, tt.fileTypesEnabled)
+ if len(tt.customExtensions) > 0 {
+ viper.Set("fileTypes.customImageExtensions", tt.customExtensions)
+ }
+}
+
+// verifyDefaultExtensions checks that default extensions are recognized.
+func verifyDefaultExtensions(t *testing.T, registry *fileproc.FileTypeRegistry) {
+ t.Helper()
+ if !registry.IsImage(shared.TestFilePNG) {
+ t.Error("expected .png to be recognized as image (default extension)")
+ }
+ if !registry.IsImage(shared.TestFileJPG) {
+ t.Error("expected .jpg to be recognized as image (default extension)")
+ }
+ if registry.Language(shared.TestFileGo) == "" {
+ t.Error("expected .go to have language mapping (default extension)")
+ }
+}
+
+// verifyCustomExtensions checks that custom extensions are recognized when expected.
+func verifyCustomExtensions(t *testing.T, registry *fileproc.FileTypeRegistry, tt configureFileTypesTestCase) {
+ t.Helper()
+ if !tt.wantCustom || len(tt.customExtensions) == 0 {
+ return
+ }
+ testFile := "test" + tt.customExtensions[0]
+ if !registry.IsImage(testFile) {
+ t.Errorf("expected %s to be recognized as image (custom extension)", testFile)
+ }
+}
+
+// verifyRegistryState checks registry has reasonable state.
+func verifyRegistryState(t *testing.T, registry *fileproc.FileTypeRegistry) {
+ t.Helper()
+ _, _, maxCache := registry.CacheInfo()
+ if maxCache <= 0 {
+ t.Errorf("expected positive maxCacheSize, got %d", maxCache)
+ }
+}
+
+// TestProcessorConfigureFileTypes tests file type registry configuration.
+func TestProcessorConfigureFileTypes(t *testing.T) {
+ tests := []configureFileTypesTestCase{
+ {
+ name: "file types disabled - no custom extensions applied",
+ fileTypesEnabled: false,
+ customExtensions: []string{".testcustom"},
+ wantCustom: false,
+ },
+ {
+ name: "file types enabled - custom extensions applied",
+ fileTypesEnabled: true,
+ customExtensions: []string{".mycustomext"},
+ wantCustom: true,
+ },
+ {
+ name: "file types enabled with defaults",
+ fileTypesEnabled: true,
+ customExtensions: nil,
+ wantCustom: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ setupFileTypesConfig(t, tt)
+
+ if got := config.FileTypesEnabled(); got != tt.fileTypesEnabled {
+ t.Errorf("FileTypesEnabled() = %v, want %v", got, tt.fileTypesEnabled)
+ }
+
+ flags := &Flags{
+ SourceDir: shared.TestSourcePath,
+ Format: shared.FormatMarkdown,
+ Concurrency: 1,
+ Destination: shared.TestOutputMarkdown,
+ }
+ processor := NewProcessor(flags)
+ processor.configureFileTypes()
+
+ registry := fileproc.DefaultRegistry()
+ verifyDefaultExtensions(t, registry)
+ verifyCustomExtensions(t, registry, tt)
+ verifyRegistryState(t, registry)
+ })
+ }
+}
+
+// setupCollectFilesTest sets up test directory for file collection tests.
+func setupCollectFilesTest(t *testing.T, useNonExistent bool, setupFiles func(dir string) []string) string {
+ t.Helper()
+
+ if useNonExistent {
+ return "/non/existent/directory"
+ }
+
+ testDir := t.TempDir()
+ setupFiles(testDir)
+
+ return testDir
+}
+
+// validateCollectFiles validates file collection results.
+func validateCollectFiles(t *testing.T, files []string, err error, wantCount int, wantErr bool, errContains string) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error(shared.TestMsgExpectedError)
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf(shared.TestMsgErrorShouldContain, errContains, err)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf(shared.TestMsgUnexpectedError, err)
+
+ return
+ }
+
+ if len(files) != wantCount {
+ t.Errorf("Expected %d files, got %d", wantCount, len(files))
+ }
+}
+
+// TestProcessor_collectFiles tests file collection integration.
+func TestProcessorCollectFiles(t *testing.T) {
+ tests := []struct {
+ name string
+ setupFiles func(dir string) []string
+ useNonExistent bool
+ wantCount int
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "collect valid files",
+ setupFiles: func(dir string) []string {
+ files := []testutil.FileSpec{
+ {Name: "file1.go", Content: shared.LiteralPackageMain + "\n"},
+ {Name: shared.TestFile2, Content: "text content\n"},
+ {Name: "subdir/file3.py", Content: "print('hello')\n"},
+ }
+
+ // Create subdirectory
+ testutil.CreateTestDirectory(t, dir, "subdir")
+
+ return testutil.CreateTestFiles(t, dir, files)
+ },
+ useNonExistent: false,
+ wantCount: 3,
+ wantErr: false,
+ },
+ {
+ name: "collect from empty directory",
+ setupFiles: func(_ string) []string {
+ return []string{}
+ },
+ useNonExistent: false,
+ wantCount: 0,
+ wantErr: false,
+ },
+ {
+ name: "collect from non-existent directory",
+ setupFiles: func(_ string) []string {
+ return []string{}
+ },
+ useNonExistent: true,
+ wantCount: 0,
+ wantErr: true,
+ errContains: "error collecting files",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+ testDir := setupCollectFilesTest(t, tt.useNonExistent, tt.setupFiles)
+
+ flags := &Flags{
+ SourceDir: testDir,
+ Format: "markdown",
+ Concurrency: 1,
+ Destination: filepath.Join(t.TempDir(), shared.TestOutputMD),
+ }
+
+ processor := NewProcessor(flags)
+ files, err := processor.collectFiles()
+ validateCollectFiles(t, files, err, tt.wantCount, tt.wantErr, tt.errContains)
+ },
+ )
+ }
+}
+
+// setupValidationTestFiles creates test files for validation tests.
+func setupValidationTestFiles(t *testing.T, tempDir string, files []string) []string {
+ t.Helper()
+
+ var testFiles []string
+ for i, fileName := range files {
+ if fileName != "" {
+ content := fmt.Sprintf("test content %d", i)
+ filePath := testutil.CreateTestFile(
+ t, tempDir,
+ fmt.Sprintf("test_%d.txt", i), []byte(content),
+ )
+ testFiles = append(testFiles, filePath)
+ } else {
+ testFiles = append(testFiles, fileName)
+ }
+ }
+
+ return testFiles
+}
+
+// validateFileCollectionResult validates file collection validation results.
+func validateFileCollectionResult(t *testing.T, err error, wantErr bool, errContains string) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error(shared.TestMsgExpectedError)
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf(shared.TestMsgErrorShouldContain, errContains, err)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf(shared.TestMsgUnexpectedError, err)
+ }
+}
+
+// TestProcessor_validateFileCollection tests file validation against resource limits.
+func TestProcessorvalidateFileCollection(t *testing.T) {
+ tests := []struct {
+ name string
+ files []string
+ setupConfig func()
+ resourceLimitsEnabled bool
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "resource limits disabled",
+ files: []string{shared.TestFile1, shared.TestFile2},
+ resourceLimitsEnabled: false,
+ setupConfig: func() {
+ // No configuration needed for this test case
+ },
+ wantErr: false,
+ },
+ {
+ name: "within file count limit",
+ files: []string{shared.TestFile1},
+ resourceLimitsEnabled: true,
+ setupConfig: func() {
+ // Default configuration is sufficient for this test case
+ },
+ wantErr: false,
+ },
+ {
+ name: "exceeds file count limit",
+ files: make([]string, 10001), // Default limit is 10000
+ resourceLimitsEnabled: true,
+ setupConfig: func() {
+ // Default configuration is sufficient for this test case
+ },
+ wantErr: true,
+ errContains: "file count",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+ tt.setupConfig()
+
+ tempDir := t.TempDir()
+ testFiles := setupValidationTestFiles(t, tempDir, tt.files)
+
+ flags := &Flags{
+ SourceDir: tempDir,
+ Format: "markdown",
+ Concurrency: 1,
+ Destination: filepath.Join(t.TempDir(), shared.TestOutputMD),
+ }
+
+ processor := NewProcessor(flags)
+ err := processor.validateFileCollection(testFiles)
+ validateFileCollectionResult(t, err, tt.wantErr, tt.errContains)
+ },
+ )
+ }
+}
+
+// validateOutputFile validates output file creation results.
+func validateOutputFile(t *testing.T, outFile *os.File, err error, wantErr bool, errContains string) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error(shared.TestMsgExpectedError)
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf(shared.TestMsgErrorShouldContain, errContains, err)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf(shared.TestMsgUnexpectedError, err)
+
+ return
+ }
+
+ if outFile == nil {
+ t.Error("Expected valid file handle")
+
+ return
+ }
+
+ testutil.CloseFile(t, outFile)
+}
+
+// TestProcessor_createOutputFile tests output file creation.
+func TestProcessorcreateOutputFile(t *testing.T) {
+ tests := []struct {
+ name string
+ setupDest func() string
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "create valid output file",
+ setupDest: func() string {
+ return filepath.Join(t.TempDir(), shared.TestOutputMD)
+ },
+ wantErr: false,
+ },
+ {
+ name: "create file in non-existent directory",
+ setupDest: func() string {
+ return "/non/existent/dir/" + shared.TestOutputMD
+ },
+ wantErr: true,
+ errContains: "failed to create output file",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ flags := &Flags{
+ SourceDir: t.TempDir(),
+ Format: "markdown",
+ Concurrency: 1,
+ Destination: tt.setupDest(),
+ }
+
+ processor := NewProcessor(flags)
+ outFile, err := processor.createOutputFile()
+ validateOutputFile(t, outFile, err, tt.wantErr, tt.errContains)
+ },
+ )
+ }
+}
+
+// runProcessorIntegrationTest runs a single processor integration test.
+func runProcessorIntegrationTest(
+ t *testing.T,
+ testDir, format, outputPath string,
+ concurrency int,
+ timeout time.Duration,
+) error {
+ t.Helper()
+
+ flags := &Flags{
+ SourceDir: testDir,
+ Format: format,
+ Concurrency: concurrency,
+ Destination: outputPath,
+ NoColors: true, // Disable colors for consistent testing
+ NoProgress: true, // Disable progress for consistent testing
+ NoUI: true, // Disable all UI output for testing
+ }
+
+ processor := NewProcessor(flags)
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ return processor.Process(ctx)
+}
+
+// validateProcessingResult validates processor integration test results.
+func validateProcessingResult(t *testing.T, err error, outputPath, format string, wantErr bool, errContains string) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error(shared.TestMsgExpectedError)
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf(shared.TestMsgErrorShouldContain, errContains, err)
+ }
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf(shared.TestMsgUnexpectedError, err)
+
+ return
+ }
+
+ if _, err := os.Stat(outputPath); os.IsNotExist(err) {
+ t.Errorf("Output file was not created: %s", outputPath)
+
+ return
+ }
+
+ content, err := os.ReadFile(outputPath)
+ if err != nil {
+ t.Errorf("Failed to read output file: %v", err)
+
+ return
+ }
+
+ validateOutputContent(t, string(content), format)
+}
+
+// TestProcessor_Process_Integration tests the complete processing workflow.
+func TestProcessorProcessIntegration(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ tests := []struct {
+ name string
+ setupFiles func(dir string) []string
+ format string
+ concurrency int
+ timeout time.Duration
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "successful markdown processing",
+ setupFiles: func(dir string) []string {
+ files := []testutil.FileSpec{
+ {
+ Name: "main.go",
+ Content: shared.LiteralPackageMain + "\n\nfunc main() {\n\tprintln(\"Hello\")\n}\n",
+ },
+ {Name: "README.md", Content: "# Test Project\n\nThis is a test.\n"},
+ }
+
+ return testutil.CreateTestFiles(t, dir, files)
+ },
+ format: "markdown",
+ concurrency: 2,
+ timeout: 30 * time.Second,
+ wantErr: false,
+ },
+ {
+ name: "successful json processing",
+ setupFiles: func(dir string) []string {
+ files := []testutil.FileSpec{
+ {Name: "config.json", Content: "{\"name\": \"test\"}\n"},
+ }
+
+ return testutil.CreateTestFiles(t, dir, files)
+ },
+ format: "json",
+ concurrency: 1,
+ timeout: 30 * time.Second,
+ wantErr: false,
+ },
+ {
+ name: "processing with no files",
+ setupFiles: func(_ string) []string {
+ return []string{}
+ },
+ format: "yaml",
+ concurrency: 1,
+ timeout: 30 * time.Second,
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ testDir := t.TempDir()
+ tt.setupFiles(testDir)
+
+ outputPath := filepath.Join(t.TempDir(), "output."+tt.format)
+ err := runProcessorIntegrationTest(t, testDir, tt.format, outputPath, tt.concurrency, tt.timeout)
+ validateProcessingResult(t, err, outputPath, tt.format, tt.wantErr, tt.errContains)
+ },
+ )
+ }
+}
+
+// TestProcessor_Process_ContextCancellation tests context cancellation handling.
+func TestProcessorProcessContextCancellation(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ testutil.ResetViperConfig(t, "")
+
+ // Create test files
+ testDir := t.TempDir()
+ files := []testutil.FileSpec{
+ {Name: shared.TestFile1, Content: "content1\n"},
+ {Name: shared.TestFile2, Content: "content2\n"},
+ }
+ testutil.CreateTestFiles(t, testDir, files)
+
+ outputPath := filepath.Join(t.TempDir(), shared.TestOutputMD)
+
+ flags := &Flags{
+ SourceDir: testDir,
+ Format: "markdown",
+ Concurrency: 1,
+ Destination: outputPath,
+ NoColors: true,
+ NoProgress: true,
+ NoUI: true, // Disable all UI output for testing
+ }
+
+ processor := NewProcessor(flags)
+
+ // Create context that will be canceled immediately
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ err := processor.Process(ctx)
+
+ // Pre-canceled context must return an error
+ if err == nil {
+ t.Fatal("Expected error for pre-canceled context, got nil")
+ }
+
+ // Verify the error is related to context cancellation
+ if !strings.Contains(err.Error(), "context") {
+ t.Errorf("Expected error containing 'context', got: %v", err)
+ }
+}
+
+// TestProcessor_Process_ResourceLimits tests processing with resource limits.
+func TestProcessorProcessResourceLimits(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ tests := []struct {
+ name string
+ setupConfig func()
+ setupFiles func(dir string) []string
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "within resource limits",
+ setupConfig: func() {
+ // Use default limits
+ },
+ setupFiles: func(dir string) []string {
+ files := []testutil.FileSpec{
+ {Name: "small.txt", Content: "small content\n"},
+ }
+
+ return testutil.CreateTestFiles(t, dir, files)
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+ tt.setupConfig()
+
+ testDir := t.TempDir()
+ tt.setupFiles(testDir)
+
+ outputPath := filepath.Join(t.TempDir(), shared.TestOutputMD)
+
+ flags := &Flags{
+ SourceDir: testDir,
+ Format: "markdown",
+ Concurrency: 1,
+ Destination: outputPath,
+ NoColors: true,
+ NoProgress: true,
+ NoUI: true, // Disable all UI output for testing
+ }
+
+ processor := NewProcessor(flags)
+ ctx := context.Background()
+
+ err := processor.Process(ctx)
+
+ if tt.wantErr {
+ if err == nil {
+ t.Error(shared.TestMsgExpectedError)
+
+ return
+ }
+ if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
+ t.Errorf(shared.TestMsgErrorShouldContain, tt.errContains, err)
+ }
+ } else if err != nil {
+ t.Errorf(shared.TestMsgUnexpectedError, err)
+ }
+ },
+ )
+ }
+}
+
+// logFinalStatsTestCase holds test case data for log final stats tests.
+type logFinalStatsTestCase struct {
+ name string
+ enableBackpressure bool
+ enableResourceLimits bool
+ simulateProcessing bool
+ expectedKeywords []string
+ unexpectedKeywords []string
+}
+
+// setupLogStatsConfig initializes config for log stats test.
+func setupLogStatsConfig(t *testing.T, tt logFinalStatsTestCase) {
+ t.Helper()
+ viper.Reset()
+ config.SetDefaultConfig()
+ viper.Set(shared.ConfigKeyBackpressureEnabled, tt.enableBackpressure)
+ viper.Set(shared.ConfigKeyResourceLimitsEnabled, tt.enableResourceLimits)
+ shared.GetLogger().SetLevel(shared.LogLevelInfo)
+}
+
+// createLogStatsProcessor creates a processor for log stats testing.
+func createLogStatsProcessor(t *testing.T) *Processor {
+ t.Helper()
+ flags := &Flags{
+ SourceDir: t.TempDir(),
+ Format: shared.FormatMarkdown,
+ Concurrency: 1,
+ Destination: filepath.Join(t.TempDir(), shared.TestOutputMD),
+ NoUI: true,
+ NoColors: true,
+ NoProgress: true,
+ }
+ return NewProcessor(flags)
+}
+
+// simulateProcessing records file processing activity for stats generation.
+func simulateProcessing(processor *Processor, simulate bool) {
+ if !simulate || processor.resourceMonitor == nil {
+ return
+ }
+ processor.resourceMonitor.RecordFileProcessed(1024)
+ processor.resourceMonitor.RecordFileProcessed(2048)
+}
+
+// verifyLogKeywords checks expected and unexpected keywords in output.
+func verifyLogKeywords(t *testing.T, output string, expected, unexpected []string) {
+ t.Helper()
+ for _, keyword := range expected {
+ if !strings.Contains(output, keyword) {
+ t.Errorf("expected output to contain %q, got: %s", keyword, output)
+ }
+ }
+ for _, keyword := range unexpected {
+ if strings.Contains(output, keyword) {
+ t.Errorf("expected output NOT to contain %q, got: %s", keyword, output)
+ }
+ }
+}
+
+// TestProcessorLogFinalStats tests final statistics logging.
+func TestProcessorLogFinalStats(t *testing.T) {
+ tests := []logFinalStatsTestCase{
+ {
+ name: "basic stats without features enabled",
+ enableBackpressure: false,
+ enableResourceLimits: false,
+ simulateProcessing: false,
+ expectedKeywords: []string{},
+ unexpectedKeywords: []string{"Back-pressure stats", "Resource stats"},
+ },
+ {
+ name: "with backpressure enabled",
+ enableBackpressure: true,
+ enableResourceLimits: false,
+ simulateProcessing: true,
+ expectedKeywords: []string{"Back-pressure stats", "processed", "memory"},
+ unexpectedKeywords: []string{},
+ },
+ {
+ name: "with resource limits enabled",
+ enableBackpressure: false,
+ enableResourceLimits: true,
+ simulateProcessing: true,
+ expectedKeywords: []string{"Resource stats", "processed", "files"},
+ unexpectedKeywords: []string{},
+ },
+ {
+ name: "with all features enabled",
+ enableBackpressure: true,
+ enableResourceLimits: true,
+ simulateProcessing: true,
+ expectedKeywords: []string{"processed"},
+ unexpectedKeywords: []string{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ setupLogStatsConfig(t, tt)
+ _, getStderr, restore := testutil.CaptureOutput(t)
+
+ processor := createLogStatsProcessor(t)
+ simulateProcessing(processor, tt.simulateProcessing)
+ processor.logFinalStats()
+
+ restore()
+ verifyLogKeywords(t, getStderr(), tt.expectedKeywords, tt.unexpectedKeywords)
+
+ if processor.resourceMonitor != nil {
+ processor.resourceMonitor.Close()
+ }
+ })
+ }
+}
+
+// Helper types and functions
+
+type processorValidation struct {
+ hasBackpressure bool
+ hasResourceMonitor bool
+ hasUI bool
+ colorsEnabled bool
+ progressEnabled bool
+}
+
+func validateProcessor(t *testing.T, processor *Processor, want processorValidation) {
+ t.Helper()
+
+ if processor == nil {
+ t.Error("NewProcessor() returned nil")
+
+ return
+ }
+
+ if want.hasBackpressure && processor.backpressure == nil {
+ t.Error("Processor should have backpressure manager")
+ }
+
+ if want.hasResourceMonitor && processor.resourceMonitor == nil {
+ t.Error("Processor should have resource monitor")
+ }
+
+ if want.hasUI && processor.ui == nil {
+ t.Error("Processor should have UI manager")
+ }
+
+ if processor.ui != nil {
+ if processor.ui.enableColors != want.colorsEnabled {
+ t.Errorf("Colors enabled = %v, want %v", processor.ui.enableColors, want.colorsEnabled)
+ }
+
+ if processor.ui.enableProgress != want.progressEnabled {
+ t.Errorf("Progress enabled = %v, want %v", processor.ui.enableProgress, want.progressEnabled)
+ }
+ }
+}
+
+func validateProcessorFlags(t *testing.T, processor *Processor, flags *Flags) {
+ t.Helper()
+
+ if processor.flags != flags {
+ t.Error("Processor should store the provided flags")
+ }
+}
+
+func validateOutputContent(t *testing.T, content, format string) {
+ t.Helper()
+
+ if content == "" {
+ t.Error("Output content should not be empty")
+
+ return
+ }
+
+ switch format {
+ case "markdown":
+ // Markdown should have some structure
+ // Check for Markdown code blocks if content is substantial
+ // Empty directories might produce minimal output which is expected behavior
+ if !strings.Contains(content, "```") && len(content) > 10 {
+ t.Log("Markdown output may be minimal for empty directories")
+ }
+ case "json":
+ // JSON should start with [ or {
+ trimmed := strings.TrimSpace(content)
+ if len(trimmed) > 0 && !strings.HasPrefix(trimmed, "[") && !strings.HasPrefix(trimmed, "{") {
+ t.Error("JSON output should start with [ or {")
+ }
+ case "yaml":
+ // YAML output validation - content existence verified above
+ // Could add YAML structure validation if needed
+ default:
+ // For unknown formats, just log that we have content
+ t.Logf("Unknown format %s, content length: %d", format, len(content))
+ }
+}
+
+// Benchmark tests for processor performance
+
+func BenchmarkProcessorNewProcessor(b *testing.B) {
+ flags := &Flags{
+ SourceDir: shared.TestSourcePath,
+ Format: "markdown",
+ Concurrency: runtime.NumCPU(),
+ Destination: shared.TestOutputMarkdown,
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ processor := NewProcessor(flags)
+ _ = processor
+ }
+}
+
+func BenchmarkProcessorCollectFiles(b *testing.B) {
+ // Initialize config for file collection
+ viper.Reset()
+ config.LoadConfig()
+
+ fileSpecs := []testutil.FileSpec{
+ {Name: "file1.go", Content: shared.LiteralPackageMain + "\n"},
+ {Name: shared.TestFile2, Content: "content\n"},
+ {Name: "file3.py", Content: "print('hello')\n"},
+ }
+
+ for b.Loop() {
+ // Create fresh directories for each iteration
+ tempDir := b.TempDir()
+ outDir := b.TempDir()
+
+ // Create test files
+ for _, spec := range fileSpecs {
+ filePath := filepath.Join(tempDir, spec.Name)
+ if err := os.WriteFile(filePath, []byte(spec.Content), shared.TestFilePermission); err != nil {
+ b.Fatalf("Failed to create test file %s: %v", filePath, err)
+ }
+ }
+
+ flags := &Flags{
+ SourceDir: tempDir,
+ Format: "markdown",
+ Concurrency: 1,
+ Destination: filepath.Join(outDir, shared.TestOutputMD),
+ }
+
+ processor := NewProcessor(flags)
+ files, err := processor.collectFiles()
+ if err != nil {
+ b.Fatalf("collectFiles failed: %v", err)
+ }
+ if len(files) == 0 {
+ b.Fatal("Expected files to be collected")
+ }
+ }
+}
+
+// BenchmarkProcessor_Process benchmarks the full Process workflow.
+// This provides baseline measurements for the complete processing pipeline.
+func BenchmarkProcessorProcess(b *testing.B) {
+ // Initialize config for file collection and processing
+ viper.Reset()
+ config.LoadConfig()
+
+ tempDir := b.TempDir()
+
+ // Create a representative set of test files
+ for i := 0; i < 10; i++ {
+ filePath := filepath.Join(tempDir, fmt.Sprintf("file%d.go", i))
+ content := fmt.Sprintf("package main\n\nfunc fn%d() {}\n", i)
+ if err := os.WriteFile(filePath, []byte(content), shared.TestFilePermission); err != nil {
+ b.Fatalf("Failed to create test file: %v", err)
+ }
+ }
+
+ outputDir := b.TempDir()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ flags := &Flags{
+ SourceDir: tempDir,
+ Format: "markdown",
+ Concurrency: runtime.NumCPU(),
+ Destination: filepath.Join(outputDir, fmt.Sprintf("output_%d.md", i)),
+ NoUI: true,
+ NoColors: true,
+ NoProgress: true,
+ LogLevel: "warn",
+ }
+
+ processor := NewProcessor(flags)
+ _ = processor.Process(context.Background())
+ }
+}
diff --git a/cli/processor_types.go b/cli/processor_types.go
index 1675a66..fb39a19 100644
--- a/cli/processor_types.go
+++ b/cli/processor_types.go
@@ -1,16 +1,20 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
+ "github.com/ivuorinen/gibidify/metrics"
)
// Processor handles the main file processing logic.
type Processor struct {
- flags *Flags
- backpressure *fileproc.BackpressureManager
- resourceMonitor *fileproc.ResourceMonitor
- ui *UIManager
+ flags *Flags
+ backpressure *fileproc.BackpressureManager
+ resourceMonitor *fileproc.ResourceMonitor
+ ui *UIManager
+ metricsCollector *metrics.Collector
+ metricsReporter *metrics.Reporter
}
// NewProcessor creates a new processor with the given flags.
@@ -18,30 +22,38 @@ func NewProcessor(flags *Flags) *Processor {
ui := NewUIManager()
// Configure UI based on flags
- ui.SetColorOutput(!flags.NoColors)
- ui.SetProgressOutput(!flags.NoProgress)
+ ui.SetColorOutput(!flags.NoColors && !flags.NoUI)
+ ui.SetProgressOutput(!flags.NoProgress && !flags.NoUI)
+ ui.SetSilentMode(flags.NoUI)
+
+ // Initialize metrics system
+ metricsCollector := metrics.NewCollector()
+ metricsReporter := metrics.NewReporter(
+ metricsCollector,
+ flags.Verbose && !flags.NoUI,
+ !flags.NoColors && !flags.NoUI,
+ )
return &Processor{
- flags: flags,
- backpressure: fileproc.NewBackpressureManager(),
- resourceMonitor: fileproc.NewResourceMonitor(),
- ui: ui,
+ flags: flags,
+ backpressure: fileproc.NewBackpressureManager(),
+ resourceMonitor: fileproc.NewResourceMonitor(),
+ ui: ui,
+ metricsCollector: metricsCollector,
+ metricsReporter: metricsReporter,
}
}
// configureFileTypes configures the file type registry.
-func (p *Processor) configureFileTypes() error {
- if config.GetFileTypesEnabled() {
- if err := fileproc.ConfigureFromSettings(fileproc.RegistryConfig{
- CustomImages: config.GetCustomImageExtensions(),
- CustomBinary: config.GetCustomBinaryExtensions(),
- CustomLanguages: config.GetCustomLanguages(),
- DisabledImages: config.GetDisabledImageExtensions(),
- DisabledBinary: config.GetDisabledBinaryExtensions(),
- DisabledLanguages: config.GetDisabledLanguageExtensions(),
- }); err != nil {
- return err
- }
+func (p *Processor) configureFileTypes() {
+ if config.FileTypesEnabled() {
+ fileproc.ConfigureFromSettings(
+ config.CustomImageExtensions(),
+ config.CustomBinaryExtensions(),
+ config.CustomLanguages(),
+ config.DisabledImageExtensions(),
+ config.DisabledBinaryExtensions(),
+ config.DisabledLanguageExtensions(),
+ )
}
- return nil
}
diff --git a/cli/processor_workers.go b/cli/processor_workers.go
index df67cbd..30c295f 100644
--- a/cli/processor_workers.go
+++ b/cli/processor_workers.go
@@ -1,13 +1,17 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
"sync"
- "github.com/sirupsen/logrus"
-
"github.com/ivuorinen/gibidify/fileproc"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/metrics"
+ "github.com/ivuorinen/gibidify/shared"
)
// startWorkers starts the worker goroutines.
@@ -44,25 +48,69 @@ func (p *Processor) worker(
}
}
-// processFile processes a single file with resource monitoring.
+// processFile processes a single file with resource monitoring and metrics collection.
func (p *Processor) processFile(ctx context.Context, filePath string, writeCh chan fileproc.WriteRequest) {
+ // Create file processing context with timeout (resourceMonitor may be nil)
+ fileCtx, fileCancel := ctx, func() {}
+ if p.resourceMonitor != nil {
+ fileCtx, fileCancel = p.resourceMonitor.CreateFileProcessingContext(ctx)
+ }
+ defer fileCancel()
+
+ // Track concurrency
+ if p.metricsCollector != nil {
+ p.metricsCollector.IncrementConcurrency()
+ defer p.metricsCollector.DecrementConcurrency()
+ }
+
// Check for emergency stop
- if p.resourceMonitor.IsEmergencyStopActive() {
- logrus.Warnf("Emergency stop active, skipping file: %s", filePath)
+ if p.resourceMonitor != nil && p.resourceMonitor.IsEmergencyStopActive() {
+ logger := shared.GetLogger()
+ logger.Warnf("Emergency stop active, skipping file: %s", filePath)
+
+ // Record skipped file
+ p.recordFileResult(filePath, 0, "", false, true, "emergency stop active", nil)
+
+ if p.ui != nil {
+ p.ui.UpdateProgress(1)
+ }
+
return
}
- absRoot, err := gibidiutils.GetAbsolutePath(p.flags.SourceDir)
+ absRoot, err := shared.AbsolutePath(p.flags.SourceDir)
if err != nil {
- gibidiutils.LogError("Failed to get absolute path", err)
+ shared.LogError("Failed to get absolute path", err)
+
+ // Record error
+ p.recordFileResult(filePath, 0, "", false, false, "", err)
+
+ if p.ui != nil {
+ p.ui.UpdateProgress(1)
+ }
+
return
}
- // Use the resource monitor-aware processing
- fileproc.ProcessFileWithMonitor(ctx, filePath, writeCh, absRoot, p.resourceMonitor)
+ // Use the resource monitor-aware processing with metrics tracking
+ fileSize, format, success, processErr := p.processFileWithMetrics(fileCtx, filePath, writeCh, absRoot)
- // Update progress bar
- p.ui.UpdateProgress(1)
+ // Record the processing result (skipped=false, skipReason="" since processFileWithMetrics never skips)
+ p.recordFileResult(filePath, fileSize, format, success, false, "", processErr)
+
+ // Update progress bar with metrics
+ if p.ui != nil {
+ p.ui.UpdateProgress(1)
+ }
+
+ // Show real-time stats in verbose mode
+ if p.flags.Verbose && p.metricsCollector != nil {
+ currentMetrics := p.metricsCollector.CurrentMetrics()
+ if currentMetrics.ProcessedFiles%10 == 0 && p.metricsReporter != nil {
+ logger := shared.GetLogger()
+ logger.Info(p.metricsReporter.ReportProgress())
+ }
+ }
}
// sendFiles sends files to the worker channels with back-pressure handling.
@@ -78,15 +126,88 @@ func (p *Processor) sendFiles(ctx context.Context, files []string, fileCh chan s
// Wait for channel space if needed
p.backpressure.WaitForChannelSpace(ctx, fileCh, nil)
+ if err := shared.CheckContextCancellation(ctx, shared.CLIMsgFileProcessingWorker); err != nil {
+ return fmt.Errorf("context check failed: %w", err)
+ }
+
select {
- case <-ctx.Done():
- return ctx.Err()
case fileCh <- fp:
+ case <-ctx.Done():
+ if err := shared.CheckContextCancellation(ctx, shared.CLIMsgFileProcessingWorker); err != nil {
+ return fmt.Errorf("context cancellation during channel send: %w", err)
+ }
+
+ return errors.New("context canceled during channel send")
}
}
+
return nil
}
+// processFileWithMetrics wraps the file processing with detailed metrics collection.
+func (p *Processor) processFileWithMetrics(
+ ctx context.Context,
+ filePath string,
+ writeCh chan fileproc.WriteRequest,
+ absRoot string,
+) (fileSize int64, format string, success bool, err error) {
+ // Get file info
+ fileInfo, statErr := os.Stat(filePath)
+ if statErr != nil {
+ return 0, "", false, fmt.Errorf("getting file info for %s: %w", filePath, statErr)
+ }
+
+ fileSize = fileInfo.Size()
+
+ // Detect format from file extension
+ format = filepath.Ext(filePath)
+ if format != "" && format[0] == '.' {
+ format = format[1:] // Remove the dot
+ }
+
+ // Use the existing resource monitor-aware processing
+ err = fileproc.ProcessFileWithMonitor(ctx, filePath, writeCh, absRoot, p.resourceMonitor)
+
+ // Check if processing was successful
+ select {
+ case <-ctx.Done():
+ return fileSize, format, false, fmt.Errorf("file processing worker canceled: %w", ctx.Err())
+ default:
+ if err != nil {
+ return fileSize, format, false, fmt.Errorf("processing file %s: %w", filePath, err)
+ }
+
+ return fileSize, format, true, nil
+ }
+}
+
+// recordFileResult records the result of file processing in metrics.
+func (p *Processor) recordFileResult(
+ filePath string,
+ fileSize int64,
+ format string,
+ success bool,
+ skipped bool,
+ skipReason string,
+ err error,
+) {
+ if p.metricsCollector == nil {
+ return // No metrics collector, skip recording
+ }
+
+ result := metrics.FileProcessingResult{
+ FilePath: filePath,
+ FileSize: fileSize,
+ Format: format,
+ Success: success,
+ Error: err,
+ Skipped: skipped,
+ SkipReason: skipReason,
+ }
+
+ p.metricsCollector.RecordFileProcessed(result)
+}
+
// waitForCompletion waits for all workers to complete.
func (p *Processor) waitForCompletion(
wg *sync.WaitGroup,
diff --git a/cli/ui.go b/cli/ui.go
index 45a971e..26787f7 100644
--- a/cli/ui.go
+++ b/cli/ui.go
@@ -1,3 +1,4 @@
+// Package cli provides command-line interface functionality for gibidify.
package cli
import (
@@ -9,13 +10,14 @@ import (
"github.com/fatih/color"
"github.com/schollz/progressbar/v3"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// UIManager handles CLI user interface elements.
type UIManager struct {
enableColors bool
enableProgress bool
+ silentMode bool
progressBar *progressbar.ProgressBar
output io.Writer
}
@@ -40,43 +42,42 @@ func (ui *UIManager) SetProgressOutput(enabled bool) {
ui.enableProgress = enabled
}
+// SetSilentMode enables or disables all UI output.
+func (ui *UIManager) SetSilentMode(silent bool) {
+ ui.silentMode = silent
+ if silent {
+ ui.output = io.Discard
+ } else {
+ ui.output = os.Stderr
+ }
+}
+
// StartProgress initializes a progress bar for file processing.
func (ui *UIManager) StartProgress(total int, description string) {
if !ui.enableProgress || total <= 0 {
return
}
- // Set progress bar theme based on color support
- var theme progressbar.Theme
- if ui.enableColors {
- theme = progressbar.Theme{
- Saucer: color.GreenString("█"),
- SaucerHead: color.GreenString("█"),
- SaucerPadding: " ",
- BarStart: "[",
- BarEnd: "]",
- }
- } else {
- theme = progressbar.Theme{
- Saucer: "█",
- SaucerHead: "█",
- SaucerPadding: " ",
- BarStart: "[",
- BarEnd: "]",
- }
- }
-
ui.progressBar = progressbar.NewOptions(
total,
progressbar.OptionSetWriter(ui.output),
progressbar.OptionSetDescription(description),
- progressbar.OptionSetTheme(theme),
+ progressbar.OptionSetTheme(
+ progressbar.Theme{
+ Saucer: color.GreenString(shared.UIProgressBarChar),
+ SaucerHead: color.GreenString(shared.UIProgressBarChar),
+ SaucerPadding: " ",
+ BarStart: "[",
+ BarEnd: "]",
+ },
+ ),
progressbar.OptionShowCount(),
progressbar.OptionShowIts(),
progressbar.OptionSetWidth(40),
progressbar.OptionThrottle(100*time.Millisecond),
progressbar.OptionOnCompletion(
func() {
+ //nolint:errcheck // UI output, errors don't affect processing
_, _ = fmt.Fprint(ui.output, "\n")
},
),
@@ -99,49 +100,62 @@ func (ui *UIManager) FinishProgress() {
}
}
-// writeMessage writes a formatted message with optional colorization.
-// It handles color enablement, formatting, writing to output, and error logging.
-func (ui *UIManager) writeMessage(
- icon, methodName, format string,
- colorFunc func(string, ...interface{}) string,
- args ...interface{},
-) {
- msg := icon + " " + format
- var output string
- if ui.enableColors && colorFunc != nil {
- output = colorFunc(msg, args...)
+// PrintSuccess prints a success message in green.
+func (ui *UIManager) PrintSuccess(format string, args ...any) {
+ if ui.silentMode {
+ return
+ }
+ if ui.enableColors {
+ color.Green("✓ "+format, args...)
} else {
- output = fmt.Sprintf(msg, args...)
- }
-
- if _, err := fmt.Fprintf(ui.output, "%s\n", output); err != nil {
- gibidiutils.LogError(fmt.Sprintf("UIManager.%s: failed to write to output", methodName), err)
+ ui.printf("✓ "+format+"\n", args...)
}
}
-// PrintSuccess prints a success message in green (to ui.output if set).
-func (ui *UIManager) PrintSuccess(format string, args ...interface{}) {
- ui.writeMessage(gibidiutils.IconSuccess, "PrintSuccess", format, color.GreenString, args...)
+// PrintError prints an error message in red.
+func (ui *UIManager) PrintError(format string, args ...any) {
+ if ui.silentMode {
+ return
+ }
+ if ui.enableColors {
+ color.Red("✗ "+format, args...)
+ } else {
+ ui.printf("✗ "+format+"\n", args...)
+ }
}
-// PrintError prints an error message in red (to ui.output if set).
-func (ui *UIManager) PrintError(format string, args ...interface{}) {
- ui.writeMessage(gibidiutils.IconError, "PrintError", format, color.RedString, args...)
+// PrintWarning prints a warning message in yellow.
+func (ui *UIManager) PrintWarning(format string, args ...any) {
+ if ui.silentMode {
+ return
+ }
+ if ui.enableColors {
+ color.Yellow("⚠ "+format, args...)
+ } else {
+ ui.printf("⚠ "+format+"\n", args...)
+ }
}
-// PrintWarning prints a warning message in yellow (to ui.output if set).
-func (ui *UIManager) PrintWarning(format string, args ...interface{}) {
- ui.writeMessage(gibidiutils.IconWarning, "PrintWarning", format, color.YellowString, args...)
-}
-
-// PrintInfo prints an info message in blue (to ui.output if set).
-func (ui *UIManager) PrintInfo(format string, args ...interface{}) {
- ui.writeMessage(gibidiutils.IconInfo, "PrintInfo", format, color.BlueString, args...)
+// PrintInfo prints an info message in blue.
+func (ui *UIManager) PrintInfo(format string, args ...any) {
+ if ui.silentMode {
+ return
+ }
+ if ui.enableColors {
+ //nolint:errcheck // UI output, errors don't affect processing
+ color.Blue("ℹ "+format, args...)
+ } else {
+ ui.printf("ℹ "+format+"\n", args...)
+ }
}
// PrintHeader prints a header message in bold.
-func (ui *UIManager) PrintHeader(format string, args ...interface{}) {
+func (ui *UIManager) PrintHeader(format string, args ...any) {
+ if ui.silentMode {
+ return
+ }
if ui.enableColors {
+ //nolint:errcheck // UI output, errors don't affect processing
_, _ = color.New(color.Bold).Fprintf(ui.output, format+"\n", args...)
} else {
ui.printf(format+"\n", args...)
@@ -150,11 +164,6 @@ func (ui *UIManager) PrintHeader(format string, args ...interface{}) {
// isColorTerminal checks if the terminal supports colors.
func isColorTerminal() bool {
- // Check if FORCE_COLOR is set
- if os.Getenv("FORCE_COLOR") != "" {
- return true
- }
-
// Check common environment variables
term := os.Getenv("TERM")
if term == "" || term == "dumb" {
@@ -164,7 +173,7 @@ func isColorTerminal() bool {
// Check for CI environments that typically don't support colors
if os.Getenv("CI") != "" {
// GitHub Actions supports colors
- if os.Getenv("GITHUB_ACTIONS") == "true" {
+ if os.Getenv("GITHUB_ACTIONS") == shared.LiteralTrue {
return true
}
// Most other CI systems don't
@@ -176,7 +185,13 @@ func isColorTerminal() bool {
return false
}
- return true
+ // Check if FORCE_COLOR is set
+ if os.Getenv("FORCE_COLOR") != "" {
+ return true
+ }
+
+ // Default to true for interactive terminals
+ return isInteractiveTerminal()
}
// isInteractiveTerminal checks if we're running in an interactive terminal.
@@ -186,10 +201,11 @@ func isInteractiveTerminal() bool {
if err != nil {
return false
}
+
return (fileInfo.Mode() & os.ModeCharDevice) != 0
}
// printf is a helper that ignores printf errors (for UI output).
-func (ui *UIManager) printf(format string, args ...interface{}) {
+func (ui *UIManager) printf(format string, args ...any) {
_, _ = fmt.Fprintf(ui.output, format, args...)
}
diff --git a/cli/ui_manager_test.go b/cli/ui_manager_test.go
deleted file mode 100644
index 5b26487..0000000
--- a/cli/ui_manager_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package cli
-
-import (
- "bytes"
- "os"
- "testing"
-
- "github.com/fatih/color"
- "github.com/stretchr/testify/assert"
-)
-
-func TestNewUIManager(t *testing.T) {
- tests := []struct {
- name string
- env terminalEnvSetup
- expectedColors bool
- expectedProgress bool
- }{
- {
- name: "default terminal",
- env: envDefaultTerminal,
- expectedColors: true,
- expectedProgress: false, // Not a tty in test environment
- },
- {
- name: "dumb terminal",
- env: envDumbTerminal,
- expectedColors: false,
- expectedProgress: false,
- },
- {
- name: "CI environment without GitHub Actions",
- env: envCIWithoutGitHub,
- expectedColors: false,
- expectedProgress: false,
- },
- {
- name: "GitHub Actions CI",
- env: envGitHubActions,
- expectedColors: true,
- expectedProgress: false,
- },
- {
- name: "NO_COLOR set",
- env: envNoColor,
- expectedColors: false,
- expectedProgress: false,
- },
- {
- name: "FORCE_COLOR set",
- env: envForceColor,
- expectedColors: true,
- expectedProgress: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- tt.env.apply(t)
-
- ui := NewUIManager()
- assert.NotNil(t, ui)
- assert.NotNil(t, ui.output)
- assert.Equal(t, tt.expectedColors, ui.enableColors, "color state mismatch")
- assert.Equal(t, tt.expectedProgress, ui.enableProgress, "progress state mismatch")
- })
- }
-}
-
-func TestSetColorOutput(t *testing.T) {
- // Capture original color.NoColor state and restore after test
- orig := color.NoColor
- defer func() { color.NoColor = orig }()
-
- ui := &UIManager{output: os.Stderr}
-
- // Test enabling colors
- ui.SetColorOutput(true)
- assert.False(t, color.NoColor)
- assert.True(t, ui.enableColors)
-
- // Test disabling colors
- ui.SetColorOutput(false)
- assert.True(t, color.NoColor)
- assert.False(t, ui.enableColors)
-}
-
-func TestSetProgressOutput(t *testing.T) {
- ui := &UIManager{output: os.Stderr}
-
- // Test enabling progress
- ui.SetProgressOutput(true)
- assert.True(t, ui.enableProgress)
-
- // Test disabling progress
- ui.SetProgressOutput(false)
- assert.False(t, ui.enableProgress)
-}
-
-func TestPrintf(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- output: buf,
- }
-
- ui.printf("Test %s %d", "output", 123)
-
- assert.Equal(t, "Test output 123", buf.String())
-}
diff --git a/cli/ui_print_test.go b/cli/ui_print_test.go
deleted file mode 100644
index f21c4fa..0000000
--- a/cli/ui_print_test.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package cli
-
-import (
- "bytes"
- "strings"
- "testing"
-
- "github.com/fatih/color"
- "github.com/stretchr/testify/assert"
-
- "github.com/ivuorinen/gibidify/gibidiutils"
-)
-
-func TestPrintSuccess(t *testing.T) {
- tests := []struct {
- name string
- enableColors bool
- format string
- args []interface{}
- expectSymbol string
- }{
- {
- name: testWithColors,
- enableColors: true,
- format: "Operation %s",
- args: []interface{}{"completed"},
- expectSymbol: gibidiutils.IconSuccess,
- },
- {
- name: testWithoutColors,
- enableColors: false,
- format: "Operation %s",
- args: []interface{}{"completed"},
- expectSymbol: gibidiutils.IconSuccess,
- },
- {
- name: "no arguments",
- enableColors: true,
- format: "Success",
- args: nil,
- expectSymbol: gibidiutils.IconSuccess,
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: tt.enableColors,
- output: buf,
- }
- prev := color.NoColor
- color.NoColor = !tt.enableColors
- defer func() { color.NoColor = prev }()
-
- ui.PrintSuccess(tt.format, tt.args...)
-
- output := buf.String()
- assert.Contains(t, output, tt.expectSymbol)
- if len(tt.args) > 0 {
- assert.Contains(t, output, "completed")
- }
- },
- )
- }
-}
-
-func TestPrintError(t *testing.T) {
- tests := []struct {
- name string
- enableColors bool
- format string
- args []interface{}
- expectSymbol string
- }{
- {
- name: testWithColors,
- enableColors: true,
- format: "Failed to %s",
- args: []interface{}{"process"},
- expectSymbol: gibidiutils.IconError,
- },
- {
- name: testWithoutColors,
- enableColors: false,
- format: "Failed to %s",
- args: []interface{}{"process"},
- expectSymbol: gibidiutils.IconError,
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: tt.enableColors,
- output: buf,
- }
- prev := color.NoColor
- color.NoColor = !tt.enableColors
- defer func() { color.NoColor = prev }()
-
- ui.PrintError(tt.format, tt.args...)
-
- output := buf.String()
- assert.Contains(t, output, tt.expectSymbol)
- if len(tt.args) > 0 {
- assert.Contains(t, output, "process")
- }
- },
- )
- }
-}
-
-func TestPrintWarning(t *testing.T) {
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: true,
- output: buf,
- }
-
- ui.PrintWarning("This is a %s", "warning")
-
- output := buf.String()
- assert.Contains(t, output, gibidiutils.IconWarning)
-}
-
-func TestPrintInfo(t *testing.T) {
- // Capture original color.NoColor state and restore after test
- orig := color.NoColor
- defer func() { color.NoColor = orig }()
-
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: true,
- output: buf,
- }
-
- color.NoColor = false
-
- ui.PrintInfo("Information: %d items", 42)
-
- output := buf.String()
- assert.Contains(t, output, gibidiutils.IconInfo)
- assert.Contains(t, output, "42")
-}
-
-func TestPrintHeader(t *testing.T) {
- tests := []struct {
- name string
- enableColors bool
- format string
- args []interface{}
- }{
- {
- name: testWithColors,
- enableColors: true,
- format: "Header %s",
- args: []interface{}{"Title"},
- },
- {
- name: testWithoutColors,
- enableColors: false,
- format: "Header %s",
- args: []interface{}{"Title"},
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- // Capture original color.NoColor state and restore after test
- orig := color.NoColor
- defer func() { color.NoColor = orig }()
-
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: tt.enableColors,
- output: buf,
- }
- color.NoColor = !tt.enableColors
-
- ui.PrintHeader(tt.format, tt.args...)
-
- output := buf.String()
- assert.Contains(t, output, "Title")
- },
- )
- }
-}
-
-// Test that all print methods handle newlines correctly
-func TestPrintMethodsNewlines(t *testing.T) {
- tests := []struct {
- name string
- method func(*UIManager, string, ...interface{})
- symbol string
- }{
- {
- name: "PrintSuccess",
- method: (*UIManager).PrintSuccess,
- symbol: gibidiutils.IconSuccess,
- },
- {
- name: "PrintError",
- method: (*UIManager).PrintError,
- symbol: gibidiutils.IconError,
- },
- {
- name: "PrintWarning",
- method: (*UIManager).PrintWarning,
- symbol: gibidiutils.IconWarning,
- },
- {
- name: "PrintInfo",
- method: (*UIManager).PrintInfo,
- symbol: gibidiutils.IconInfo,
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- // Disable colors for consistent testing
- oldNoColor := color.NoColor
- color.NoColor = true
- defer func() { color.NoColor = oldNoColor }()
-
- buf := &bytes.Buffer{}
- ui := &UIManager{
- enableColors: false,
- output: buf,
- }
-
- tt.method(ui, "Test message")
-
- output := buf.String()
- assert.True(t, strings.HasSuffix(output, "\n"))
- assert.Contains(t, output, tt.symbol)
- },
- )
- }
-}
diff --git a/cli/ui_progress_test.go b/cli/ui_progress_test.go
deleted file mode 100644
index e9a8d6f..0000000
--- a/cli/ui_progress_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package cli
-
-import (
- "bytes"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestStartProgress(t *testing.T) {
- tests := []struct {
- name string
- total int
- description string
- enabled bool
- expectBar bool
- }{
- {
- name: "progress enabled with valid total",
- total: 100,
- description: testProcessingMsg,
- enabled: true,
- expectBar: true,
- },
- {
- name: "progress disabled",
- total: 100,
- description: testProcessingMsg,
- enabled: false,
- expectBar: false,
- },
- {
- name: "zero total",
- total: 0,
- description: testProcessingMsg,
- enabled: true,
- expectBar: false,
- },
- {
- name: "negative total",
- total: -5,
- description: testProcessingMsg,
- enabled: true,
- expectBar: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- ui := &UIManager{
- enableProgress: tt.enabled,
- output: &bytes.Buffer{},
- }
-
- ui.StartProgress(tt.total, tt.description)
-
- if tt.expectBar {
- assert.NotNil(t, ui.progressBar)
- } else {
- assert.Nil(t, ui.progressBar)
- }
- },
- )
- }
-}
-
-func TestUpdateProgress(t *testing.T) {
- tests := []struct {
- name string
- setupBar bool
- enabledProg bool
- expectUpdate bool
- }{
- {
- name: "with progress bar",
- setupBar: true,
- enabledProg: true,
- expectUpdate: true,
- },
- {
- name: "without progress bar",
- setupBar: false,
- enabledProg: false,
- expectUpdate: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(_ *testing.T) {
- ui := &UIManager{
- enableProgress: tt.enabledProg,
- output: &bytes.Buffer{},
- }
-
- if tt.setupBar {
- ui.StartProgress(10, "Test")
- }
-
- // Should not panic
- ui.UpdateProgress(1)
-
- // Multiple updates should not panic
- ui.UpdateProgress(2)
- ui.UpdateProgress(3)
- },
- )
- }
-}
-
-func TestFinishProgress(t *testing.T) {
- tests := []struct {
- name string
- setupBar bool
- }{
- {
- name: "with progress bar",
- setupBar: true,
- },
- {
- name: "without progress bar",
- setupBar: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(
- tt.name, func(t *testing.T) {
- ui := &UIManager{
- enableProgress: true,
- output: &bytes.Buffer{},
- }
-
- if tt.setupBar {
- ui.StartProgress(10, "Test")
- }
-
- // Should not panic
- ui.FinishProgress()
-
- // Bar should be cleared
- assert.Nil(t, ui.progressBar)
- },
- )
- }
-}
diff --git a/cli/ui_terminal_test.go b/cli/ui_terminal_test.go
deleted file mode 100644
index 924f8e4..0000000
--- a/cli/ui_terminal_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package cli
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestIsColorTerminal(t *testing.T) {
- tests := []struct {
- name string
- env terminalEnvSetup
- expected bool
- }{
- {
- name: "dumb terminal",
- env: envDumbTerminal,
- expected: false,
- },
- {
- name: "empty TERM",
- env: envEmptyTerm,
- expected: false,
- },
- {
- name: "CI without GitHub Actions",
- env: envCIWithoutGitHub,
- expected: false,
- },
- {
- name: "GitHub Actions",
- env: envGitHubActions,
- expected: true,
- },
- {
- name: "NO_COLOR set",
- env: envNoColor,
- expected: false,
- },
- {
- name: "FORCE_COLOR set",
- env: envForceColor,
- expected: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- tt.env.apply(t)
-
- result := isColorTerminal()
- assert.Equal(t, tt.expected, result)
- })
- }
-}
-
-func TestIsInteractiveTerminal(t *testing.T) {
- // This function checks if stderr is a terminal
- // In test environment, it will typically return false
- result := isInteractiveTerminal()
- assert.False(t, result)
-}
diff --git a/cli/ui_test.go b/cli/ui_test.go
new file mode 100644
index 0000000..41a424c
--- /dev/null
+++ b/cli/ui_test.go
@@ -0,0 +1,531 @@
+package cli
+
+import (
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+func TestNewUIManager(t *testing.T) {
+ ui := NewUIManager()
+
+ if ui == nil {
+ t.Error("NewUIManager() returned nil")
+
+ return
+ }
+ if ui.output == nil {
+ t.Error("NewUIManager() did not set output")
+
+ return
+ }
+ if ui.output != os.Stderr {
+ t.Error("NewUIManager() should default output to os.Stderr")
+ }
+}
+
+func TestUIManagerSetColorOutput(t *testing.T) {
+ ui := NewUIManager()
+
+ // Test enabling colors
+ ui.SetColorOutput(true)
+ if !ui.enableColors {
+ t.Error("SetColorOutput(true) did not enable colors")
+ }
+
+ // Test disabling colors
+ ui.SetColorOutput(false)
+ if ui.enableColors {
+ t.Error("SetColorOutput(false) did not disable colors")
+ }
+}
+
+func TestUIManagerSetProgressOutput(t *testing.T) {
+ ui := NewUIManager()
+
+ // Test enabling progress
+ ui.SetProgressOutput(true)
+ if !ui.enableProgress {
+ t.Error("SetProgressOutput(true) did not enable progress")
+ }
+
+ // Test disabling progress
+ ui.SetProgressOutput(false)
+ if ui.enableProgress {
+ t.Error("SetProgressOutput(false) did not disable progress")
+ }
+}
+
+func TestUIManagerStartProgress(t *testing.T) {
+ tests := []struct {
+ name string
+ total int
+ description string
+ enabled bool
+ expectBar bool
+ }{
+ {
+ name: "valid progress with enabled progress",
+ total: 10,
+ description: shared.TestProgressMessage,
+ enabled: true,
+ expectBar: true,
+ },
+ {
+ name: "disabled progress should not create bar",
+ total: 10,
+ description: shared.TestProgressMessage,
+ enabled: false,
+ expectBar: false,
+ },
+ {
+ name: "zero total should not create bar",
+ total: 0,
+ description: shared.TestProgressMessage,
+ enabled: true,
+ expectBar: false,
+ },
+ {
+ name: "negative total should not create bar",
+ total: -1,
+ description: shared.TestProgressMessage,
+ enabled: true,
+ expectBar: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ ui, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
+ ui.SetProgressOutput(tt.enabled)
+
+ ui.StartProgress(tt.total, tt.description)
+
+ if tt.expectBar && ui.progressBar == nil {
+ t.Error("StartProgress() should have created progress bar but didn't")
+ }
+ if !tt.expectBar && ui.progressBar != nil {
+ t.Error("StartProgress() should not have created progress bar but did")
+ }
+ },
+ )
+ }
+}
+
+func TestUIManagerUpdateProgress(t *testing.T) {
+ ui, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
+ ui.SetProgressOutput(true)
+
+ // Test with no progress bar (should not panic)
+ ui.UpdateProgress(1)
+
+ // Test with progress bar
+ ui.StartProgress(10, "Test progress")
+ if ui.progressBar == nil {
+ t.Fatal("StartProgress() did not create progress bar")
+ }
+
+ // Should not panic
+ ui.UpdateProgress(1)
+ ui.UpdateProgress(5)
+}
+
+func TestUIManagerFinishProgress(t *testing.T) {
+ ui, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
+ ui.SetProgressOutput(true)
+
+ // Test with no progress bar (should not panic)
+ ui.FinishProgress()
+
+ // Test with progress bar
+ ui.StartProgress(10, "Test progress")
+ if ui.progressBar == nil {
+ t.Fatal("StartProgress() did not create progress bar")
+ }
+
+ ui.FinishProgress()
+ if ui.progressBar != nil {
+ t.Error("FinishProgress() should have cleared progress bar")
+ }
+}
+
+// testPrintMethod is a helper function to test UI print methods without duplication.
+type printMethodTest struct {
+ name string
+ enableColors bool
+ format string
+ args []any
+ expectedText string
+}
+
+func testPrintMethod(
+ t *testing.T,
+ methodName string,
+ printFunc func(*UIManager, string, ...any),
+ tests []printMethodTest,
+) {
+ t.Helper()
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ ui, output := createTestUI()
+ ui.SetColorOutput(tt.enableColors)
+
+ printFunc(ui, tt.format, tt.args...)
+
+ if !tt.enableColors {
+ outputStr := output.String()
+ if !strings.Contains(outputStr, tt.expectedText) {
+ t.Errorf("%s() output %q should contain %q", methodName, outputStr, tt.expectedText)
+ }
+ }
+ },
+ )
+ }
+
+ // Test color method separately (doesn't capture output but shouldn't panic)
+ t.Run(
+ methodName+" with colors should not panic", func(_ *testing.T) {
+ ui, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
+ ui.SetColorOutput(true)
+ // Should not panic
+ printFunc(ui, "Test message")
+ },
+ )
+}
+
+func TestUIManagerPrintSuccess(t *testing.T) {
+ tests := []printMethodTest{
+ {
+ name: "success without colors",
+ enableColors: false,
+ format: "Operation completed successfully",
+ args: []any{},
+ expectedText: "✓ Operation completed successfully",
+ },
+ {
+ name: "success with args without colors",
+ enableColors: false,
+ format: "Processed %d files in %s",
+ args: []any{5, "project"},
+ expectedText: "✓ Processed 5 files in project",
+ },
+ }
+
+ testPrintMethod(
+ t, "PrintSuccess", func(ui *UIManager, format string, args ...any) {
+ ui.PrintSuccess(format, args...)
+ }, tests,
+ )
+}
+
+func TestUIManagerPrintError(t *testing.T) {
+ tests := []printMethodTest{
+ {
+ name: "error without colors",
+ enableColors: false,
+ format: "Operation failed",
+ args: []any{},
+ expectedText: "✗ Operation failed",
+ },
+ {
+ name: "error with args without colors",
+ enableColors: false,
+ format: "Failed to process %d files",
+ args: []any{3},
+ expectedText: "✗ Failed to process 3 files",
+ },
+ }
+
+ testPrintMethod(
+ t, "PrintError", func(ui *UIManager, format string, args ...any) {
+ ui.PrintError(format, args...)
+ }, tests,
+ )
+}
+
+func TestUIManagerPrintWarning(t *testing.T) {
+ tests := []printMethodTest{
+ {
+ name: "warning without colors",
+ enableColors: false,
+ format: "This is a warning",
+ args: []any{},
+ expectedText: "⚠ This is a warning",
+ },
+ {
+ name: "warning with args without colors",
+ enableColors: false,
+ format: "Found %d potential issues",
+ args: []any{2},
+ expectedText: "⚠ Found 2 potential issues",
+ },
+ }
+
+ testPrintMethod(
+ t, "PrintWarning", func(ui *UIManager, format string, args ...any) {
+ ui.PrintWarning(format, args...)
+ }, tests,
+ )
+}
+
+func TestUIManagerPrintInfo(t *testing.T) {
+ tests := []printMethodTest{
+ {
+ name: "info without colors",
+ enableColors: false,
+ format: "Information message",
+ args: []any{},
+ expectedText: "ℹ Information message",
+ },
+ {
+ name: "info with args without colors",
+ enableColors: false,
+ format: "Processing file %s",
+ args: []any{"example.go"},
+ expectedText: "ℹ Processing file example.go",
+ },
+ }
+
+ testPrintMethod(
+ t, "PrintInfo", func(ui *UIManager, format string, args ...any) {
+ ui.PrintInfo(format, args...)
+ }, tests,
+ )
+}
+
+func TestUIManagerPrintHeader(t *testing.T) {
+ tests := []struct {
+ name string
+ enableColors bool
+ format string
+ args []any
+ expectedText string
+ }{
+ {
+ name: "header without colors",
+ enableColors: false,
+ format: "Main Header",
+ args: []any{},
+ expectedText: "Main Header",
+ },
+ {
+ name: "header with args without colors",
+ enableColors: false,
+ format: "Processing %s Module",
+ args: []any{"CLI"},
+ expectedText: "Processing CLI Module",
+ },
+ {
+ name: "header with colors",
+ enableColors: true,
+ format: "Build Results",
+ args: []any{},
+ expectedText: "Build Results",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ ui, output := createTestUI()
+ ui.SetColorOutput(tt.enableColors)
+
+ ui.PrintHeader(tt.format, tt.args...)
+
+ outputStr := output.String()
+ if !strings.Contains(outputStr, tt.expectedText) {
+ t.Errorf("PrintHeader() output %q should contain %q", outputStr, tt.expectedText)
+ }
+ },
+ )
+ }
+}
+
+// colorTerminalTestCase represents a test case for color terminal detection.
+type colorTerminalTestCase struct {
+ name string
+ term string
+ ci string
+ githubActions string
+ noColor string
+ forceColor string
+ expected bool
+}
+
+// clearColorTerminalEnvVars clears all environment variables used for terminal color detection.
+func clearColorTerminalEnvVars(t *testing.T) {
+ t.Helper()
+ envVars := []string{"TERM", "CI", "GITHUB_ACTIONS", "NO_COLOR", "FORCE_COLOR"}
+ for _, envVar := range envVars {
+ if err := os.Unsetenv(envVar); err != nil {
+ t.Logf("Failed to unset %s: %v", envVar, err)
+ }
+ }
+}
+
+// setColorTerminalTestEnv sets up environment variables for a test case.
+func setColorTerminalTestEnv(t *testing.T, testCase colorTerminalTestCase) {
+ t.Helper()
+
+ envSettings := map[string]string{
+ "TERM": testCase.term,
+ "CI": testCase.ci,
+ "GITHUB_ACTIONS": testCase.githubActions,
+ "NO_COLOR": testCase.noColor,
+ "FORCE_COLOR": testCase.forceColor,
+ }
+
+ for key, value := range envSettings {
+ if value != "" {
+ t.Setenv(key, value)
+ }
+ }
+}
+
+func TestIsColorTerminal(t *testing.T) {
+ // Save original environment
+ originalEnv := map[string]string{
+ "TERM": os.Getenv("TERM"),
+ "CI": os.Getenv("CI"),
+ "GITHUB_ACTIONS": os.Getenv("GITHUB_ACTIONS"),
+ "NO_COLOR": os.Getenv("NO_COLOR"),
+ "FORCE_COLOR": os.Getenv("FORCE_COLOR"),
+ }
+
+ defer func() {
+ // Restore original environment
+ for key, value := range originalEnv {
+ setEnvOrUnset(key, value)
+ }
+ }()
+
+ tests := []colorTerminalTestCase{
+ {
+ name: "dumb terminal",
+ term: "dumb",
+ expected: false,
+ },
+ {
+ name: "empty term",
+ term: "",
+ expected: false,
+ },
+ {
+ name: "github actions with CI",
+ term: shared.TestTerminalXterm256,
+ ci: "true",
+ githubActions: "true",
+ expected: true,
+ },
+ {
+ name: "CI without github actions",
+ term: shared.TestTerminalXterm256,
+ ci: "true",
+ expected: false,
+ },
+ {
+ name: "NO_COLOR set",
+ term: shared.TestTerminalXterm256,
+ noColor: "1",
+ expected: false,
+ },
+ {
+ name: "FORCE_COLOR set",
+ term: shared.TestTerminalXterm256,
+ forceColor: "1",
+ expected: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ clearColorTerminalEnvVars(t)
+ setColorTerminalTestEnv(t, tt)
+
+ result := isColorTerminal()
+ if result != tt.expected {
+ t.Errorf("isColorTerminal() = %v, want %v", result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestIsInteractiveTerminal(_ *testing.T) {
+ // This test is limited because we can't easily mock os.Stderr.Stat()
+ // but we can at least verify it doesn't panic and returns a boolean
+ result := isInteractiveTerminal()
+
+ // Result should be a boolean (true or false, both are valid)
+ // result is already a boolean, so this check is always satisfied
+ _ = result
+}
+
+func TestUIManagerprintf(t *testing.T) {
+ ui, output := createTestUI()
+
+ ui.printf("Hello %s", "world")
+
+ expected := "Hello world"
+ if output.String() != expected {
+ t.Errorf("printf() = %q, want %q", output.String(), expected)
+ }
+}
+
+// Helper function to set environment variable or unset if empty.
+func setEnvOrUnset(key, value string) {
+ if value == "" {
+ if err := os.Unsetenv(key); err != nil {
+ // In tests, environment variable errors are not critical,
+ // but we should still handle them to avoid linting issues
+ _ = err // explicitly ignore error
+ }
+ } else {
+ if err := os.Setenv(key, value); err != nil {
+ // In tests, environment variable errors are not critical,
+ // but we should still handle them to avoid linting issues
+ _ = err // explicitly ignore error
+ }
+ }
+}
+
+// Integration test for UI workflow.
+func TestUIManagerIntegration(t *testing.T) {
+ ui, output := createTestUI() //nolint:errcheck // Test helper, output buffer is used
+ ui.SetColorOutput(false) // Disable colors for consistent output
+ ui.SetProgressOutput(false) // Disable progress for testing
+
+ // Simulate a complete UI workflow
+ ui.PrintHeader("Starting Processing")
+ ui.PrintInfo("Initializing system")
+ ui.StartProgress(3, shared.TestProgressMessage)
+ ui.UpdateProgress(1)
+ ui.PrintInfo("Processing file 1")
+ ui.UpdateProgress(1)
+ ui.PrintWarning("Skipping invalid file")
+ ui.UpdateProgress(1)
+ ui.FinishProgress()
+ ui.PrintSuccess("Processing completed successfully")
+
+ outputStr := output.String()
+
+ expectedStrings := []string{
+ "Starting Processing",
+ "ℹ Initializing system",
+ "ℹ Processing file 1",
+ "⚠ Skipping invalid file",
+ "✓ Processing completed successfully",
+ }
+
+ for _, expected := range expectedStrings {
+ if !strings.Contains(outputStr, expected) {
+ t.Errorf("Integration test output missing expected string: %q\nFull output:\n%s", expected, outputStr)
+ }
+ }
+}
diff --git a/cmd/benchmark/main.go b/cmd/benchmark/main.go
index 63ec906..67eb447 100644
--- a/cmd/benchmark/main.go
+++ b/cmd/benchmark/main.go
@@ -9,38 +9,60 @@ import (
"strings"
"github.com/ivuorinen/gibidify/benchmark"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
var (
- sourceDir = flag.String("source", "", "Source directory to benchmark (uses temp files if empty)")
- benchmarkType = flag.String("type", "all", "Benchmark type: all, collection, processing, concurrency, format")
- format = flag.String("format", "json", "Output format for processing benchmarks")
- concurrency = flag.Int("concurrency", runtime.NumCPU(), "Concurrency level for processing benchmarks")
- concurrencyList = flag.String("concurrency-list", "1,2,4,8", "Comma-separated list of concurrency levels")
- formatList = flag.String("format-list", "json,yaml,markdown", "Comma-separated list of formats")
- numFiles = flag.Int("files", 100, "Number of files to create for benchmarks")
+ sourceDir = flag.String(
+ shared.CLIArgSource, "", "Source directory to benchmark (uses temp files if empty)",
+ )
+ benchmarkType = flag.String(
+ "type", shared.CLIArgAll, "Benchmark type: all, collection, processing, concurrency, format",
+ )
+ format = flag.String(
+ shared.CLIArgFormat, shared.FormatJSON, "Output format for processing benchmarks",
+ )
+ concurrency = flag.Int(
+ shared.CLIArgConcurrency, runtime.NumCPU(), "Concurrency level for processing benchmarks",
+ )
+ concurrencyList = flag.String(
+ "concurrency-list", shared.TestConcurrencyList, "Comma-separated list of concurrency levels",
+ )
+ formatList = flag.String(
+ "format-list", shared.TestFormatList, "Comma-separated list of formats",
+ )
+ numFiles = flag.Int("files", shared.BenchmarkDefaultFileCount, "Number of files to create for benchmarks")
)
func main() {
flag.Parse()
if err := runBenchmarks(); err != nil {
+ //goland:noinspection GoUnhandledErrorResult
_, _ = fmt.Fprintf(os.Stderr, "Benchmark failed: %v\n", err)
os.Exit(1)
}
}
func runBenchmarks() error {
- fmt.Printf("Running gibidify benchmarks...\n")
- fmt.Printf("Source: %s\n", getSourceDescription())
- fmt.Printf("Type: %s\n", *benchmarkType)
- fmt.Printf("CPU cores: %d\n", runtime.NumCPU())
- fmt.Println()
+ //nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
+ _, _ = fmt.Println("Running gibidify benchmarks...")
+ //nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
+ _, _ = fmt.Printf("Source: %s\n", getSourceDescription())
+ //nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
+ _, _ = fmt.Printf("Type: %s\n", *benchmarkType)
+ //nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
+ _, _ = fmt.Printf("CPU cores: %d\n", runtime.NumCPU())
+ //nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
+ _, _ = fmt.Println()
switch *benchmarkType {
- case "all":
- return benchmark.RunAllBenchmarks(*sourceDir)
+ case shared.CLIArgAll:
+ if err := benchmark.RunAllBenchmarks(*sourceDir); err != nil {
+ return fmt.Errorf("benchmark failed: %w", err)
+ }
+
+ return nil
case "collection":
return runCollectionBenchmark()
case "processing":
@@ -50,81 +72,79 @@ func runBenchmarks() error {
case "format":
return runFormatBenchmark()
default:
- return gibidiutils.NewValidationError(
- gibidiutils.CodeValidationFormat,
- "invalid benchmark type: "+*benchmarkType,
- )
+ return shared.NewValidationError(shared.CodeValidationFormat, "invalid benchmark type: "+*benchmarkType)
}
}
func runCollectionBenchmark() error {
- fmt.Println("Running file collection benchmark...")
+ //nolint:errcheck // Benchmark status message, errors don't affect benchmark results
+ _, _ = fmt.Println(shared.BenchmarkMsgRunningCollection)
result, err := benchmark.FileCollectionBenchmark(*sourceDir, *numFiles)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "file collection benchmark failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgFileCollectionFailed,
)
}
benchmark.PrintResult(result)
+
return nil
}
func runProcessingBenchmark() error {
- fmt.Printf("Running file processing benchmark (format: %s, concurrency: %d)...\n", *format, *concurrency)
+ //nolint:errcheck // Benchmark status message, errors don't affect benchmark results
+ _, _ = fmt.Printf("Running file processing benchmark (format: %s, concurrency: %d)...\n", *format, *concurrency)
result, err := benchmark.FileProcessingBenchmark(*sourceDir, *format, *concurrency)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
"file processing benchmark failed",
)
}
benchmark.PrintResult(result)
+
return nil
}
func runConcurrencyBenchmark() error {
concurrencyLevels, err := parseConcurrencyList(*concurrencyList)
if err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- "invalid concurrency list",
- )
+ return shared.WrapError(
+ err, shared.ErrorTypeValidation, shared.CodeValidationFormat, "invalid concurrency list")
}
- fmt.Printf("Running concurrency benchmark (format: %s, levels: %v)...\n", *format, concurrencyLevels)
+ //nolint:errcheck // Benchmark status message, errors don't affect benchmark results
+ _, _ = fmt.Printf("Running concurrency benchmark (format: %s, levels: %v)...\n", *format, concurrencyLevels)
suite, err := benchmark.ConcurrencyBenchmark(*sourceDir, *format, concurrencyLevels)
if err != nil {
- return gibidiutils.WrapError(
+ return shared.WrapError(
err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "concurrency benchmark failed",
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingCollection,
+ shared.BenchmarkMsgConcurrencyFailed,
)
}
benchmark.PrintSuite(suite)
+
return nil
}
func runFormatBenchmark() error {
formats := parseFormatList(*formatList)
- fmt.Printf("Running format benchmark (formats: %v)...\n", formats)
+ //nolint:errcheck // Benchmark status message, errors don't affect benchmark results
+ _, _ = fmt.Printf("Running format benchmark (formats: %v)...\n", formats)
suite, err := benchmark.FormatBenchmark(*sourceDir, formats)
if err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeProcessing,
- gibidiutils.CodeProcessingCollection,
- "format benchmark failed",
+ return shared.WrapError(
+ err, shared.ErrorTypeProcessing, shared.CodeProcessingCollection, shared.BenchmarkMsgFormatFailed,
)
}
benchmark.PrintSuite(suite)
+
return nil
}
@@ -132,6 +152,7 @@ func getSourceDescription() string {
if *sourceDir == "" {
return fmt.Sprintf("temporary files (%d files)", *numFiles)
}
+
return *sourceDir
}
@@ -143,28 +164,24 @@ func parseConcurrencyList(list string) ([]int, error) {
part = strings.TrimSpace(part)
var level int
if _, err := fmt.Sscanf(part, "%d", &level); err != nil {
- return nil, gibidiutils.WrapErrorf(
+ return nil, shared.WrapErrorf(
err,
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
+ shared.ErrorTypeValidation,
+ shared.CodeValidationFormat,
"invalid concurrency level: %s",
part,
)
}
if level <= 0 {
- return nil, gibidiutils.NewValidationError(
- gibidiutils.CodeValidationFormat,
- "concurrency level must be positive: "+part,
+ return nil, shared.NewValidationError(
+ shared.CodeValidationFormat, "concurrency level must be positive: "+part,
)
}
levels = append(levels, level)
}
if len(levels) == 0 {
- return nil, gibidiutils.NewValidationError(
- gibidiutils.CodeValidationFormat,
- "no valid concurrency levels found",
- )
+ return nil, shared.NewValidationError(shared.CodeValidationFormat, "no valid concurrency levels found")
}
return levels, nil
diff --git a/cmd/benchmark/main_test.go b/cmd/benchmark/main_test.go
new file mode 100644
index 0000000..e934a79
--- /dev/null
+++ b/cmd/benchmark/main_test.go
@@ -0,0 +1,751 @@
+package main
+
+import (
+ "errors"
+ "flag"
+ "io"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
+)
+
+// Test constants to avoid goconst linting issues.
+const (
+ testJSON = "json"
+ testMarkdown = "markdown"
+ testConcurrency = "1,2"
+ testAll = "all"
+ testCollection = "collection"
+ testConcurrencyT = "concurrency"
+ testNonExistent = "/nonexistent/path/that/should/not/exist"
+ testFile1 = "test1.txt"
+ testFile2 = "test2.txt"
+ testContent1 = "content1"
+ testContent2 = "content2"
+)
+
+func TestParseConcurrencyList(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ want []int
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "valid single value",
+ input: "4",
+ want: []int{4},
+ wantErr: false,
+ },
+ {
+ name: "valid multiple values",
+ input: shared.TestConcurrencyList,
+ want: []int{1, 2, 4, 8},
+ wantErr: false,
+ },
+ {
+ name: "valid with whitespace",
+ input: " 1 , 2 , 4 , 8 ",
+ want: []int{1, 2, 4, 8},
+ wantErr: false,
+ },
+ {
+ name: "valid single large value",
+ input: "16",
+ want: []int{16},
+ wantErr: false,
+ },
+ {
+ name: "empty string",
+ input: "",
+ wantErr: true,
+ errContains: shared.TestMsgInvalidConcurrencyLevel,
+ },
+ {
+ name: "invalid number",
+ input: "1,abc,4",
+ wantErr: true,
+ errContains: shared.TestMsgInvalidConcurrencyLevel,
+ },
+ {
+ name: "zero value",
+ input: "1,0,4",
+ wantErr: true,
+ errContains: "concurrency level must be positive",
+ },
+ {
+ name: "negative value",
+ input: "1,-2,4",
+ wantErr: true,
+ errContains: "concurrency level must be positive",
+ },
+ {
+ name: "only whitespace",
+ input: " , , ",
+ wantErr: true,
+ errContains: shared.TestMsgInvalidConcurrencyLevel,
+ },
+ {
+ name: "large value list",
+ input: "1,2,4,8,16",
+ want: []int{1, 2, 4, 8, 16},
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := parseConcurrencyList(tt.input)
+
+ if tt.wantErr {
+ testutil.AssertExpectedError(t, err, "parseConcurrencyList")
+ if tt.errContains != "" {
+ testutil.AssertErrorContains(t, err, tt.errContains, "parseConcurrencyList")
+ }
+
+ return
+ }
+
+ testutil.AssertNoError(t, err, "parseConcurrencyList")
+ if !equalSlices(got, tt.want) {
+ t.Errorf("parseConcurrencyList() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestParseFormatList(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ want []string
+ }{
+ {
+ name: "single format",
+ input: "json",
+ want: []string{"json"},
+ },
+ {
+ name: "multiple formats",
+ input: shared.TestFormatList,
+ want: []string{"json", "yaml", "markdown"},
+ },
+ {
+ name: "formats with whitespace",
+ input: " json , yaml , markdown ",
+ want: []string{"json", "yaml", "markdown"},
+ },
+ {
+ name: "empty string",
+ input: "",
+ want: []string{},
+ },
+ {
+ name: "empty parts",
+ input: "json,,yaml",
+ want: []string{"json", "yaml"},
+ },
+ {
+ name: "only whitespace and commas",
+ input: " , , ",
+ want: []string{},
+ },
+ {
+ name: "single format with whitespace",
+ input: " markdown ",
+ want: []string{"markdown"},
+ },
+ {
+ name: "duplicate formats",
+ input: "json,json,yaml",
+ want: []string{"json", "json", "yaml"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := parseFormatList(tt.input)
+ if !equalSlices(got, tt.want) {
+ t.Errorf("parseFormatList() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestGetSourceDescription(t *testing.T) {
+ // Save original flag values and reset after test
+ origSourceDir := sourceDir
+ origNumFiles := numFiles
+ defer func() {
+ sourceDir = origSourceDir
+ numFiles = origNumFiles
+ }()
+
+ tests := []struct {
+ name string
+ sourceDir string
+ numFiles int
+ want string
+ }{
+ {
+ name: "empty source directory with default files",
+ sourceDir: "",
+ numFiles: 100,
+ want: "temporary files (100 files)",
+ },
+ {
+ name: "empty source directory with custom files",
+ sourceDir: "",
+ numFiles: 50,
+ want: "temporary files (50 files)",
+ },
+ {
+ name: "non-empty source directory",
+ sourceDir: "/path/to/source",
+ numFiles: 100,
+ want: "/path/to/source",
+ },
+ {
+ name: "current directory",
+ sourceDir: ".",
+ numFiles: 100,
+ want: ".",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Set flag pointers to test values
+ *sourceDir = tt.sourceDir
+ *numFiles = tt.numFiles
+
+ got := getSourceDescription()
+ if got != tt.want {
+ t.Errorf("getSourceDescription() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestRunCollectionBenchmark(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // Save original flag values
+ origSourceDir := sourceDir
+ origNumFiles := numFiles
+ defer func() {
+ sourceDir = origSourceDir
+ numFiles = origNumFiles
+ }()
+
+ t.Run("success with temp files", func(t *testing.T) {
+ *sourceDir = ""
+ *numFiles = 10
+
+ err := runCollectionBenchmark()
+ testutil.AssertNoError(t, err, "runCollectionBenchmark with temp files")
+ })
+
+ t.Run("success with real directory", func(t *testing.T) {
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ {Name: testFile2, Content: testContent2},
+ })
+
+ *sourceDir = tempDir
+ *numFiles = 10
+
+ err := runCollectionBenchmark()
+ testutil.AssertNoError(t, err, "runCollectionBenchmark with real directory")
+ })
+}
+
+func TestRunProcessingBenchmark(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // Save original flag values
+ origSourceDir := sourceDir
+ origFormat := format
+ origConcurrency := concurrency
+ defer func() {
+ sourceDir = origSourceDir
+ format = origFormat
+ concurrency = origConcurrency
+ }()
+
+ t.Run("success with json format", func(t *testing.T) {
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ {Name: testFile2, Content: testContent2},
+ })
+
+ *sourceDir = tempDir
+ *format = testJSON
+ *concurrency = 2
+
+ err := runProcessingBenchmark()
+ testutil.AssertNoError(t, err, "runProcessingBenchmark with json")
+ })
+
+ t.Run("success with markdown format", func(t *testing.T) {
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ })
+
+ *sourceDir = tempDir
+ *format = testMarkdown
+ *concurrency = 1
+
+ err := runProcessingBenchmark()
+ testutil.AssertNoError(t, err, "runProcessingBenchmark with markdown")
+ })
+}
+
+func TestRunConcurrencyBenchmark(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // Save original flag values
+ origSourceDir := sourceDir
+ origFormat := format
+ origConcurrencyList := concurrencyList
+ defer func() {
+ sourceDir = origSourceDir
+ format = origFormat
+ concurrencyList = origConcurrencyList
+ }()
+
+ t.Run("success with valid concurrency list", func(t *testing.T) {
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ })
+
+ *sourceDir = tempDir
+ *format = testJSON
+ *concurrencyList = testConcurrency
+
+ err := runConcurrencyBenchmark()
+ testutil.AssertNoError(t, err, "runConcurrencyBenchmark")
+ })
+
+ t.Run("error with invalid concurrency list", func(t *testing.T) {
+ tempDir := t.TempDir()
+ *sourceDir = tempDir
+ *format = testJSON
+ *concurrencyList = "invalid"
+
+ err := runConcurrencyBenchmark()
+ testutil.AssertExpectedError(t, err, "runConcurrencyBenchmark with invalid list")
+ testutil.AssertErrorContains(t, err, "invalid concurrency list", "runConcurrencyBenchmark")
+ })
+}
+
+func TestRunFormatBenchmark(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // Save original flag values
+ origSourceDir := sourceDir
+ origFormatList := formatList
+ defer func() {
+ sourceDir = origSourceDir
+ formatList = origFormatList
+ }()
+
+ t.Run("success with valid format list", func(t *testing.T) {
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ })
+
+ *sourceDir = tempDir
+ *formatList = "json,yaml"
+
+ err := runFormatBenchmark()
+ testutil.AssertNoError(t, err, "runFormatBenchmark")
+ })
+
+ t.Run("success with single format", func(t *testing.T) {
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ })
+
+ *sourceDir = tempDir
+ *formatList = testMarkdown
+
+ err := runFormatBenchmark()
+ testutil.AssertNoError(t, err, "runFormatBenchmark with single format")
+ })
+}
+
+func TestRunBenchmarks(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // Save original flag values
+ origBenchmarkType := benchmarkType
+ origSourceDir := sourceDir
+ origConcurrencyList := concurrencyList
+ origFormatList := formatList
+ defer func() {
+ benchmarkType = origBenchmarkType
+ sourceDir = origSourceDir
+ concurrencyList = origConcurrencyList
+ formatList = origFormatList
+ }()
+
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ })
+
+ tests := []struct {
+ name string
+ benchmarkType string
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "all benchmarks",
+ benchmarkType: "all",
+ wantErr: false,
+ },
+ {
+ name: "collection benchmark",
+ benchmarkType: "collection",
+ wantErr: false,
+ },
+ {
+ name: "processing benchmark",
+ benchmarkType: "processing",
+ wantErr: false,
+ },
+ {
+ name: "concurrency benchmark",
+ benchmarkType: "concurrency",
+ wantErr: false,
+ },
+ {
+ name: "format benchmark",
+ benchmarkType: "format",
+ wantErr: false,
+ },
+ {
+ name: "invalid benchmark type",
+ benchmarkType: "invalid",
+ wantErr: true,
+ errContains: "invalid benchmark type",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ *benchmarkType = tt.benchmarkType
+ *sourceDir = tempDir
+ *concurrencyList = testConcurrency
+ *formatList = testMarkdown
+
+ err := runBenchmarks()
+
+ if tt.wantErr {
+ testutil.AssertExpectedError(t, err, "runBenchmarks")
+ if tt.errContains != "" {
+ testutil.AssertErrorContains(t, err, tt.errContains, "runBenchmarks")
+ }
+ } else {
+ testutil.AssertNoError(t, err, "runBenchmarks")
+ }
+ })
+ }
+}
+
+func TestMainFunction(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // We can't easily test main() directly due to os.Exit calls,
+ // but we can test runBenchmarks() which contains the main logic
+ tempDir := t.TempDir()
+ testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
+ {Name: testFile1, Content: testContent1},
+ })
+
+ // Save original flag values
+ origBenchmarkType := benchmarkType
+ origSourceDir := sourceDir
+ defer func() {
+ benchmarkType = origBenchmarkType
+ sourceDir = origSourceDir
+ }()
+
+ *benchmarkType = testCollection
+ *sourceDir = tempDir
+
+ err := runBenchmarks()
+ testutil.AssertNoError(t, err, "runBenchmarks through main logic path")
+}
+
+func TestFlagInitialization(t *testing.T) {
+ // Test that flags are properly initialized with expected defaults
+ resetFlags()
+
+ if *sourceDir != "" {
+ t.Errorf("sourceDir default should be empty, got %v", *sourceDir)
+ }
+ if *benchmarkType != testAll {
+ t.Errorf("benchmarkType default should be 'all', got %v", *benchmarkType)
+ }
+ if *format != testJSON {
+ t.Errorf("format default should be 'json', got %v", *format)
+ }
+ if *concurrency != runtime.NumCPU() {
+ t.Errorf("concurrency default should be %d, got %d", runtime.NumCPU(), *concurrency)
+ }
+ if *concurrencyList != shared.TestConcurrencyList {
+ t.Errorf("concurrencyList default should be '%s', got %v", shared.TestConcurrencyList, *concurrencyList)
+ }
+ if *formatList != shared.TestFormatList {
+ t.Errorf("formatList default should be '%s', got %v", shared.TestFormatList, *formatList)
+ }
+ if *numFiles != 100 {
+ t.Errorf("numFiles default should be 100, got %d", *numFiles)
+ }
+}
+
+func TestErrorPropagation(t *testing.T) {
+ restore := testutil.SuppressLogs(t)
+ defer restore()
+
+ // Save original flag values
+ origBenchmarkType := benchmarkType
+ origSourceDir := sourceDir
+ origConcurrencyList := concurrencyList
+ defer func() {
+ benchmarkType = origBenchmarkType
+ sourceDir = origSourceDir
+ concurrencyList = origConcurrencyList
+ }()
+
+ tempDir := t.TempDir()
+
+ t.Run("error from concurrency benchmark propagates", func(t *testing.T) {
+ *benchmarkType = testConcurrencyT
+ *sourceDir = tempDir
+ *concurrencyList = "invalid,list"
+
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks with invalid concurrency")
+ testutil.AssertErrorContains(t, err, "invalid concurrency list", "runBenchmarks error propagation")
+ })
+
+ t.Run("validation error contains proper error type", func(t *testing.T) {
+ *benchmarkType = "invalid-type"
+ *sourceDir = tempDir
+
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks with invalid type")
+
+ var validationErr *shared.StructuredError
+ if !errors.As(err, &validationErr) {
+ t.Errorf("Expected StructuredError, got %T", err)
+ } else if validationErr.Code != shared.CodeValidationFormat {
+ t.Errorf("Expected validation format error code, got %v", validationErr.Code)
+ }
+ })
+
+ t.Run("empty levels array returns error", func(t *testing.T) {
+ // Test the specific case where all parts are empty after trimming
+ _, err := parseConcurrencyList(" , , ")
+ testutil.AssertExpectedError(t, err, "parseConcurrencyList with all empty parts")
+ testutil.AssertErrorContains(t, err, shared.TestMsgInvalidConcurrencyLevel, "parseConcurrencyList empty levels")
+ })
+
+ t.Run("single empty part returns error", func(t *testing.T) {
+ // Test case that should never reach the "no valid levels found" condition
+ _, err := parseConcurrencyList(" ")
+ testutil.AssertExpectedError(t, err, "parseConcurrencyList with single empty part")
+ testutil.AssertErrorContains(
+ t, err, shared.TestMsgInvalidConcurrencyLevel, "parseConcurrencyList single empty part",
+ )
+ })
+
+ t.Run("benchmark function error paths", func(t *testing.T) {
+ // Test with non-existent source directory to trigger error paths
+ nonExistentDir := testNonExistent
+
+ *benchmarkType = testCollection
+ *sourceDir = nonExistentDir
+
+ // This should fail as the benchmark package cannot access non-existent directories
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks with non-existent directory")
+ testutil.AssertErrorContains(t, err, "file collection benchmark failed",
+ "runBenchmarks error contains expected message")
+ })
+
+ t.Run("processing benchmark error path", func(t *testing.T) {
+ // Test error path for processing benchmark
+ nonExistentDir := testNonExistent
+
+ *benchmarkType = "processing"
+ *sourceDir = nonExistentDir
+ *format = "json"
+ *concurrency = 1
+
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks processing with non-existent directory")
+ testutil.AssertErrorContains(t, err, "file processing benchmark failed", "runBenchmarks processing error")
+ })
+
+ t.Run("concurrency benchmark error path", func(t *testing.T) {
+ // Test error path for concurrency benchmark
+ nonExistentDir := testNonExistent
+
+ *benchmarkType = testConcurrencyT
+ *sourceDir = nonExistentDir
+ *format = "json"
+ *concurrencyList = "1,2"
+
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks concurrency with non-existent directory")
+ testutil.AssertErrorContains(t, err, "concurrency benchmark failed", "runBenchmarks concurrency error")
+ })
+
+ t.Run("format benchmark error path", func(t *testing.T) {
+ // Test error path for format benchmark
+ nonExistentDir := testNonExistent
+
+ *benchmarkType = "format"
+ *sourceDir = nonExistentDir
+ *formatList = "json,yaml"
+
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks format with non-existent directory")
+ testutil.AssertErrorContains(t, err, "format benchmark failed", "runBenchmarks format error")
+ })
+
+ t.Run("all benchmarks error path", func(t *testing.T) {
+ // Test error path for all benchmarks
+ nonExistentDir := testNonExistent
+
+ *benchmarkType = "all"
+ *sourceDir = nonExistentDir
+
+ err := runBenchmarks()
+ testutil.AssertExpectedError(t, err, "runBenchmarks all with non-existent directory")
+ testutil.AssertErrorContains(t, err, "benchmark failed", "runBenchmarks all error")
+ })
+}
+
+// Benchmark functions
+
+// BenchmarkParseConcurrencyList benchmarks the parsing of concurrency lists.
+func BenchmarkParseConcurrencyList(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ input string
+ }{
+ {
+ name: "single value",
+ input: "4",
+ },
+ {
+ name: "multiple values",
+ input: "1,2,4,8",
+ },
+ {
+ name: "values with whitespace",
+ input: " 1 , 2 , 4 , 8 , 16 ",
+ },
+ {
+ name: "large list",
+ input: "1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16",
+ },
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _, _ = parseConcurrencyList(bm.input)
+ }
+ })
+ }
+}
+
+// BenchmarkParseFormatList benchmarks the parsing of format lists.
+func BenchmarkParseFormatList(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ input string
+ }{
+ {
+ name: "single format",
+ input: "json",
+ },
+ {
+ name: "multiple formats",
+ input: shared.TestFormatList,
+ },
+ {
+ name: "formats with whitespace",
+ input: " json , yaml , markdown , xml , toml ",
+ },
+ {
+ name: "large list",
+ input: "json,yaml,markdown,xml,toml,csv,tsv,html,txt,log",
+ },
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _ = parseFormatList(bm.input)
+ }
+ })
+ }
+}
+
+// Helper functions
+
+// equalSlices compares two slices for equality.
+func equalSlices[T comparable](a, b []T) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// resetFlags resets flag variables to their defaults for testing.
+func resetFlags() {
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
+ flag.CommandLine.SetOutput(io.Discard)
+ // Reinitialize the flags
+ sourceDir = flag.String("source", "", "Source directory to benchmark (uses temp files if empty)")
+ benchmarkType = flag.String("type", "all", "Benchmark type: all, collection, processing, concurrency, format")
+ format = flag.String("format", "json", "Output format for processing benchmarks")
+ concurrency = flag.Int("concurrency", runtime.NumCPU(), "Concurrency level for processing benchmarks")
+ concurrencyList = flag.String(
+ "concurrency-list", shared.TestConcurrencyList, "Comma-separated list of concurrency levels",
+ )
+ formatList = flag.String("format-list", shared.TestFormatList, "Comma-separated list of formats")
+ numFiles = flag.Int("files", 100, "Number of files to create for benchmarks")
+}
diff --git a/config.example.yaml b/config.example.yaml
index 5bf2e79..f9d6bbc 100644
--- a/config.example.yaml
+++ b/config.example.yaml
@@ -1,84 +1,333 @@
-# gibidify configuration example
-# Place this file in one of these locations:
+---
+# gibidify Configuration Example
+# =============================
+# This file demonstrates all available configuration options with their defaults
+# and validation ranges. Copy this file to one of the following locations:
+#
# - $XDG_CONFIG_HOME/gibidify/config.yaml
# - $HOME/.config/gibidify/config.yaml
# - Current directory (if no gibidify.yaml output file exists)
-# File size limit in bytes (default: 5MB)
+# =============================================================================
+# BASIC FILE PROCESSING SETTINGS
+# =============================================================================
+
+# Maximum size for individual files in bytes
+# Default: 5242880 (5MB), Min: 1024 (1KB), Max: 104857600 (100MB)
fileSizeLimit: 5242880
-# Directories to ignore during scanning
+# Directories to ignore during file system traversal
+# These are sensible defaults for most projects
ignoreDirectories:
- - vendor
- - node_modules
- - .git
- - dist
- - build
- - target
- - bower_components
- - cache
- - tmp
- - .next
- - .nuxt
+ - vendor # Go vendor directory
+ - node_modules # Node.js dependencies
+ - .git # Git repository data
+ - dist # Distribution/build output
+ - build # Build artifacts
+ - target # Maven/Rust build directory
+ - bower_components # Bower dependencies
+ - cache # Various cache directories
+ - tmp # Temporary files
+ - .next # Next.js build directory
+ - .nuxt # Nuxt.js build directory
+ - .vscode # VS Code settings
+ - .idea # IntelliJ IDEA settings
+ - __pycache__ # Python cache
+ - .pytest_cache # Pytest cache
+
+# Maximum number of worker goroutines for concurrent processing
+# Default: number of CPU cores, Min: 1, Max: 100
+# maxConcurrency: 8
+
+# Supported output formats for validation
+# Default: ["json", "yaml", "markdown"]
+# supportedFormats:
+# - json
+# - yaml
+# - markdown
+
+# File patterns to include (glob patterns)
+# Default: empty (all files), useful for filtering specific file types
+# filePatterns:
+# - "*.go"
+# - "*.py"
+# - "*.js"
+# - "*.ts"
+# - "*.java"
+# - "*.c"
+# - "*.cpp"
+
+# =============================================================================
+# FILE TYPE DETECTION AND CUSTOMIZATION
+# =============================================================================
-# FileType registry configuration
fileTypes:
- # Enable/disable file type detection entirely (default: true)
+ # Enable/disable file type detection entirely
+ # Default: true
enabled: true
- # Add custom image extensions
+ # Add custom image extensions (beyond built-in: .png, .jpg, .jpeg, .gif, .svg, .ico, .bmp, .tiff, .webp)
customImageExtensions:
- - .webp
- - .avif
- - .heic
- - .jxl
+ - .avif # AV1 Image File Format
+ - .heic # High Efficiency Image Container
+ - .jxl # JPEG XL
+ - .webp # WebP (if not already included)
- # Add custom binary extensions
+ # Add custom binary extensions (beyond built-in: .exe, .dll, .so, .dylib, .a, .lib, .obj, .o)
customBinaryExtensions:
- - .custom
- - .proprietary
- - .blob
+ - .custom # Custom binary format
+ - .proprietary # Proprietary format
+ - .blob # Binary large object
- # Add custom language mappings
+ # Add custom language mappings (extension -> language name)
customLanguages:
- .zig: zig
- .odin: odin
- .v: vlang
- .grain: grain
- .gleam: gleam
- .roc: roc
- .janet: janet
- .fennel: fennel
- .wast: wast
- .wat: wat
+ .zig: zig # Zig language
+ .odin: odin # Odin language
+ .v: vlang # V language
+ .grain: grain # Grain language
+ .gleam: gleam # Gleam language
+ .roc: roc # Roc language
+ .janet: janet # Janet language
+ .fennel: fennel # Fennel language
+ .wast: wast # WebAssembly text format
+ .wat: wat # WebAssembly text format
# Disable specific default image extensions
disabledImageExtensions:
- - .bmp # Disable bitmap support
- - .tif # Disable TIFF support
+ - .bmp # Disable bitmap support
+ - .tiff # Disable TIFF support
# Disable specific default binary extensions
disabledBinaryExtensions:
- - .exe # Don't treat executables as binary
- - .dll # Don't treat DLL files as binary
+ - .exe # Don't treat executables as binary
+ - .dll # Don't treat DLL files as binary
# Disable specific default language extensions
disabledLanguageExtensions:
- - .bat # Don't detect batch files
- - .cmd # Don't detect command files
+ - .bat # Don't detect batch files
+ - .cmd # Don't detect command files
-# Maximum concurrency (optional)
-maxConcurrency: 16
+# =============================================================================
+# BACKPRESSURE AND MEMORY MANAGEMENT
+# =============================================================================
-# Supported output formats (optional validation)
-supportedFormats:
- - json
- - yaml
- - markdown
+backpressure:
+ # Enable backpressure management for memory optimization
+ # Default: true
+ enabled: true
-# File patterns for filtering (optional)
-filePatterns:
- - "*.go"
- - "*.py"
- - "*.js"
- - "*.ts"
+ # Maximum number of files to buffer in the processing pipeline
+ # Default: 1000, helps prevent memory exhaustion with many small files
+ maxPendingFiles: 1000
+
+ # Maximum number of write operations to buffer
+ # Default: 100, controls write throughput vs memory usage
+ maxPendingWrites: 100
+
+ # Soft memory usage limit in bytes before triggering backpressure
+ # Default: 104857600 (100MB)
+ maxMemoryUsage: 104857600
+
+ # Check memory usage every N files processed
+ # Default: 1000, lower values = more frequent checks but higher overhead
+ memoryCheckInterval: 1000
+
+# =============================================================================
+# RESOURCE LIMITS AND SECURITY
+# =============================================================================
+
+resourceLimits:
+ # Enable resource limits for DoS protection
+ # Default: true
+ enabled: true
+
+ # Maximum number of files to process
+ # Default: 10000, Min: 1, Max: 1000000
+ maxFiles: 10000
+
+ # Maximum total size of all files combined in bytes
+ # Default: 1073741824 (1GB), Min: 1048576 (1MB), Max: 107374182400 (100GB)
+ maxTotalSize: 1073741824
+
+ # Timeout for processing individual files in seconds
+ # Default: 30, Min: 1, Max: 300 (5 minutes)
+ fileProcessingTimeoutSec: 30
+
+ # Overall timeout for the entire operation in seconds
+ # Default: 3600 (1 hour), Min: 10, Max: 86400 (24 hours)
+ overallTimeoutSec: 3600
+
+ # Maximum concurrent file reading operations
+ # Default: 10, Min: 1, Max: 100
+ maxConcurrentReads: 10
+
+ # Rate limit for file processing (files per second)
+ # Default: 0 (disabled), Min: 0, Max: 10000
+ rateLimitFilesPerSec: 0
+
+ # Hard memory limit in MB - terminates processing if exceeded
+ # Default: 512, Min: 64, Max: 8192 (8GB)
+ hardMemoryLimitMB: 512
+
+ # Enable graceful degradation under resource pressure
+ # Default: true - reduces concurrency and buffers when under pressure
+ enableGracefulDegradation: true
+
+ # Enable detailed resource monitoring and metrics
+ # Default: true - tracks memory, timing, and processing statistics
+ enableResourceMonitoring: true
+
+# =============================================================================
+# OUTPUT FORMATTING AND TEMPLATES
+# =============================================================================
+
+output:
+ # Template selection: "" (default), "minimal", "detailed", "compact", or "custom"
+ # Default: "" (uses built-in default template)
+ template: ""
+
+ # Metadata inclusion options
+ metadata:
+ # Include processing statistics in output
+ # Default: false
+ includeStats: false
+
+ # Include timestamp when processing was done
+ # Default: false
+ includeTimestamp: false
+
+ # Include total number of files processed
+ # Default: false
+ includeFileCount: false
+
+ # Include source directory path
+ # Default: false
+ includeSourcePath: false
+
+ # Include detected file types summary
+ # Default: false
+ includeFileTypes: false
+
+ # Include processing time information
+ # Default: false
+ includeProcessingTime: false
+
+ # Include total size of processed files
+ # Default: false
+ includeTotalSize: false
+
+ # Include detailed processing metrics
+ # Default: false
+ includeMetrics: false
+
+ # Markdown-specific formatting options
+ markdown:
+ # Wrap file content in code blocks
+ # Default: false
+ useCodeBlocks: false
+
+ # Include language identifier in code blocks
+ # Default: false
+ includeLanguage: false
+
+ # Header level for file sections (1-6)
+ # Default: 0 (uses template default, typically 2)
+ headerLevel: 0
+
+ # Generate table of contents
+ # Default: false
+ tableOfContents: false
+
+ # Use collapsible sections for large files
+ # Default: false
+ useCollapsible: false
+
+ # Enable syntax highlighting hints
+ # Default: false
+ syntaxHighlighting: false
+
+ # Include line numbers in code blocks
+ # Default: false
+ lineNumbers: false
+
+ # Automatically fold files longer than maxLineLength
+ # Default: false
+ foldLongFiles: false
+
+ # Maximum line length before wrapping/folding
+ # Default: 0 (no limit)
+ maxLineLength: 0
+
+ # Custom CSS to include in markdown output
+ # Default: "" (no custom CSS)
+ customCSS: ""
+
+ # Custom template overrides (only used when template is "custom")
+ custom:
+ # Custom header template (supports Go template syntax)
+ header: ""
+
+ # Custom footer template
+ footer: ""
+
+ # Custom file header template (prepended to each file)
+ fileHeader: ""
+
+ # Custom file footer template (appended to each file)
+ fileFooter: ""
+
+ # Custom template variables accessible in all templates
+ variables:
+ # Example variables - customize as needed
+ project_name: "My Project"
+ author: "Developer Name"
+ version: "1.0.0"
+ description: "Generated code aggregation"
+ # Add any custom key-value pairs here
+
+# =============================================================================
+# EXAMPLES OF COMMON CONFIGURATIONS
+# =============================================================================
+
+# Example 1: Minimal configuration for quick code review
+# fileSizeLimit: 1048576 # 1MB limit for faster processing
+# maxConcurrency: 4 # Lower concurrency for stability
+# ignoreDirectories: [".git", "node_modules", "vendor"]
+# output:
+# template: "minimal"
+# metadata:
+# includeStats: true
+
+# Example 2: High-performance configuration for large codebases
+# fileSizeLimit: 10485760 # 10MB limit
+# maxConcurrency: 16 # High concurrency
+# backpressure:
+# maxPendingFiles: 5000 # Larger buffers
+# maxMemoryUsage: 536870912 # 512MB memory
+# resourceLimits:
+# maxFiles: 100000 # Process more files
+# maxTotalSize: 10737418240 # 10GB total size
+
+# Example 3: Security-focused configuration
+# resourceLimits:
+# maxFiles: 1000 # Strict file limit
+# maxTotalSize: 104857600 # 100MB total limit
+# fileProcessingTimeoutSec: 10 # Short timeout
+# overallTimeoutSec: 300 # 5-minute overall limit
+# hardMemoryLimitMB: 256 # Lower memory limit
+# rateLimitFilesPerSec: 50 # Rate limiting enabled
+
+# Example 4: Documentation-friendly output
+# output:
+# template: "detailed"
+# metadata:
+# includeStats: true
+# includeTimestamp: true
+# includeFileCount: true
+# includeSourcePath: true
+# markdown:
+# useCodeBlocks: true
+# includeLanguage: true
+# headerLevel: 2
+# tableOfContents: true
+# syntaxHighlighting: true
diff --git a/config.yaml.example b/config.yaml.example
deleted file mode 100644
index 8641ee8..0000000
--- a/config.yaml.example
+++ /dev/null
@@ -1,79 +0,0 @@
-# Gibidify Configuration Example
-# This file demonstrates all available configuration options
-
-# File size limit for individual files (in bytes)
-# Default: 5242880 (5MB), Min: 1024 (1KB), Max: 104857600 (100MB)
-fileSizeLimit: 5242880
-
-# Directories to ignore during traversal
-ignoreDirectories:
- - vendor
- - node_modules
- - .git
- - dist
- - build
- - target
- - bower_components
- - cache
- - tmp
-
-# File type detection and filtering
-fileTypes:
- enabled: true
- customImageExtensions: []
- customBinaryExtensions: []
- customLanguages: {}
- disabledImageExtensions: []
- disabledBinaryExtensions: []
- disabledLanguageExtensions: []
-
-# Back-pressure management for memory optimization
-backpressure:
- enabled: true
- maxPendingFiles: 1000 # Max files in channel buffer
- maxPendingWrites: 100 # Max writes in channel buffer
- maxMemoryUsage: 104857600 # 100MB soft memory limit
- memoryCheckInterval: 1000 # Check memory every N files
-
-# Resource limits for DoS protection and security
-resourceLimits:
- enabled: true
-
- # File processing limits
- maxFiles: 10000 # Maximum number of files to process
- maxTotalSize: 1073741824 # Maximum total size (1GB)
-
- # Timeout limits (in seconds)
- fileProcessingTimeoutSec: 30 # Timeout for individual file processing
- overallTimeoutSec: 3600 # Overall processing timeout (1 hour)
-
- # Concurrency limits
- maxConcurrentReads: 10 # Maximum concurrent file reading operations
-
- # Rate limiting (0 = disabled)
- rateLimitFilesPerSec: 0 # Files per second rate limit
-
- # Memory limits
- hardMemoryLimitMB: 512 # Hard memory limit (512MB)
-
- # Safety features
- enableGracefulDegradation: true # Enable graceful degradation on resource pressure
- enableResourceMonitoring: true # Enable detailed resource monitoring
-
-# Optional: Maximum concurrency for workers
-# Default: number of CPU cores
-# maxConcurrency: 4
-
-# Optional: Supported output formats
-# Default: ["json", "yaml", "markdown"]
-# supportedFormats:
-# - json
-# - yaml
-# - markdown
-
-# Optional: File patterns to include
-# Default: all files (empty list means no pattern filtering)
-# filePatterns:
-# - "*.go"
-# - "*.py"
-# - "*.js"
diff --git a/config/config_filetype_test.go b/config/config_filetype_test.go
index 0065bfa..ce5f8f6 100644
--- a/config/config_filetype_test.go
+++ b/config/config_filetype_test.go
@@ -4,171 +4,223 @@ import (
"testing"
"github.com/spf13/viper"
+
+ "github.com/ivuorinen/gibidify/shared"
)
-// TestFileTypeRegistryConfig tests the FileTypeRegistry configuration functionality.
-func TestFileTypeRegistryConfig(t *testing.T) {
- // Test default values
- t.Run("DefaultValues", func(t *testing.T) {
- viper.Reset()
- setDefaultConfig()
+// TestFileTypeRegistryDefaultValues tests default configuration values.
+func TestFileTypeRegistryDefaultValues(t *testing.T) {
+ viper.Reset()
+ SetDefaultConfig()
- if !GetFileTypesEnabled() {
- t.Error("Expected file types to be enabled by default")
- }
+ verifyDefaultValues(t)
+}
- if len(GetCustomImageExtensions()) != 0 {
- t.Error("Expected custom image extensions to be empty by default")
- }
+// TestFileTypeRegistrySetGet tests configuration setting and getting.
+func TestFileTypeRegistrySetGet(t *testing.T) {
+ viper.Reset()
- if len(GetCustomBinaryExtensions()) != 0 {
- t.Error("Expected custom binary extensions to be empty by default")
- }
+ // Set test values
+ setTestConfiguration()
- if len(GetCustomLanguages()) != 0 {
- t.Error("Expected custom languages to be empty by default")
- }
+ // Test getter functions
+ verifyTestConfiguration(t)
+}
- if len(GetDisabledImageExtensions()) != 0 {
- t.Error("Expected disabled image extensions to be empty by default")
- }
+// TestFileTypeRegistryValidationSuccess tests successful validation.
+func TestFileTypeRegistryValidationSuccess(t *testing.T) {
+ viper.Reset()
+ SetDefaultConfig()
- if len(GetDisabledBinaryExtensions()) != 0 {
- t.Error("Expected disabled binary extensions to be empty by default")
- }
+ // Set valid configuration
+ setValidConfiguration()
- if len(GetDisabledLanguageExtensions()) != 0 {
- t.Error("Expected disabled language extensions to be empty by default")
- }
- })
+ err := ValidateConfig()
+ if err != nil {
+ t.Errorf("Expected validation to pass with valid config, got error: %v", err)
+ }
+}
- // Test configuration setting and getting
- t.Run("ConfigurationSetGet", func(t *testing.T) {
- viper.Reset()
+// TestFileTypeRegistryValidationFailure tests validation failures.
+func TestFileTypeRegistryValidationFailure(t *testing.T) {
+ // Test invalid custom image extensions
+ testInvalidImageExtensions(t)
- // Set test values
- viper.Set("fileTypes.enabled", false)
- viper.Set("fileTypes.customImageExtensions", []string{".webp", ".avif"})
- viper.Set("fileTypes.customBinaryExtensions", []string{".custom", ".mybin"})
- viper.Set("fileTypes.customLanguages", map[string]string{
+ // Test invalid custom binary extensions
+ testInvalidBinaryExtensions(t)
+
+ // Test invalid custom languages
+ testInvalidCustomLanguages(t)
+}
+
+// verifyDefaultValues verifies that default values are correct.
+func verifyDefaultValues(t *testing.T) {
+ t.Helper()
+
+ if !FileTypesEnabled() {
+ t.Error("Expected file types to be enabled by default")
+ }
+
+ verifyEmptySlice(t, CustomImageExtensions(), "custom image extensions")
+ verifyEmptySlice(t, CustomBinaryExtensions(), "custom binary extensions")
+ verifyEmptyMap(t, CustomLanguages(), "custom languages")
+ verifyEmptySlice(t, DisabledImageExtensions(), "disabled image extensions")
+ verifyEmptySlice(t, DisabledBinaryExtensions(), "disabled binary extensions")
+ verifyEmptySlice(t, DisabledLanguageExtensions(), "disabled language extensions")
+}
+
+// setTestConfiguration sets test configuration values.
+func setTestConfiguration() {
+ viper.Set("fileTypes.enabled", false)
+ viper.Set(shared.ConfigKeyFileTypesCustomImageExtensions, []string{".webp", ".avif"})
+ viper.Set(shared.ConfigKeyFileTypesCustomBinaryExtensions, []string{shared.TestExtensionCustom, ".mybin"})
+ viper.Set(
+ shared.ConfigKeyFileTypesCustomLanguages, map[string]string{
".zig": "zig",
".v": "vlang",
- })
- viper.Set("fileTypes.disabledImageExtensions", []string{".gif", ".bmp"})
- viper.Set("fileTypes.disabledBinaryExtensions", []string{".exe", ".dll"})
- viper.Set("fileTypes.disabledLanguageExtensions", []string{".rb", ".pl"})
+ },
+ )
+ viper.Set("fileTypes.disabledImageExtensions", []string{".gif", ".bmp"})
+ viper.Set("fileTypes.disabledBinaryExtensions", []string{".exe", ".dll"})
+ viper.Set("fileTypes.disabledLanguageExtensions", []string{".rb", ".pl"})
+}
- // Test getter functions
- if GetFileTypesEnabled() {
- t.Error("Expected file types to be disabled")
- }
+// verifyTestConfiguration verifies that test configuration is retrieved correctly.
+func verifyTestConfiguration(t *testing.T) {
+ t.Helper()
- customImages := GetCustomImageExtensions()
- expectedImages := []string{".webp", ".avif"}
- if len(customImages) != len(expectedImages) {
- t.Errorf("Expected %d custom image extensions, got %d", len(expectedImages), len(customImages))
- }
- for i, ext := range expectedImages {
- if customImages[i] != ext {
- t.Errorf("Expected custom image extension %s, got %s", ext, customImages[i])
- }
- }
+ if FileTypesEnabled() {
+ t.Error("Expected file types to be disabled")
+ }
- customBinary := GetCustomBinaryExtensions()
- expectedBinary := []string{".custom", ".mybin"}
- if len(customBinary) != len(expectedBinary) {
- t.Errorf("Expected %d custom binary extensions, got %d", len(expectedBinary), len(customBinary))
- }
- for i, ext := range expectedBinary {
- if customBinary[i] != ext {
- t.Errorf("Expected custom binary extension %s, got %s", ext, customBinary[i])
- }
- }
+ verifyStringSlice(t, CustomImageExtensions(), []string{".webp", ".avif"}, "custom image extensions")
+ verifyStringSlice(t, CustomBinaryExtensions(), []string{".custom", ".mybin"}, "custom binary extensions")
- customLangs := GetCustomLanguages()
- expectedLangs := map[string]string{
+ expectedLangs := map[string]string{
+ ".zig": "zig",
+ ".v": "vlang",
+ }
+ verifyStringMap(t, CustomLanguages(), expectedLangs, "custom languages")
+
+ verifyStringSliceLength(t, DisabledImageExtensions(), []string{".gif", ".bmp"}, "disabled image extensions")
+ verifyStringSliceLength(t, DisabledBinaryExtensions(), []string{".exe", ".dll"}, "disabled binary extensions")
+ verifyStringSliceLength(t, DisabledLanguageExtensions(), []string{".rb", ".pl"}, "disabled language extensions")
+}
+
+// setValidConfiguration sets valid configuration for validation tests.
+func setValidConfiguration() {
+ viper.Set(shared.ConfigKeyFileTypesCustomImageExtensions, []string{".webp", ".avif"})
+ viper.Set(shared.ConfigKeyFileTypesCustomBinaryExtensions, []string{shared.TestExtensionCustom})
+ viper.Set(
+ shared.ConfigKeyFileTypesCustomLanguages, map[string]string{
".zig": "zig",
".v": "vlang",
- }
- if len(customLangs) != len(expectedLangs) {
- t.Errorf("Expected %d custom languages, got %d", len(expectedLangs), len(customLangs))
- }
- for ext, lang := range expectedLangs {
- if customLangs[ext] != lang {
- t.Errorf("Expected custom language %s -> %s, got %s", ext, lang, customLangs[ext])
- }
- }
+ },
+ )
+}
- disabledImages := GetDisabledImageExtensions()
- expectedDisabledImages := []string{".gif", ".bmp"}
- if len(disabledImages) != len(expectedDisabledImages) {
- t.Errorf("Expected %d disabled image extensions, got %d", len(expectedDisabledImages), len(disabledImages))
- }
+// testInvalidImageExtensions tests validation failure with invalid image extensions.
+func testInvalidImageExtensions(t *testing.T) {
+ t.Helper()
- disabledBinary := GetDisabledBinaryExtensions()
- expectedDisabledBinary := []string{".exe", ".dll"}
- if len(disabledBinary) != len(expectedDisabledBinary) {
- t.Errorf("Expected %d disabled binary extensions, got %d", len(expectedDisabledBinary), len(disabledBinary))
- }
+ viper.Reset()
+ SetDefaultConfig()
+ viper.Set(shared.ConfigKeyFileTypesCustomImageExtensions, []string{"", "webp"}) // Empty and missing dot
- disabledLangs := GetDisabledLanguageExtensions()
- expectedDisabledLangs := []string{".rb", ".pl"}
- if len(disabledLangs) != len(expectedDisabledLangs) {
- t.Errorf("Expected %d disabled language extensions, got %d", len(expectedDisabledLangs), len(disabledLangs))
- }
- })
+ err := ValidateConfig()
+ if err == nil {
+ t.Error("Expected validation to fail with invalid custom image extensions")
+ }
+}
- // Test validation
- t.Run("ValidationSuccess", func(t *testing.T) {
- viper.Reset()
- setDefaultConfig()
+// testInvalidBinaryExtensions tests validation failure with invalid binary extensions.
+func testInvalidBinaryExtensions(t *testing.T) {
+ t.Helper()
- // Set valid configuration
- viper.Set("fileTypes.customImageExtensions", []string{".webp", ".avif"})
- viper.Set("fileTypes.customBinaryExtensions", []string{".custom"})
- viper.Set("fileTypes.customLanguages", map[string]string{
- ".zig": "zig",
- ".v": "vlang",
- })
+ viper.Reset()
+ SetDefaultConfig()
+ viper.Set(shared.ConfigKeyFileTypesCustomBinaryExtensions, []string{"custom"}) // Missing dot
- err := ValidateConfig()
- if err != nil {
- t.Errorf("Expected validation to pass with valid config, got error: %v", err)
- }
- })
+ err := ValidateConfig()
+ if err == nil {
+ t.Error("Expected validation to fail with invalid custom binary extensions")
+ }
+}
- t.Run("ValidationFailure", func(t *testing.T) {
- // Test invalid custom image extensions
- viper.Reset()
- setDefaultConfig()
- viper.Set("fileTypes.customImageExtensions", []string{"", "webp"}) // Empty and missing dot
+// testInvalidCustomLanguages tests validation failure with invalid custom languages.
+func testInvalidCustomLanguages(t *testing.T) {
+ t.Helper()
- err := ValidateConfig()
- if err == nil {
- t.Error("Expected validation to fail with invalid custom image extensions")
- }
-
- // Test invalid custom binary extensions
- viper.Reset()
- setDefaultConfig()
- viper.Set("fileTypes.customBinaryExtensions", []string{"custom"}) // Missing dot
-
- err = ValidateConfig()
- if err == nil {
- t.Error("Expected validation to fail with invalid custom binary extensions")
- }
-
- // Test invalid custom languages
- viper.Reset()
- setDefaultConfig()
- viper.Set("fileTypes.customLanguages", map[string]string{
+ viper.Reset()
+ SetDefaultConfig()
+ viper.Set(
+ shared.ConfigKeyFileTypesCustomLanguages, map[string]string{
"zig": "zig", // Missing dot in extension
".v": "", // Empty language
- })
+ },
+ )
- err = ValidateConfig()
- if err == nil {
- t.Error("Expected validation to fail with invalid custom languages")
- }
- })
+ err := ValidateConfig()
+ if err == nil {
+ t.Error("Expected validation to fail with invalid custom languages")
+ }
+}
+
+// verifyEmptySlice verifies that a slice is empty.
+func verifyEmptySlice(t *testing.T, slice []string, name string) {
+ t.Helper()
+
+ if len(slice) != 0 {
+ t.Errorf("Expected %s to be empty by default", name)
+ }
+}
+
+// verifyEmptyMap verifies that a map is empty.
+func verifyEmptyMap(t *testing.T, m map[string]string, name string) {
+ t.Helper()
+
+ if len(m) != 0 {
+ t.Errorf("Expected %s to be empty by default", name)
+ }
+}
+
+// verifyStringSlice verifies that a string slice matches expected values.
+func verifyStringSlice(t *testing.T, actual, expected []string, name string) {
+ t.Helper()
+
+ if len(actual) != len(expected) {
+ t.Errorf(shared.TestFmtExpectedCount, len(expected), name, len(actual))
+
+ return
+ }
+ for i, ext := range expected {
+ if actual[i] != ext {
+ t.Errorf("Expected %s %s, got %s", name, ext, actual[i])
+ }
+ }
+}
+
+// verifyStringMap verifies that a string map matches expected values.
+func verifyStringMap(t *testing.T, actual, expected map[string]string, name string) {
+ t.Helper()
+
+ if len(actual) != len(expected) {
+ t.Errorf(shared.TestFmtExpectedCount, len(expected), name, len(actual))
+
+ return
+ }
+ for ext, lang := range expected {
+ if actual[ext] != lang {
+ t.Errorf("Expected %s %s -> %s, got %s", name, ext, lang, actual[ext])
+ }
+ }
+}
+
+// verifyStringSliceLength verifies that a string slice has the expected length.
+func verifyStringSliceLength(t *testing.T, actual, expected []string, name string) {
+ t.Helper()
+
+ if len(actual) != len(expected) {
+ t.Errorf(shared.TestFmtExpectedCount, len(expected), name, len(actual))
+ }
}
diff --git a/config/constants.go b/config/constants.go
deleted file mode 100644
index 1a2ba75..0000000
--- a/config/constants.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package config
-
-const (
- // DefaultFileSizeLimit is the default maximum file size (5MB).
- DefaultFileSizeLimit = 5242880
- // MinFileSizeLimit is the minimum allowed file size limit (1KB).
- MinFileSizeLimit = 1024
- // MaxFileSizeLimit is the maximum allowed file size limit (100MB).
- MaxFileSizeLimit = 104857600
-
- // Resource Limit Constants
-
- // DefaultMaxFiles is the default maximum number of files to process.
- DefaultMaxFiles = 10000
- // MinMaxFiles is the minimum allowed file count limit.
- MinMaxFiles = 1
- // MaxMaxFiles is the maximum allowed file count limit.
- MaxMaxFiles = 1000000
-
- // DefaultMaxTotalSize is the default maximum total size of files (1GB).
- DefaultMaxTotalSize = 1073741824
- // MinMaxTotalSize is the minimum allowed total size limit (1MB).
- MinMaxTotalSize = 1048576
- // MaxMaxTotalSize is the maximum allowed total size limit (100GB).
- MaxMaxTotalSize = 107374182400
-
- // DefaultFileProcessingTimeoutSec is the default timeout for individual file processing (30 seconds).
- DefaultFileProcessingTimeoutSec = 30
- // MinFileProcessingTimeoutSec is the minimum allowed file processing timeout (1 second).
- MinFileProcessingTimeoutSec = 1
- // MaxFileProcessingTimeoutSec is the maximum allowed file processing timeout (300 seconds).
- MaxFileProcessingTimeoutSec = 300
-
- // DefaultOverallTimeoutSec is the default timeout for overall processing (3600 seconds = 1 hour).
- DefaultOverallTimeoutSec = 3600
- // MinOverallTimeoutSec is the minimum allowed overall timeout (10 seconds).
- MinOverallTimeoutSec = 10
- // MaxOverallTimeoutSec is the maximum allowed overall timeout (86400 seconds = 24 hours).
- MaxOverallTimeoutSec = 86400
-
- // DefaultMaxConcurrentReads is the default maximum concurrent file reading operations.
- DefaultMaxConcurrentReads = 10
- // MinMaxConcurrentReads is the minimum allowed concurrent reads.
- MinMaxConcurrentReads = 1
- // MaxMaxConcurrentReads is the maximum allowed concurrent reads.
- MaxMaxConcurrentReads = 100
-
- // DefaultRateLimitFilesPerSec is the default rate limit for file processing (0 = disabled).
- DefaultRateLimitFilesPerSec = 0
- // MinRateLimitFilesPerSec is the minimum rate limit.
- MinRateLimitFilesPerSec = 0
- // MaxRateLimitFilesPerSec is the maximum rate limit.
- MaxRateLimitFilesPerSec = 10000
-
- // DefaultHardMemoryLimitMB is the default hard memory limit (512MB).
- DefaultHardMemoryLimitMB = 512
- // MinHardMemoryLimitMB is the minimum hard memory limit (64MB).
- MinHardMemoryLimitMB = 64
- // MaxHardMemoryLimitMB is the maximum hard memory limit (8192MB = 8GB).
- MaxHardMemoryLimitMB = 8192
-)
diff --git a/config/getters.go b/config/getters.go
index b178144..30cfedc 100644
--- a/config/getters.go
+++ b/config/getters.go
@@ -1,157 +1,331 @@
+// Package config handles application configuration management.
package config
import (
"strings"
"github.com/spf13/viper"
+
+ "github.com/ivuorinen/gibidify/shared"
)
-// GetFileSizeLimit returns the file size limit from configuration.
-func GetFileSizeLimit() int64 {
- return viper.GetInt64("fileSizeLimit")
+// FileSizeLimit returns the file size limit from configuration.
+// Default: ConfigFileSizeLimitDefault (5MB).
+func FileSizeLimit() int64 {
+ return viper.GetInt64(shared.ConfigKeyFileSizeLimit)
}
-// GetIgnoredDirectories returns the list of directories to ignore.
-func GetIgnoredDirectories() []string {
- return viper.GetStringSlice("ignoreDirectories")
+// IgnoredDirectories returns the list of directories to ignore.
+// Default: ConfigIgnoredDirectoriesDefault.
+func IgnoredDirectories() []string {
+ return viper.GetStringSlice(shared.ConfigKeyIgnoreDirectories)
}
-// GetMaxConcurrency returns the maximum concurrency level.
-func GetMaxConcurrency() int {
- return viper.GetInt("maxConcurrency")
+// MaxConcurrency returns the maximum concurrency level.
+// Returns 0 if not set (caller should determine appropriate default).
+func MaxConcurrency() int {
+ return viper.GetInt(shared.ConfigKeyMaxConcurrency)
}
-// GetSupportedFormats returns the list of supported output formats.
-func GetSupportedFormats() []string {
- return viper.GetStringSlice("supportedFormats")
+// SupportedFormats returns the list of supported output formats.
+// Returns empty slice if not set.
+func SupportedFormats() []string {
+ return viper.GetStringSlice(shared.ConfigKeySupportedFormats)
}
-// GetFilePatterns returns the list of file patterns.
-func GetFilePatterns() []string {
- return viper.GetStringSlice("filePatterns")
+// FilePatterns returns the list of file patterns.
+// Returns empty slice if not set.
+func FilePatterns() []string {
+ return viper.GetStringSlice(shared.ConfigKeyFilePatterns)
}
// IsValidFormat checks if the given format is valid.
func IsValidFormat(format string) bool {
format = strings.ToLower(strings.TrimSpace(format))
supportedFormats := map[string]bool{
- "json": true,
- "yaml": true,
- "markdown": true,
+ shared.FormatJSON: true,
+ shared.FormatYAML: true,
+ shared.FormatMarkdown: true,
}
+
return supportedFormats[format]
}
-// GetFileTypesEnabled returns whether file types are enabled.
-func GetFileTypesEnabled() bool {
- return viper.GetBool("fileTypes.enabled")
+// FileTypesEnabled returns whether file types are enabled.
+// Default: ConfigFileTypesEnabledDefault (true).
+func FileTypesEnabled() bool {
+ return viper.GetBool(shared.ConfigKeyFileTypesEnabled)
}
-// GetCustomImageExtensions returns custom image extensions.
-func GetCustomImageExtensions() []string {
- return viper.GetStringSlice("fileTypes.customImageExtensions")
+// CustomImageExtensions returns custom image extensions.
+// Default: ConfigCustomImageExtensionsDefault (empty).
+func CustomImageExtensions() []string {
+ return viper.GetStringSlice(shared.ConfigKeyFileTypesCustomImageExtensions)
}
-// GetCustomBinaryExtensions returns custom binary extensions.
-func GetCustomBinaryExtensions() []string {
- return viper.GetStringSlice("fileTypes.customBinaryExtensions")
+// CustomBinaryExtensions returns custom binary extensions.
+// Default: ConfigCustomBinaryExtensionsDefault (empty).
+func CustomBinaryExtensions() []string {
+ return viper.GetStringSlice(shared.ConfigKeyFileTypesCustomBinaryExtensions)
}
-// GetCustomLanguages returns custom language mappings.
-func GetCustomLanguages() map[string]string {
- return viper.GetStringMapString("fileTypes.customLanguages")
+// CustomLanguages returns custom language mappings.
+// Default: ConfigCustomLanguagesDefault (empty).
+func CustomLanguages() map[string]string {
+ return viper.GetStringMapString(shared.ConfigKeyFileTypesCustomLanguages)
}
-// GetDisabledImageExtensions returns disabled image extensions.
-func GetDisabledImageExtensions() []string {
- return viper.GetStringSlice("fileTypes.disabledImageExtensions")
+// DisabledImageExtensions returns disabled image extensions.
+// Default: ConfigDisabledImageExtensionsDefault (empty).
+func DisabledImageExtensions() []string {
+ return viper.GetStringSlice(shared.ConfigKeyFileTypesDisabledImageExtensions)
}
-// GetDisabledBinaryExtensions returns disabled binary extensions.
-func GetDisabledBinaryExtensions() []string {
- return viper.GetStringSlice("fileTypes.disabledBinaryExtensions")
+// DisabledBinaryExtensions returns disabled binary extensions.
+// Default: ConfigDisabledBinaryExtensionsDefault (empty).
+func DisabledBinaryExtensions() []string {
+ return viper.GetStringSlice(shared.ConfigKeyFileTypesDisabledBinaryExtensions)
}
-// GetDisabledLanguageExtensions returns disabled language extensions.
-func GetDisabledLanguageExtensions() []string {
- return viper.GetStringSlice("fileTypes.disabledLanguageExtensions")
+// DisabledLanguageExtensions returns disabled language extensions.
+// Default: ConfigDisabledLanguageExtensionsDefault (empty).
+func DisabledLanguageExtensions() []string {
+ return viper.GetStringSlice(shared.ConfigKeyFileTypesDisabledLanguageExts)
}
// Backpressure getters
-// GetBackpressureEnabled returns whether backpressure is enabled.
-func GetBackpressureEnabled() bool {
- return viper.GetBool("backpressure.enabled")
+// BackpressureEnabled returns whether backpressure is enabled.
+// Default: ConfigBackpressureEnabledDefault (true).
+func BackpressureEnabled() bool {
+ return viper.GetBool(shared.ConfigKeyBackpressureEnabled)
}
-// GetMaxPendingFiles returns the maximum pending files.
-func GetMaxPendingFiles() int {
- return viper.GetInt("backpressure.maxPendingFiles")
+// MaxPendingFiles returns the maximum pending files.
+// Default: ConfigMaxPendingFilesDefault (1000).
+func MaxPendingFiles() int {
+ return viper.GetInt(shared.ConfigKeyBackpressureMaxPendingFiles)
}
-// GetMaxPendingWrites returns the maximum pending writes.
-func GetMaxPendingWrites() int {
- return viper.GetInt("backpressure.maxPendingWrites")
+// MaxPendingWrites returns the maximum pending writes.
+// Default: ConfigMaxPendingWritesDefault (100).
+func MaxPendingWrites() int {
+ return viper.GetInt(shared.ConfigKeyBackpressureMaxPendingWrites)
}
-// GetMaxMemoryUsage returns the maximum memory usage.
-func GetMaxMemoryUsage() int64 {
- return viper.GetInt64("backpressure.maxMemoryUsage")
+// MaxMemoryUsage returns the maximum memory usage.
+// Default: ConfigMaxMemoryUsageDefault (100MB).
+func MaxMemoryUsage() int64 {
+ return viper.GetInt64(shared.ConfigKeyBackpressureMaxMemoryUsage)
}
-// GetMemoryCheckInterval returns the memory check interval.
-func GetMemoryCheckInterval() int {
- return viper.GetInt("backpressure.memoryCheckInterval")
+// MemoryCheckInterval returns the memory check interval.
+// Default: ConfigMemoryCheckIntervalDefault (1000 files).
+func MemoryCheckInterval() int {
+ return viper.GetInt(shared.ConfigKeyBackpressureMemoryCheckInt)
}
// Resource limits getters
-// GetResourceLimitsEnabled returns whether resource limits are enabled.
-func GetResourceLimitsEnabled() bool {
- return viper.GetBool("resourceLimits.enabled")
+// ResourceLimitsEnabled returns whether resource limits are enabled.
+// Default: ConfigResourceLimitsEnabledDefault (true).
+func ResourceLimitsEnabled() bool {
+ return viper.GetBool(shared.ConfigKeyResourceLimitsEnabled)
}
-// GetMaxFiles returns the maximum number of files.
-func GetMaxFiles() int {
- return viper.GetInt("resourceLimits.maxFiles")
+// MaxFiles returns the maximum number of files.
+// Default: ConfigMaxFilesDefault (10000).
+func MaxFiles() int {
+ return viper.GetInt(shared.ConfigKeyResourceLimitsMaxFiles)
}
-// GetMaxTotalSize returns the maximum total size.
-func GetMaxTotalSize() int64 {
- return viper.GetInt64("resourceLimits.maxTotalSize")
+// MaxTotalSize returns the maximum total size.
+// Default: ConfigMaxTotalSizeDefault (1GB).
+func MaxTotalSize() int64 {
+ return viper.GetInt64(shared.ConfigKeyResourceLimitsMaxTotalSize)
}
-// GetFileProcessingTimeoutSec returns the file processing timeout in seconds.
-func GetFileProcessingTimeoutSec() int {
- return viper.GetInt("resourceLimits.fileProcessingTimeoutSec")
+// FileProcessingTimeoutSec returns the file processing timeout in seconds.
+// Default: ConfigFileProcessingTimeoutSecDefault (30 seconds).
+func FileProcessingTimeoutSec() int {
+ return viper.GetInt(shared.ConfigKeyResourceLimitsFileProcessingTO)
}
-// GetOverallTimeoutSec returns the overall timeout in seconds.
-func GetOverallTimeoutSec() int {
- return viper.GetInt("resourceLimits.overallTimeoutSec")
+// OverallTimeoutSec returns the overall timeout in seconds.
+// Default: ConfigOverallTimeoutSecDefault (3600 seconds).
+func OverallTimeoutSec() int {
+ return viper.GetInt(shared.ConfigKeyResourceLimitsOverallTO)
}
-// GetMaxConcurrentReads returns the maximum concurrent reads.
-func GetMaxConcurrentReads() int {
- return viper.GetInt("resourceLimits.maxConcurrentReads")
+// MaxConcurrentReads returns the maximum concurrent reads.
+// Default: ConfigMaxConcurrentReadsDefault (10).
+func MaxConcurrentReads() int {
+ return viper.GetInt(shared.ConfigKeyResourceLimitsMaxConcurrentReads)
}
-// GetRateLimitFilesPerSec returns the rate limit files per second.
-func GetRateLimitFilesPerSec() int {
- return viper.GetInt("resourceLimits.rateLimitFilesPerSec")
+// RateLimitFilesPerSec returns the rate limit files per second.
+// Default: ConfigRateLimitFilesPerSecDefault (0 = disabled).
+func RateLimitFilesPerSec() int {
+ return viper.GetInt(shared.ConfigKeyResourceLimitsRateLimitFilesPerSec)
}
-// GetHardMemoryLimitMB returns the hard memory limit in MB.
-func GetHardMemoryLimitMB() int {
- return viper.GetInt("resourceLimits.hardMemoryLimitMB")
+// HardMemoryLimitMB returns the hard memory limit in MB.
+// Default: ConfigHardMemoryLimitMBDefault (512MB).
+func HardMemoryLimitMB() int {
+ return viper.GetInt(shared.ConfigKeyResourceLimitsHardMemoryLimitMB)
}
-// GetEnableGracefulDegradation returns whether graceful degradation is enabled.
-func GetEnableGracefulDegradation() bool {
- return viper.GetBool("resourceLimits.enableGracefulDegradation")
+// EnableGracefulDegradation returns whether graceful degradation is enabled.
+// Default: ConfigEnableGracefulDegradationDefault (true).
+func EnableGracefulDegradation() bool {
+ return viper.GetBool(shared.ConfigKeyResourceLimitsEnableGracefulDeg)
}
-// GetEnableResourceMonitoring returns whether resource monitoring is enabled.
-func GetEnableResourceMonitoring() bool {
- return viper.GetBool("resourceLimits.enableResourceMonitoring")
+// EnableResourceMonitoring returns whether resource monitoring is enabled.
+// Default: ConfigEnableResourceMonitoringDefault (true).
+func EnableResourceMonitoring() bool {
+ return viper.GetBool(shared.ConfigKeyResourceLimitsEnableMonitoring)
+}
+
+// Template system getters
+
+// OutputTemplate returns the selected output template name.
+// Default: ConfigOutputTemplateDefault (empty string).
+func OutputTemplate() string {
+ return viper.GetString(shared.ConfigKeyOutputTemplate)
+}
+
+// metadataBool is a helper for metadata boolean configuration values.
+// All metadata flags default to false.
+func metadataBool(key string) bool {
+ return viper.GetBool("output.metadata." + key)
+}
+
+// TemplateMetadataIncludeStats returns whether to include stats in metadata.
+func TemplateMetadataIncludeStats() bool {
+ return metadataBool("includeStats")
+}
+
+// TemplateMetadataIncludeTimestamp returns whether to include timestamp in metadata.
+func TemplateMetadataIncludeTimestamp() bool {
+ return metadataBool("includeTimestamp")
+}
+
+// TemplateMetadataIncludeFileCount returns whether to include file count in metadata.
+func TemplateMetadataIncludeFileCount() bool {
+ return metadataBool("includeFileCount")
+}
+
+// TemplateMetadataIncludeSourcePath returns whether to include source path in metadata.
+func TemplateMetadataIncludeSourcePath() bool {
+ return metadataBool("includeSourcePath")
+}
+
+// TemplateMetadataIncludeFileTypes returns whether to include file types in metadata.
+func TemplateMetadataIncludeFileTypes() bool {
+ return metadataBool("includeFileTypes")
+}
+
+// TemplateMetadataIncludeProcessingTime returns whether to include processing time in metadata.
+func TemplateMetadataIncludeProcessingTime() bool {
+ return metadataBool("includeProcessingTime")
+}
+
+// TemplateMetadataIncludeTotalSize returns whether to include total size in metadata.
+func TemplateMetadataIncludeTotalSize() bool {
+ return metadataBool("includeTotalSize")
+}
+
+// TemplateMetadataIncludeMetrics returns whether to include metrics in metadata.
+func TemplateMetadataIncludeMetrics() bool {
+ return metadataBool("includeMetrics")
+}
+
+// markdownBool is a helper for markdown boolean configuration values.
+// All markdown flags default to false.
+func markdownBool(key string) bool {
+ return viper.GetBool("output.markdown." + key)
+}
+
+// TemplateMarkdownUseCodeBlocks returns whether to use code blocks in markdown.
+func TemplateMarkdownUseCodeBlocks() bool {
+ return markdownBool("useCodeBlocks")
+}
+
+// TemplateMarkdownIncludeLanguage returns whether to include language in code blocks.
+func TemplateMarkdownIncludeLanguage() bool {
+ return markdownBool("includeLanguage")
+}
+
+// TemplateMarkdownHeaderLevel returns the header level for file sections.
+// Default: ConfigMarkdownHeaderLevelDefault (0).
+func TemplateMarkdownHeaderLevel() int {
+ return viper.GetInt(shared.ConfigKeyOutputMarkdownHeaderLevel)
+}
+
+// TemplateMarkdownTableOfContents returns whether to include table of contents.
+func TemplateMarkdownTableOfContents() bool {
+ return markdownBool("tableOfContents")
+}
+
+// TemplateMarkdownUseCollapsible returns whether to use collapsible sections.
+func TemplateMarkdownUseCollapsible() bool {
+ return markdownBool("useCollapsible")
+}
+
+// TemplateMarkdownSyntaxHighlighting returns whether to enable syntax highlighting.
+func TemplateMarkdownSyntaxHighlighting() bool {
+ return markdownBool("syntaxHighlighting")
+}
+
+// TemplateMarkdownLineNumbers returns whether to include line numbers.
+func TemplateMarkdownLineNumbers() bool {
+ return markdownBool("lineNumbers")
+}
+
+// TemplateMarkdownFoldLongFiles returns whether to fold long files.
+func TemplateMarkdownFoldLongFiles() bool {
+ return markdownBool("foldLongFiles")
+}
+
+// TemplateMarkdownMaxLineLength returns the maximum line length.
+// Default: ConfigMarkdownMaxLineLengthDefault (0 = unlimited).
+func TemplateMarkdownMaxLineLength() int {
+ return viper.GetInt(shared.ConfigKeyOutputMarkdownMaxLineLen)
+}
+
+// TemplateCustomCSS returns custom CSS for markdown output.
+// Default: ConfigMarkdownCustomCSSDefault (empty string).
+func TemplateCustomCSS() string {
+ return viper.GetString(shared.ConfigKeyOutputMarkdownCustomCSS)
+}
+
+// TemplateCustomHeader returns custom header template.
+// Default: ConfigCustomHeaderDefault (empty string).
+func TemplateCustomHeader() string {
+ return viper.GetString(shared.ConfigKeyOutputCustomHeader)
+}
+
+// TemplateCustomFooter returns custom footer template.
+// Default: ConfigCustomFooterDefault (empty string).
+func TemplateCustomFooter() string {
+ return viper.GetString(shared.ConfigKeyOutputCustomFooter)
+}
+
+// TemplateCustomFileHeader returns custom file header template.
+// Default: ConfigCustomFileHeaderDefault (empty string).
+func TemplateCustomFileHeader() string {
+ return viper.GetString(shared.ConfigKeyOutputCustomFileHeader)
+}
+
+// TemplateCustomFileFooter returns custom file footer template.
+// Default: ConfigCustomFileFooterDefault (empty string).
+func TemplateCustomFileFooter() string {
+ return viper.GetString(shared.ConfigKeyOutputCustomFileFooter)
+}
+
+// TemplateVariables returns custom template variables.
+// Default: ConfigTemplateVariablesDefault (empty map).
+func TemplateVariables() map[string]string {
+ return viper.GetStringMapString(shared.ConfigKeyOutputVariables)
}
diff --git a/config/getters_test.go b/config/getters_test.go
new file mode 100644
index 0000000..708fdfd
--- /dev/null
+++ b/config/getters_test.go
@@ -0,0 +1,492 @@
+package config_test
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
+)
+
+// TestConfigGetters tests all configuration getter functions with comprehensive test coverage.
+func TestConfigGetters(t *testing.T) {
+ tests := []struct {
+ name string
+ configKey string
+ configValue any
+ getterFunc func() any
+ expectedResult any
+ }{
+ // Basic configuration getters
+ {
+ name: "GetFileSizeLimit",
+ configKey: "fileSizeLimit",
+ configValue: int64(1048576),
+ getterFunc: func() any { return config.FileSizeLimit() },
+ expectedResult: int64(1048576),
+ },
+ {
+ name: "GetIgnoredDirectories",
+ configKey: "ignoreDirectories",
+ configValue: []string{"node_modules", ".git", "dist"},
+ getterFunc: func() any { return config.IgnoredDirectories() },
+ expectedResult: []string{"node_modules", ".git", "dist"},
+ },
+ {
+ name: "GetMaxConcurrency",
+ configKey: "maxConcurrency",
+ configValue: 8,
+ getterFunc: func() any { return config.MaxConcurrency() },
+ expectedResult: 8,
+ },
+ {
+ name: "GetSupportedFormats",
+ configKey: "supportedFormats",
+ configValue: []string{"json", "yaml", "markdown"},
+ getterFunc: func() any { return config.SupportedFormats() },
+ expectedResult: []string{"json", "yaml", "markdown"},
+ },
+ {
+ name: "GetFilePatterns",
+ configKey: "filePatterns",
+ configValue: []string{"*.go", "*.js", "*.py"},
+ getterFunc: func() any { return config.FilePatterns() },
+ expectedResult: []string{"*.go", "*.js", "*.py"},
+ },
+
+ // File type configuration getters
+ {
+ name: "GetFileTypesEnabled",
+ configKey: "fileTypes.enabled",
+ configValue: true,
+ getterFunc: func() any { return config.FileTypesEnabled() },
+ expectedResult: true,
+ },
+ {
+ name: "GetCustomImageExtensions",
+ configKey: "fileTypes.customImageExtensions",
+ configValue: []string{".webp", ".avif"},
+ getterFunc: func() any { return config.CustomImageExtensions() },
+ expectedResult: []string{".webp", ".avif"},
+ },
+ {
+ name: "GetCustomBinaryExtensions",
+ configKey: "fileTypes.customBinaryExtensions",
+ configValue: []string{".custom", ".bin"},
+ getterFunc: func() any { return config.CustomBinaryExtensions() },
+ expectedResult: []string{".custom", ".bin"},
+ },
+ {
+ name: "GetDisabledImageExtensions",
+ configKey: "fileTypes.disabledImageExtensions",
+ configValue: []string{".gif", ".bmp"},
+ getterFunc: func() any { return config.DisabledImageExtensions() },
+ expectedResult: []string{".gif", ".bmp"},
+ },
+ {
+ name: "GetDisabledBinaryExtensions",
+ configKey: "fileTypes.disabledBinaryExtensions",
+ configValue: []string{".exe", ".dll"},
+ getterFunc: func() any { return config.DisabledBinaryExtensions() },
+ expectedResult: []string{".exe", ".dll"},
+ },
+ {
+ name: "GetDisabledLanguageExtensions",
+ configKey: "fileTypes.disabledLanguageExtensions",
+ configValue: []string{".sh", ".bat"},
+ getterFunc: func() any { return config.DisabledLanguageExtensions() },
+ expectedResult: []string{".sh", ".bat"},
+ },
+
+ // Backpressure configuration getters
+ {
+ name: "GetBackpressureEnabled",
+ configKey: "backpressure.enabled",
+ configValue: true,
+ getterFunc: func() any { return config.BackpressureEnabled() },
+ expectedResult: true,
+ },
+ {
+ name: "GetMaxPendingFiles",
+ configKey: "backpressure.maxPendingFiles",
+ configValue: 1000,
+ getterFunc: func() any { return config.MaxPendingFiles() },
+ expectedResult: 1000,
+ },
+ {
+ name: "GetMaxPendingWrites",
+ configKey: "backpressure.maxPendingWrites",
+ configValue: 100,
+ getterFunc: func() any { return config.MaxPendingWrites() },
+ expectedResult: 100,
+ },
+ {
+ name: "GetMaxMemoryUsage",
+ configKey: "backpressure.maxMemoryUsage",
+ configValue: int64(104857600),
+ getterFunc: func() any { return config.MaxMemoryUsage() },
+ expectedResult: int64(104857600),
+ },
+ {
+ name: "GetMemoryCheckInterval",
+ configKey: "backpressure.memoryCheckInterval",
+ configValue: 500,
+ getterFunc: func() any { return config.MemoryCheckInterval() },
+ expectedResult: 500,
+ },
+
+ // Resource limits configuration getters
+ {
+ name: "GetResourceLimitsEnabled",
+ configKey: "resourceLimits.enabled",
+ configValue: true,
+ getterFunc: func() any { return config.ResourceLimitsEnabled() },
+ expectedResult: true,
+ },
+ {
+ name: "GetMaxFiles",
+ configKey: "resourceLimits.maxFiles",
+ configValue: 5000,
+ getterFunc: func() any { return config.MaxFiles() },
+ expectedResult: 5000,
+ },
+ {
+ name: "GetMaxTotalSize",
+ configKey: "resourceLimits.maxTotalSize",
+ configValue: int64(1073741824),
+ getterFunc: func() any { return config.MaxTotalSize() },
+ expectedResult: int64(1073741824),
+ },
+ {
+ name: "GetFileProcessingTimeoutSec",
+ configKey: "resourceLimits.fileProcessingTimeoutSec",
+ configValue: 30,
+ getterFunc: func() any { return config.FileProcessingTimeoutSec() },
+ expectedResult: 30,
+ },
+ {
+ name: "GetOverallTimeoutSec",
+ configKey: "resourceLimits.overallTimeoutSec",
+ configValue: 1800,
+ getterFunc: func() any { return config.OverallTimeoutSec() },
+ expectedResult: 1800,
+ },
+ {
+ name: "GetMaxConcurrentReads",
+ configKey: "resourceLimits.maxConcurrentReads",
+ configValue: 10,
+ getterFunc: func() any { return config.MaxConcurrentReads() },
+ expectedResult: 10,
+ },
+ {
+ name: "GetRateLimitFilesPerSec",
+ configKey: "resourceLimits.rateLimitFilesPerSec",
+ configValue: 100,
+ getterFunc: func() any { return config.RateLimitFilesPerSec() },
+ expectedResult: 100,
+ },
+ {
+ name: "GetHardMemoryLimitMB",
+ configKey: "resourceLimits.hardMemoryLimitMB",
+ configValue: 512,
+ getterFunc: func() any { return config.HardMemoryLimitMB() },
+ expectedResult: 512,
+ },
+ {
+ name: "GetEnableGracefulDegradation",
+ configKey: "resourceLimits.enableGracefulDegradation",
+ configValue: true,
+ getterFunc: func() any { return config.EnableGracefulDegradation() },
+ expectedResult: true,
+ },
+ {
+ name: "GetEnableResourceMonitoring",
+ configKey: "resourceLimits.enableResourceMonitoring",
+ configValue: true,
+ getterFunc: func() any { return config.EnableResourceMonitoring() },
+ expectedResult: true,
+ },
+
+ // Template system configuration getters
+ {
+ name: "GetOutputTemplate",
+ configKey: "output.template",
+ configValue: "detailed",
+ getterFunc: func() any { return config.OutputTemplate() },
+ expectedResult: "detailed",
+ },
+ {
+ name: "GetTemplateMetadataIncludeStats",
+ configKey: "output.metadata.includeStats",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMetadataIncludeStats() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMetadataIncludeTimestamp",
+ configKey: "output.metadata.includeTimestamp",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMetadataIncludeTimestamp() },
+ expectedResult: false,
+ },
+ {
+ name: "GetTemplateMetadataIncludeFileCount",
+ configKey: "output.metadata.includeFileCount",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMetadataIncludeFileCount() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMetadataIncludeSourcePath",
+ configKey: "output.metadata.includeSourcePath",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMetadataIncludeSourcePath() },
+ expectedResult: false,
+ },
+ {
+ name: "GetTemplateMetadataIncludeFileTypes",
+ configKey: "output.metadata.includeFileTypes",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMetadataIncludeFileTypes() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMetadataIncludeProcessingTime",
+ configKey: "output.metadata.includeProcessingTime",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMetadataIncludeProcessingTime() },
+ expectedResult: false,
+ },
+ {
+ name: "GetTemplateMetadataIncludeTotalSize",
+ configKey: "output.metadata.includeTotalSize",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMetadataIncludeTotalSize() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMetadataIncludeMetrics",
+ configKey: "output.metadata.includeMetrics",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMetadataIncludeMetrics() },
+ expectedResult: false,
+ },
+
+ // Markdown template configuration getters
+ {
+ name: "GetTemplateMarkdownUseCodeBlocks",
+ configKey: "output.markdown.useCodeBlocks",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMarkdownUseCodeBlocks() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMarkdownIncludeLanguage",
+ configKey: "output.markdown.includeLanguage",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMarkdownIncludeLanguage() },
+ expectedResult: false,
+ },
+ {
+ name: "GetTemplateMarkdownHeaderLevel",
+ configKey: "output.markdown.headerLevel",
+ configValue: 3,
+ getterFunc: func() any { return config.TemplateMarkdownHeaderLevel() },
+ expectedResult: 3,
+ },
+ {
+ name: "GetTemplateMarkdownTableOfContents",
+ configKey: "output.markdown.tableOfContents",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMarkdownTableOfContents() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMarkdownUseCollapsible",
+ configKey: "output.markdown.useCollapsible",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMarkdownUseCollapsible() },
+ expectedResult: false,
+ },
+ {
+ name: "GetTemplateMarkdownSyntaxHighlighting",
+ configKey: "output.markdown.syntaxHighlighting",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMarkdownSyntaxHighlighting() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMarkdownLineNumbers",
+ configKey: "output.markdown.lineNumbers",
+ configValue: false,
+ getterFunc: func() any { return config.TemplateMarkdownLineNumbers() },
+ expectedResult: false,
+ },
+ {
+ name: "GetTemplateMarkdownFoldLongFiles",
+ configKey: "output.markdown.foldLongFiles",
+ configValue: true,
+ getterFunc: func() any { return config.TemplateMarkdownFoldLongFiles() },
+ expectedResult: true,
+ },
+ {
+ name: "GetTemplateMarkdownMaxLineLength",
+ configKey: "output.markdown.maxLineLength",
+ configValue: 120,
+ getterFunc: func() any { return config.TemplateMarkdownMaxLineLength() },
+ expectedResult: 120,
+ },
+ {
+ name: "GetTemplateCustomCSS",
+ configKey: "output.markdown.customCSS",
+ configValue: "body { color: blue; }",
+ getterFunc: func() any { return config.TemplateCustomCSS() },
+ expectedResult: "body { color: blue; }",
+ },
+
+ // Custom template configuration getters
+ {
+ name: "GetTemplateCustomHeader",
+ configKey: "output.custom.header",
+ configValue: "# Custom Header\n",
+ getterFunc: func() any { return config.TemplateCustomHeader() },
+ expectedResult: "# Custom Header\n",
+ },
+ {
+ name: "GetTemplateCustomFooter",
+ configKey: "output.custom.footer",
+ configValue: "---\nFooter content",
+ getterFunc: func() any { return config.TemplateCustomFooter() },
+ expectedResult: "---\nFooter content",
+ },
+ {
+ name: "GetTemplateCustomFileHeader",
+ configKey: "output.custom.fileHeader",
+ configValue: "## File: {{ .Path }}",
+ getterFunc: func() any { return config.TemplateCustomFileHeader() },
+ expectedResult: "## File: {{ .Path }}",
+ },
+ {
+ name: "GetTemplateCustomFileFooter",
+ configKey: "output.custom.fileFooter",
+ configValue: "---",
+ getterFunc: func() any { return config.TemplateCustomFileFooter() },
+ expectedResult: "---",
+ },
+
+ // Custom languages map getter
+ {
+ name: "GetCustomLanguages",
+ configKey: "fileTypes.customLanguages",
+ configValue: map[string]string{".vue": "vue", ".svelte": "svelte"},
+ getterFunc: func() any { return config.CustomLanguages() },
+ expectedResult: map[string]string{".vue": "vue", ".svelte": "svelte"},
+ },
+
+ // Template variables map getter
+ {
+ name: "GetTemplateVariables",
+ configKey: "output.variables",
+ configValue: map[string]string{"project": "gibidify", "version": "1.0"},
+ getterFunc: func() any { return config.TemplateVariables() },
+ expectedResult: map[string]string{"project": "gibidify", "version": "1.0"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Reset viper and set the specific configuration
+ testutil.SetViperKeys(t, map[string]any{
+ tt.configKey: tt.configValue,
+ })
+
+ // Call the getter function and compare results
+ result := tt.getterFunc()
+ if !reflect.DeepEqual(result, tt.expectedResult) {
+ t.Errorf("Test %s: expected %v (type %T), got %v (type %T)",
+ tt.name, tt.expectedResult, tt.expectedResult, result, result)
+ }
+ })
+ }
+}
+
+// TestConfigGettersWithDefaults tests that getters return appropriate default values
+// when configuration keys are not set.
+func TestConfigGettersWithDefaults(t *testing.T) {
+ // Reset viper to ensure clean state
+ testutil.ResetViperConfig(t, "")
+
+ // Test numeric getters with concrete default assertions
+ t.Run("numeric_getters", func(t *testing.T) {
+ assertInt64Getter(t, "FileSizeLimit", config.FileSizeLimit, shared.ConfigFileSizeLimitDefault)
+ assertIntGetter(t, "MaxConcurrency", config.MaxConcurrency, shared.ConfigMaxConcurrencyDefault)
+ assertIntGetter(t, "TemplateMarkdownHeaderLevel", config.TemplateMarkdownHeaderLevel,
+ shared.ConfigMarkdownHeaderLevelDefault)
+ assertIntGetter(t, "MaxFiles", config.MaxFiles, shared.ConfigMaxFilesDefault)
+ assertInt64Getter(t, "MaxTotalSize", config.MaxTotalSize, shared.ConfigMaxTotalSizeDefault)
+ assertIntGetter(t, "FileProcessingTimeoutSec", config.FileProcessingTimeoutSec,
+ shared.ConfigFileProcessingTimeoutSecDefault)
+ assertIntGetter(t, "OverallTimeoutSec", config.OverallTimeoutSec, shared.ConfigOverallTimeoutSecDefault)
+ assertIntGetter(t, "MaxConcurrentReads", config.MaxConcurrentReads, shared.ConfigMaxConcurrentReadsDefault)
+ assertIntGetter(t, "HardMemoryLimitMB", config.HardMemoryLimitMB, shared.ConfigHardMemoryLimitMBDefault)
+ })
+
+ // Test boolean getters with concrete default assertions
+ t.Run("boolean_getters", func(t *testing.T) {
+ assertBoolGetter(t, "FileTypesEnabled", config.FileTypesEnabled, shared.ConfigFileTypesEnabledDefault)
+ assertBoolGetter(t, "BackpressureEnabled", config.BackpressureEnabled, shared.ConfigBackpressureEnabledDefault)
+ assertBoolGetter(t, "ResourceLimitsEnabled", config.ResourceLimitsEnabled,
+ shared.ConfigResourceLimitsEnabledDefault)
+ assertBoolGetter(t, "EnableGracefulDegradation", config.EnableGracefulDegradation,
+ shared.ConfigEnableGracefulDegradationDefault)
+ assertBoolGetter(t, "TemplateMarkdownUseCodeBlocks", config.TemplateMarkdownUseCodeBlocks,
+ shared.ConfigMarkdownUseCodeBlocksDefault)
+ assertBoolGetter(t, "TemplateMarkdownTableOfContents", config.TemplateMarkdownTableOfContents,
+ shared.ConfigMarkdownTableOfContentsDefault)
+ })
+
+ // Test string getters with concrete default assertions
+ t.Run("string_getters", func(t *testing.T) {
+ assertStringGetter(t, "OutputTemplate", config.OutputTemplate, shared.ConfigOutputTemplateDefault)
+ assertStringGetter(t, "TemplateCustomCSS", config.TemplateCustomCSS, shared.ConfigMarkdownCustomCSSDefault)
+ assertStringGetter(t, "TemplateCustomHeader", config.TemplateCustomHeader, shared.ConfigCustomHeaderDefault)
+ assertStringGetter(t, "TemplateCustomFooter", config.TemplateCustomFooter, shared.ConfigCustomFooterDefault)
+ })
+}
+
+// assertInt64Getter tests an int64 getter returns the expected default value.
+func assertInt64Getter(t *testing.T, name string, getter func() int64, expected int64) {
+ t.Helper()
+ result := getter()
+ if result != expected {
+ t.Errorf("%s: expected %d, got %d", name, expected, result)
+ }
+}
+
+// assertIntGetter tests an int getter returns the expected default value.
+func assertIntGetter(t *testing.T, name string, getter func() int, expected int) {
+ t.Helper()
+ result := getter()
+ if result != expected {
+ t.Errorf("%s: expected %d, got %d", name, expected, result)
+ }
+}
+
+// assertBoolGetter tests a bool getter returns the expected default value.
+func assertBoolGetter(t *testing.T, name string, getter func() bool, expected bool) {
+ t.Helper()
+ result := getter()
+ if result != expected {
+ t.Errorf("%s: expected %v, got %v", name, expected, result)
+ }
+}
+
+// assertStringGetter tests a string getter returns the expected default value.
+func assertStringGetter(t *testing.T, name string, getter func() string, expected string) {
+ t.Helper()
+ result := getter()
+ if result != expected {
+ t.Errorf("%s: expected %q, got %q", name, expected, result)
+ }
+}
diff --git a/config/loader.go b/config/loader.go
index 18f6c68..0b09d71 100644
--- a/config/loader.go
+++ b/config/loader.go
@@ -1,15 +1,13 @@
+// Package config handles application configuration management.
package config
import (
- "flag"
"os"
"path/filepath"
- "sync/atomic"
- "github.com/sirupsen/logrus"
"github.com/spf13/viper"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// LoadConfig reads configuration from a YAML file.
@@ -17,115 +15,105 @@ import (
// 1. $XDG_CONFIG_HOME/gibidify/config.yaml
// 2. $HOME/.config/gibidify/config.yaml
// 3. The current directory as fallback.
-//
-// Note: LoadConfig relies on isRunningTest() which requires the testing package
-// to have registered its flags (e.g., via flag.Parse() or during test initialization).
-// If called too early (e.g., from init() or before TestMain), test detection may not work reliably.
-// For explicit control, use SetRunningInTest() before calling LoadConfig.
func LoadConfig() {
viper.SetConfigName("config")
- viper.SetConfigType("yaml")
+ viper.SetConfigType(shared.FormatYAML)
+
+ logger := shared.GetLogger()
if xdgConfig := os.Getenv("XDG_CONFIG_HOME"); xdgConfig != "" {
// Validate XDG_CONFIG_HOME for path traversal attempts
- if err := gibidiutils.ValidateConfigPath(xdgConfig); err != nil {
- logrus.Warnf("Invalid XDG_CONFIG_HOME path, using default config: %v", err)
+ if err := shared.ValidateConfigPath(xdgConfig); err != nil {
+ logger.Warnf("Invalid XDG_CONFIG_HOME path, using default config: %v", err)
} else {
- configPath := filepath.Join(xdgConfig, "gibidify")
+ configPath := filepath.Join(xdgConfig, shared.AppName)
viper.AddConfigPath(configPath)
}
} else if home, err := os.UserHomeDir(); err == nil {
- viper.AddConfigPath(filepath.Join(home, ".config", "gibidify"))
+ viper.AddConfigPath(filepath.Join(home, ".config", shared.AppName))
}
// Only add current directory if no config file named gibidify.yaml exists
// to avoid conflicts with the project's output file
- if _, err := os.Stat("gibidify.yaml"); os.IsNotExist(err) {
+ if _, err := os.Stat(shared.AppName + ".yaml"); os.IsNotExist(err) {
viper.AddConfigPath(".")
}
if err := viper.ReadInConfig(); err != nil {
- // Suppress this info-level log when running tests.
- // Prefer an explicit test flag (SetRunningInTest) but fall back to runtime detection.
- if runningInTest.Load() || isRunningTest() {
- // Keep a debug-level record so tests that enable debug can still see it.
- logrus.Debugf("Config file not found (tests): %v", err)
- } else {
- logrus.Infof("Config file not found, using default values: %v", err)
- }
- setDefaultConfig()
+ logger.Infof("Config file not found, using default values: %v", err)
+ SetDefaultConfig()
} else {
- logrus.Infof("Using config file: %s", viper.ConfigFileUsed())
+ logger.Infof("Using config file: %s", viper.ConfigFileUsed())
// Validate configuration after loading
if err := ValidateConfig(); err != nil {
- logrus.Warnf("Configuration validation failed: %v", err)
- logrus.Info("Falling back to default configuration")
+ logger.Warnf("Configuration validation failed: %v", err)
+ logger.Info("Falling back to default configuration")
// Reset viper and set defaults when validation fails
viper.Reset()
- setDefaultConfig()
+ SetDefaultConfig()
}
}
}
-// setDefaultConfig sets default configuration values.
-func setDefaultConfig() {
- viper.SetDefault("fileSizeLimit", DefaultFileSizeLimit)
- // Default ignored directories.
- viper.SetDefault("ignoreDirectories", []string{
- "vendor", "node_modules", ".git", "dist", "build", "target", "bower_components", "cache", "tmp",
- })
+// SetDefaultConfig sets default configuration values.
+func SetDefaultConfig() {
+ // File size limits
+ viper.SetDefault(shared.ConfigKeyFileSizeLimit, shared.ConfigFileSizeLimitDefault)
+ viper.SetDefault(shared.ConfigKeyIgnoreDirectories, shared.ConfigIgnoredDirectoriesDefault)
+ viper.SetDefault(shared.ConfigKeyMaxConcurrency, shared.ConfigMaxConcurrencyDefault)
+ viper.SetDefault(shared.ConfigKeySupportedFormats, shared.ConfigSupportedFormatsDefault)
+ viper.SetDefault(shared.ConfigKeyFilePatterns, shared.ConfigFilePatternsDefault)
// FileTypeRegistry defaults
- viper.SetDefault("fileTypes.enabled", true)
- viper.SetDefault("fileTypes.customImageExtensions", []string{})
- viper.SetDefault("fileTypes.customBinaryExtensions", []string{})
- viper.SetDefault("fileTypes.customLanguages", map[string]string{})
- viper.SetDefault("fileTypes.disabledImageExtensions", []string{})
- viper.SetDefault("fileTypes.disabledBinaryExtensions", []string{})
- viper.SetDefault("fileTypes.disabledLanguageExtensions", []string{})
+ viper.SetDefault(shared.ConfigKeyFileTypesEnabled, shared.ConfigFileTypesEnabledDefault)
+ viper.SetDefault(shared.ConfigKeyFileTypesCustomImageExtensions, shared.ConfigCustomImageExtensionsDefault)
+ viper.SetDefault(shared.ConfigKeyFileTypesCustomBinaryExtensions, shared.ConfigCustomBinaryExtensionsDefault)
+ viper.SetDefault(shared.ConfigKeyFileTypesCustomLanguages, shared.ConfigCustomLanguagesDefault)
+ viper.SetDefault(shared.ConfigKeyFileTypesDisabledImageExtensions, shared.ConfigDisabledImageExtensionsDefault)
+ viper.SetDefault(shared.ConfigKeyFileTypesDisabledBinaryExtensions, shared.ConfigDisabledBinaryExtensionsDefault)
+ viper.SetDefault(shared.ConfigKeyFileTypesDisabledLanguageExts, shared.ConfigDisabledLanguageExtensionsDefault)
- // Back-pressure and memory management defaults
- viper.SetDefault("backpressure.enabled", true)
- viper.SetDefault("backpressure.maxPendingFiles", 1000) // Max files in file channel buffer
- viper.SetDefault("backpressure.maxPendingWrites", 100) // Max writes in write channel buffer
- viper.SetDefault("backpressure.maxMemoryUsage", 104857600) // 100MB max memory usage
- viper.SetDefault("backpressure.memoryCheckInterval", 1000) // Check memory every 1000 files
+ // Backpressure and memory management defaults
+ viper.SetDefault(shared.ConfigKeyBackpressureEnabled, shared.ConfigBackpressureEnabledDefault)
+ viper.SetDefault(shared.ConfigKeyBackpressureMaxPendingFiles, shared.ConfigMaxPendingFilesDefault)
+ viper.SetDefault(shared.ConfigKeyBackpressureMaxPendingWrites, shared.ConfigMaxPendingWritesDefault)
+ viper.SetDefault(shared.ConfigKeyBackpressureMaxMemoryUsage, shared.ConfigMaxMemoryUsageDefault)
+ viper.SetDefault(shared.ConfigKeyBackpressureMemoryCheckInt, shared.ConfigMemoryCheckIntervalDefault)
// Resource limit defaults
- viper.SetDefault("resourceLimits.enabled", true)
- viper.SetDefault("resourceLimits.maxFiles", DefaultMaxFiles)
- viper.SetDefault("resourceLimits.maxTotalSize", DefaultMaxTotalSize)
- viper.SetDefault("resourceLimits.fileProcessingTimeoutSec", DefaultFileProcessingTimeoutSec)
- viper.SetDefault("resourceLimits.overallTimeoutSec", DefaultOverallTimeoutSec)
- viper.SetDefault("resourceLimits.maxConcurrentReads", DefaultMaxConcurrentReads)
- viper.SetDefault("resourceLimits.rateLimitFilesPerSec", DefaultRateLimitFilesPerSec)
- viper.SetDefault("resourceLimits.hardMemoryLimitMB", DefaultHardMemoryLimitMB)
- viper.SetDefault("resourceLimits.enableGracefulDegradation", true)
- viper.SetDefault("resourceLimits.enableResourceMonitoring", true)
-}
+ viper.SetDefault(shared.ConfigKeyResourceLimitsEnabled, shared.ConfigResourceLimitsEnabledDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsMaxFiles, shared.ConfigMaxFilesDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsMaxTotalSize, shared.ConfigMaxTotalSizeDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsFileProcessingTO, shared.ConfigFileProcessingTimeoutSecDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsOverallTO, shared.ConfigOverallTimeoutSecDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsMaxConcurrentReads, shared.ConfigMaxConcurrentReadsDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsRateLimitFilesPerSec, shared.ConfigRateLimitFilesPerSecDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsHardMemoryLimitMB, shared.ConfigHardMemoryLimitMBDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsEnableGracefulDeg, shared.ConfigEnableGracefulDegradationDefault)
+ viper.SetDefault(shared.ConfigKeyResourceLimitsEnableMonitoring, shared.ConfigEnableResourceMonitoringDefault)
-var runningInTest atomic.Bool
-
-// SetRunningInTest allows tests to explicitly indicate they are running under `go test`.
-// Call this from TestMain in tests to suppress noisy info logs while still allowing
-// debug-level output for tests that enable it.
-func SetRunningInTest(b bool) {
- runningInTest.Store(b)
-}
-
-// isRunningTest attempts to detect if the binary is running under `go test`.
-// Prefer checking for standard test flags registered by the testing package.
-// This is reliable when `go test` initializes the flag set.
-//
-// IMPORTANT: This function relies on flag.Lookup which returns nil if the testing
-// package hasn't registered test flags yet. Callers must invoke this after flag
-// parsing (or test flag registration) has occurred. If invoked too early (e.g.,
-// from init() or early in TestMain before flags are parsed), detection will fail.
-// For explicit control, use SetRunningInTest() instead.
-func isRunningTest() bool {
- // Look for the well-known test flags created by the testing package.
- // If any are present in the flag registry, we're running under `go test`.
- if flag.Lookup("test.v") != nil || flag.Lookup("test.run") != nil || flag.Lookup("test.bench") != nil {
- return true
- }
- return false
+ // Output configuration defaults
+ viper.SetDefault(shared.ConfigKeyOutputTemplate, shared.ConfigOutputTemplateDefault)
+ viper.SetDefault("output.metadata.includeStats", shared.ConfigMetadataIncludeStatsDefault)
+ viper.SetDefault("output.metadata.includeTimestamp", shared.ConfigMetadataIncludeTimestampDefault)
+ viper.SetDefault("output.metadata.includeFileCount", shared.ConfigMetadataIncludeFileCountDefault)
+ viper.SetDefault("output.metadata.includeSourcePath", shared.ConfigMetadataIncludeSourcePathDefault)
+ viper.SetDefault("output.metadata.includeFileTypes", shared.ConfigMetadataIncludeFileTypesDefault)
+ viper.SetDefault("output.metadata.includeProcessingTime", shared.ConfigMetadataIncludeProcessingTimeDefault)
+ viper.SetDefault("output.metadata.includeTotalSize", shared.ConfigMetadataIncludeTotalSizeDefault)
+ viper.SetDefault("output.metadata.includeMetrics", shared.ConfigMetadataIncludeMetricsDefault)
+ viper.SetDefault("output.markdown.useCodeBlocks", shared.ConfigMarkdownUseCodeBlocksDefault)
+ viper.SetDefault("output.markdown.includeLanguage", shared.ConfigMarkdownIncludeLanguageDefault)
+ viper.SetDefault(shared.ConfigKeyOutputMarkdownHeaderLevel, shared.ConfigMarkdownHeaderLevelDefault)
+ viper.SetDefault("output.markdown.tableOfContents", shared.ConfigMarkdownTableOfContentsDefault)
+ viper.SetDefault("output.markdown.useCollapsible", shared.ConfigMarkdownUseCollapsibleDefault)
+ viper.SetDefault("output.markdown.syntaxHighlighting", shared.ConfigMarkdownSyntaxHighlightingDefault)
+ viper.SetDefault("output.markdown.lineNumbers", shared.ConfigMarkdownLineNumbersDefault)
+ viper.SetDefault("output.markdown.foldLongFiles", shared.ConfigMarkdownFoldLongFilesDefault)
+ viper.SetDefault(shared.ConfigKeyOutputMarkdownMaxLineLen, shared.ConfigMarkdownMaxLineLengthDefault)
+ viper.SetDefault(shared.ConfigKeyOutputMarkdownCustomCSS, shared.ConfigMarkdownCustomCSSDefault)
+ viper.SetDefault(shared.ConfigKeyOutputCustomHeader, shared.ConfigCustomHeaderDefault)
+ viper.SetDefault(shared.ConfigKeyOutputCustomFooter, shared.ConfigCustomFooterDefault)
+ viper.SetDefault(shared.ConfigKeyOutputCustomFileHeader, shared.ConfigCustomFileHeaderDefault)
+ viper.SetDefault(shared.ConfigKeyOutputCustomFileFooter, shared.ConfigCustomFileFooterDefault)
+ viper.SetDefault(shared.ConfigKeyOutputVariables, shared.ConfigTemplateVariablesDefault)
}
diff --git a/config/loader_test.go b/config/loader_test.go
index 6290f02..0661d40 100644
--- a/config/loader_test.go
+++ b/config/loader_test.go
@@ -7,6 +7,7 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
@@ -26,14 +27,14 @@ func TestDefaultConfig(t *testing.T) {
testutil.ResetViperConfig(t, tmpDir)
// Check defaults
- defaultSizeLimit := config.GetFileSizeLimit()
+ defaultSizeLimit := config.FileSizeLimit()
if defaultSizeLimit != defaultFileSizeLimit {
t.Errorf("Expected default file size limit of 5242880, got %d", defaultSizeLimit)
}
- ignoredDirs := config.GetIgnoredDirectories()
+ ignoredDirs := config.IgnoredDirectories()
if len(ignoredDirs) == 0 {
- t.Errorf("Expected some default ignored directories, got none")
+ t.Error("Expected some default ignored directories, got none")
}
// Restore Viper state
@@ -76,13 +77,11 @@ ignoreDirectories:
// TestLoadConfigWithValidation tests that invalid config files fall back to defaults.
func TestLoadConfigWithValidation(t *testing.T) {
// Create a temporary config file with invalid content
- configContent := `
-fileSizeLimit: 100
-ignoreDirectories:
- - node_modules
- - ""
- - .git
-`
+ configContent := "fileSizeLimit: 100\n" +
+ "ignoreDirectories:\n" +
+ "- node_modules\n" +
+ "- \"\"\n" +
+ "- .git\n"
tempDir := t.TempDir()
configFile := tempDir + "/config.yaml"
@@ -100,13 +99,13 @@ ignoreDirectories:
config.LoadConfig()
// Should have fallen back to defaults due to validation failure
- if config.GetFileSizeLimit() != int64(config.DefaultFileSizeLimit) {
- t.Errorf("Expected default file size limit after validation failure, got %d", config.GetFileSizeLimit())
+ if config.FileSizeLimit() != int64(shared.ConfigFileSizeLimitDefault) {
+ t.Errorf("Expected default file size limit after validation failure, got %d", config.FileSizeLimit())
}
- if containsString(config.GetIgnoredDirectories(), "") {
+ if containsString(config.IgnoredDirectories(), "") {
t.Errorf(
"Expected ignored directories not to contain empty string after validation failure, got %v",
- config.GetIgnoredDirectories(),
+ config.IgnoredDirectories(),
)
}
}
@@ -119,5 +118,6 @@ func containsString(slice []string, item string) bool {
return true
}
}
+
return false
}
diff --git a/config/validation.go b/config/validation.go
index 373598d..3a310bd 100644
--- a/config/validation.go
+++ b/config/validation.go
@@ -1,3 +1,4 @@
+// Package config handles application configuration management.
package config
import (
@@ -6,591 +7,611 @@ import (
"github.com/spf13/viper"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
-// validateFileSizeLimit validates the file size limit configuration.
-func validateFileSizeLimit() []string {
- var errors []string
- fileSizeLimit := viper.GetInt64("fileSizeLimit")
- if fileSizeLimit < MinFileSizeLimit {
- errors = append(
- errors,
- fmt.Sprintf("fileSizeLimit (%d) is below minimum (%d)", fileSizeLimit, MinFileSizeLimit),
- )
- }
- if fileSizeLimit > MaxFileSizeLimit {
- errors = append(
- errors,
- fmt.Sprintf("fileSizeLimit (%d) exceeds maximum (%d)", fileSizeLimit, MaxFileSizeLimit),
- )
- }
- return errors
-}
-
-// validateIgnoreDirectories validates the ignore directories configuration.
-func validateIgnoreDirectories() []string {
- var errors []string
- ignoreDirectories := viper.GetStringSlice("ignoreDirectories")
- for i, dir := range ignoreDirectories {
- dir = strings.TrimSpace(dir)
- if dir == "" {
- errors = append(errors, fmt.Sprintf("ignoreDirectories[%d] is empty", i))
- continue
- }
- if strings.Contains(dir, "/") {
- errors = append(
- errors,
- fmt.Sprintf(
- "ignoreDirectories[%d] (%s) contains path separator - only directory names are allowed",
- i,
- dir,
- ),
- )
- }
- if strings.HasPrefix(dir, ".") && dir != ".git" && dir != ".vscode" && dir != ".idea" {
- errors = append(
- errors,
- fmt.Sprintf("ignoreDirectories[%d] (%s) starts with dot - this may cause unexpected behavior", i, dir),
- )
- }
- }
- return errors
-}
-
-// validateSupportedFormats validates the supported output formats configuration.
-func validateSupportedFormats() []string {
- var errors []string
- if viper.IsSet("supportedFormats") {
- supportedFormats := viper.GetStringSlice("supportedFormats")
- validFormats := map[string]bool{"json": true, "yaml": true, "markdown": true}
- for i, format := range supportedFormats {
- format = strings.ToLower(strings.TrimSpace(format))
- if !validFormats[format] {
- errors = append(
- errors,
- fmt.Sprintf("supportedFormats[%d] (%s) is not a valid format (json, yaml, markdown)", i, format),
- )
- }
- }
- }
- return errors
-}
-
-// validateConcurrencySettings validates the concurrency settings configuration.
-func validateConcurrencySettings() []string {
- var errors []string
- if viper.IsSet("maxConcurrency") {
- maxConcurrency := viper.GetInt("maxConcurrency")
- if maxConcurrency < 1 {
- errors = append(
- errors,
- fmt.Sprintf("maxConcurrency (%d) must be at least 1", maxConcurrency),
- )
- }
- if maxConcurrency > 100 {
- errors = append(
- errors,
- fmt.Sprintf("maxConcurrency (%d) is unreasonably high (max 100)", maxConcurrency),
- )
- }
- }
- return errors
-}
-
-// validateFilePatterns validates the file patterns configuration.
-func validateFilePatterns() []string {
- var errors []string
- if viper.IsSet("filePatterns") {
- filePatterns := viper.GetStringSlice("filePatterns")
- for i, pattern := range filePatterns {
- pattern = strings.TrimSpace(pattern)
- if pattern == "" {
- errors = append(errors, fmt.Sprintf("filePatterns[%d] is empty", i))
- continue
- }
- // Basic validation - patterns should contain at least one alphanumeric character
- if !strings.ContainsAny(pattern, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") {
- errors = append(
- errors,
- fmt.Sprintf("filePatterns[%d] (%s) appears to be invalid", i, pattern),
- )
- }
- }
- }
- return errors
-}
-
-// validateFileTypes validates the FileTypeRegistry configuration.
-// validateCustomImageExtensions validates custom image extensions configuration.
-func validateCustomImageExtensions() []string {
- var errors []string
- if !viper.IsSet("fileTypes.customImageExtensions") {
- return errors
- }
-
- customImages := viper.GetStringSlice("fileTypes.customImageExtensions")
- for i, ext := range customImages {
- ext = strings.TrimSpace(ext)
- if ext == "" {
- errors = append(
- errors,
- fmt.Sprintf("fileTypes.customImageExtensions[%d] is empty", i),
- )
- continue
- }
- if !strings.HasPrefix(ext, ".") {
- errors = append(
- errors,
- fmt.Sprintf("fileTypes.customImageExtensions[%d] (%s) must start with a dot", i, ext),
- )
- }
- }
- return errors
-}
-
-// validateCustomBinaryExtensions validates custom binary extensions configuration.
-func validateCustomBinaryExtensions() []string {
- var errors []string
- if !viper.IsSet("fileTypes.customBinaryExtensions") {
- return errors
- }
-
- customBinary := viper.GetStringSlice("fileTypes.customBinaryExtensions")
- for i, ext := range customBinary {
- ext = strings.TrimSpace(ext)
- if ext == "" {
- errors = append(
- errors,
- fmt.Sprintf("fileTypes.customBinaryExtensions[%d] is empty", i),
- )
- continue
- }
- if !strings.HasPrefix(ext, ".") {
- errors = append(
- errors,
- fmt.Sprintf("fileTypes.customBinaryExtensions[%d] (%s) must start with a dot", i, ext),
- )
- }
- }
- return errors
-}
-
-// validateCustomLanguages validates custom languages configuration.
-func validateCustomLanguages() []string {
- var errors []string
- if !viper.IsSet("fileTypes.customLanguages") {
- return errors
- }
-
- customLangs := viper.GetStringMapString("fileTypes.customLanguages")
- for ext, lang := range customLangs {
- ext = strings.TrimSpace(ext)
- lang = strings.TrimSpace(lang)
- if ext == "" {
- errors = append(errors, "fileTypes.customLanguages contains empty extension key")
- continue
- }
- if !strings.HasPrefix(ext, ".") {
- errors = append(
- errors,
- fmt.Sprintf("fileTypes.customLanguages extension (%s) must start with a dot", ext),
- )
- }
- if lang == "" {
- errors = append(
- errors,
- fmt.Sprintf("fileTypes.customLanguages[%s] has empty language value", ext),
- )
- }
- }
- return errors
-}
-
-// validateFileTypes validates the FileTypeRegistry configuration.
-func validateFileTypes() []string {
- var errors []string
- errors = append(errors, validateCustomImageExtensions()...)
- errors = append(errors, validateCustomBinaryExtensions()...)
- errors = append(errors, validateCustomLanguages()...)
- return errors
-}
-
-// validateBackpressureConfig validates the back-pressure configuration.
-// validateBackpressureMaxPendingFiles validates max pending files configuration.
-func validateBackpressureMaxPendingFiles() []string {
- var errors []string
- if !viper.IsSet("backpressure.maxPendingFiles") {
- return errors
- }
-
- maxPendingFiles := viper.GetInt("backpressure.maxPendingFiles")
- if maxPendingFiles < 1 {
- errors = append(
- errors,
- fmt.Sprintf("backpressure.maxPendingFiles (%d) must be at least 1", maxPendingFiles),
- )
- }
- if maxPendingFiles > 100000 {
- errors = append(
- errors,
- fmt.Sprintf("backpressure.maxPendingFiles (%d) is unreasonably high (max 100000)", maxPendingFiles),
- )
- }
- return errors
-}
-
-// validateBackpressureMaxPendingWrites validates max pending writes configuration.
-func validateBackpressureMaxPendingWrites() []string {
- var errors []string
- if !viper.IsSet("backpressure.maxPendingWrites") {
- return errors
- }
-
- maxPendingWrites := viper.GetInt("backpressure.maxPendingWrites")
- if maxPendingWrites < 1 {
- errors = append(
- errors,
- fmt.Sprintf("backpressure.maxPendingWrites (%d) must be at least 1", maxPendingWrites),
- )
- }
- if maxPendingWrites > 10000 {
- errors = append(
- errors,
- fmt.Sprintf("backpressure.maxPendingWrites (%d) is unreasonably high (max 10000)", maxPendingWrites),
- )
- }
- return errors
-}
-
-// validateBackpressureMaxMemoryUsage validates max memory usage configuration.
-func validateBackpressureMaxMemoryUsage() []string {
- var errors []string
- if !viper.IsSet("backpressure.maxMemoryUsage") {
- return errors
- }
-
- maxMemoryUsage := viper.GetInt64("backpressure.maxMemoryUsage")
- if maxMemoryUsage < 1048576 { // 1MB minimum
- errors = append(
- errors,
- fmt.Sprintf("backpressure.maxMemoryUsage (%d) must be at least 1MB (1048576 bytes)", maxMemoryUsage),
- )
- }
- if maxMemoryUsage > 104857600 { // 100MB maximum
- errors = append(
- errors,
- fmt.Sprintf("backpressure.maxMemoryUsage (%d) is unreasonably high (max 100MB)", maxMemoryUsage),
- )
- }
- return errors
-}
-
-// validateBackpressureMemoryCheckInterval validates memory check interval configuration.
-func validateBackpressureMemoryCheckInterval() []string {
- var errors []string
- if !viper.IsSet("backpressure.memoryCheckInterval") {
- return errors
- }
-
- interval := viper.GetInt("backpressure.memoryCheckInterval")
- if interval < 1 {
- errors = append(
- errors,
- fmt.Sprintf("backpressure.memoryCheckInterval (%d) must be at least 1", interval),
- )
- }
- if interval > 100000 {
- errors = append(
- errors,
- fmt.Sprintf("backpressure.memoryCheckInterval (%d) is unreasonably high (max 100000)", interval),
- )
- }
- return errors
-}
-
-// validateBackpressureConfig validates the back-pressure configuration.
-func validateBackpressureConfig() []string {
- var errors []string
- errors = append(errors, validateBackpressureMaxPendingFiles()...)
- errors = append(errors, validateBackpressureMaxPendingWrites()...)
- errors = append(errors, validateBackpressureMaxMemoryUsage()...)
- errors = append(errors, validateBackpressureMemoryCheckInterval()...)
- return errors
-}
-
-// validateResourceLimits validates the resource limits configuration.
-// validateResourceLimitsMaxFiles validates max files configuration.
-func validateResourceLimitsMaxFiles() []string {
- var errors []string
- if !viper.IsSet("resourceLimits.maxFiles") {
- return errors
- }
-
- maxFiles := viper.GetInt("resourceLimits.maxFiles")
- if maxFiles < MinMaxFiles {
- errors = append(
- errors,
- fmt.Sprintf("resourceLimits.maxFiles (%d) must be at least %d", maxFiles, MinMaxFiles),
- )
- }
- if maxFiles > MaxMaxFiles {
- errors = append(
- errors,
- fmt.Sprintf("resourceLimits.maxFiles (%d) exceeds maximum (%d)", maxFiles, MaxMaxFiles),
- )
- }
- return errors
-}
-
-// validateResourceLimitsMaxTotalSize validates max total size configuration.
-func validateResourceLimitsMaxTotalSize() []string {
- var errors []string
- if !viper.IsSet("resourceLimits.maxTotalSize") {
- return errors
- }
-
- maxTotalSize := viper.GetInt64("resourceLimits.maxTotalSize")
- if maxTotalSize < MinMaxTotalSize {
- errors = append(
- errors,
- fmt.Sprintf("resourceLimits.maxTotalSize (%d) must be at least %d", maxTotalSize, MinMaxTotalSize),
- )
- }
- if maxTotalSize > MaxMaxTotalSize {
- errors = append(
- errors,
- fmt.Sprintf("resourceLimits.maxTotalSize (%d) exceeds maximum (%d)", maxTotalSize, MaxMaxTotalSize),
- )
- }
- return errors
-}
-
-// validateResourceLimitsTimeouts validates timeout configurations.
-func validateResourceLimitsTimeouts() []string {
- var errors []string
-
- if viper.IsSet("resourceLimits.fileProcessingTimeoutSec") {
- timeout := viper.GetInt("resourceLimits.fileProcessingTimeoutSec")
- if timeout < MinFileProcessingTimeoutSec {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.fileProcessingTimeoutSec (%d) must be at least %d",
- timeout,
- MinFileProcessingTimeoutSec,
- ),
- )
- }
- if timeout > MaxFileProcessingTimeoutSec {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.fileProcessingTimeoutSec (%d) exceeds maximum (%d)",
- timeout,
- MaxFileProcessingTimeoutSec,
- ),
- )
- }
- }
-
- if viper.IsSet("resourceLimits.overallTimeoutSec") {
- timeout := viper.GetInt("resourceLimits.overallTimeoutSec")
- if timeout < MinOverallTimeoutSec {
- errors = append(
- errors,
- fmt.Sprintf("resourceLimits.overallTimeoutSec (%d) must be at least %d", timeout, MinOverallTimeoutSec),
- )
- }
- if timeout > MaxOverallTimeoutSec {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.overallTimeoutSec (%d) exceeds maximum (%d)",
- timeout,
- MaxOverallTimeoutSec,
- ),
- )
- }
- }
-
- return errors
-}
-
-// validateResourceLimitsConcurrency validates concurrency configurations.
-func validateResourceLimitsConcurrency() []string {
- var errors []string
-
- if viper.IsSet("resourceLimits.maxConcurrentReads") {
- maxReads := viper.GetInt("resourceLimits.maxConcurrentReads")
- if maxReads < MinMaxConcurrentReads {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.maxConcurrentReads (%d) must be at least %d",
- maxReads,
- MinMaxConcurrentReads,
- ),
- )
- }
- if maxReads > MaxMaxConcurrentReads {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.maxConcurrentReads (%d) exceeds maximum (%d)",
- maxReads,
- MaxMaxConcurrentReads,
- ),
- )
- }
- }
-
- if viper.IsSet("resourceLimits.rateLimitFilesPerSec") {
- rateLimit := viper.GetInt("resourceLimits.rateLimitFilesPerSec")
- if rateLimit < MinRateLimitFilesPerSec {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.rateLimitFilesPerSec (%d) must be at least %d",
- rateLimit,
- MinRateLimitFilesPerSec,
- ),
- )
- }
- if rateLimit > MaxRateLimitFilesPerSec {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.rateLimitFilesPerSec (%d) exceeds maximum (%d)",
- rateLimit,
- MaxRateLimitFilesPerSec,
- ),
- )
- }
- }
-
- return errors
-}
-
-// validateResourceLimitsMemory validates memory limit configuration.
-func validateResourceLimitsMemory() []string {
- var errors []string
- if !viper.IsSet("resourceLimits.hardMemoryLimitMB") {
- return errors
- }
-
- memLimit := viper.GetInt("resourceLimits.hardMemoryLimitMB")
- if memLimit < MinHardMemoryLimitMB {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.hardMemoryLimitMB (%d) must be at least %d",
- memLimit,
- MinHardMemoryLimitMB,
- ),
- )
- }
- if memLimit > MaxHardMemoryLimitMB {
- errors = append(
- errors,
- fmt.Sprintf(
- "resourceLimits.hardMemoryLimitMB (%d) exceeds maximum (%d)",
- memLimit,
- MaxHardMemoryLimitMB,
- ),
- )
- }
- return errors
-}
-
-// validateResourceLimits validates the resource limits configuration.
-func validateResourceLimits() []string {
- var errors []string
- errors = append(errors, validateResourceLimitsMaxFiles()...)
- errors = append(errors, validateResourceLimitsMaxTotalSize()...)
- errors = append(errors, validateResourceLimitsTimeouts()...)
- errors = append(errors, validateResourceLimitsConcurrency()...)
- errors = append(errors, validateResourceLimitsMemory()...)
- return errors
-}
-
// ValidateConfig validates the loaded configuration.
func ValidateConfig() error {
var validationErrors []string
- // Collect validation errors from all validation helpers
- validationErrors = append(validationErrors, validateFileSizeLimit()...)
- validationErrors = append(validationErrors, validateIgnoreDirectories()...)
- validationErrors = append(validationErrors, validateSupportedFormats()...)
- validationErrors = append(validationErrors, validateConcurrencySettings()...)
- validationErrors = append(validationErrors, validateFilePatterns()...)
- validationErrors = append(validationErrors, validateFileTypes()...)
- validationErrors = append(validationErrors, validateBackpressureConfig()...)
- validationErrors = append(validationErrors, validateResourceLimits()...)
+ // Validate basic settings
+ validationErrors = append(validationErrors, validateBasicSettings()...)
+ validationErrors = append(validationErrors, validateFileTypeSettings()...)
+ validationErrors = append(validationErrors, validateBackpressureSettings()...)
+ validationErrors = append(validationErrors, validateResourceLimitSettings()...)
if len(validationErrors) > 0 {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeConfiguration,
- gibidiutils.CodeConfigValidation,
+ return shared.NewStructuredError(
+ shared.ErrorTypeConfiguration,
+ shared.CodeConfigValidation,
"configuration validation failed: "+strings.Join(validationErrors, "; "),
"",
- map[string]interface{}{"validation_errors": validationErrors},
+ map[string]any{"validation_errors": validationErrors},
)
}
return nil
}
-// ValidateFileSize checks if a file size is within the configured limit.
-func ValidateFileSize(size int64) error {
- limit := GetFileSizeLimit()
- if size > limit {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationSize,
- fmt.Sprintf("file size (%d bytes) exceeds limit (%d bytes)", size, limit),
- "",
- map[string]interface{}{"file_size": size, "size_limit": limit},
+// validateBasicSettings validates basic configuration settings.
+func validateBasicSettings() []string {
+ var validationErrors []string
+
+ validationErrors = append(validationErrors, validateFileSizeLimit()...)
+ validationErrors = append(validationErrors, validateIgnoreDirectories()...)
+ validationErrors = append(validationErrors, validateSupportedFormats()...)
+ validationErrors = append(validationErrors, validateConcurrencySettings()...)
+ validationErrors = append(validationErrors, validateFilePatterns()...)
+
+ return validationErrors
+}
+
+// validateFileSizeLimit validates the file size limit setting.
+func validateFileSizeLimit() []string {
+ var validationErrors []string
+
+ fileSizeLimit := viper.GetInt64(shared.ConfigKeyFileSizeLimit)
+ if fileSizeLimit < shared.ConfigFileSizeLimitMin {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("fileSizeLimit (%d) is below minimum (%d)", fileSizeLimit, shared.ConfigFileSizeLimitMin),
)
}
+ if fileSizeLimit > shared.ConfigFileSizeLimitMax {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("fileSizeLimit (%d) exceeds maximum (%d)", fileSizeLimit, shared.ConfigFileSizeLimitMax),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateIgnoreDirectories validates the ignore directories setting.
+func validateIgnoreDirectories() []string {
+ var validationErrors []string
+
+ ignoreDirectories := viper.GetStringSlice(shared.ConfigKeyIgnoreDirectories)
+ for i, dir := range ignoreDirectories {
+ if errMsg := validateEmptyElement(shared.ConfigKeyIgnoreDirectories, dir, i); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+
+ continue
+ }
+ dir = strings.TrimSpace(dir)
+ if strings.Contains(dir, "/") {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf(
+ "ignoreDirectories[%d] (%s) contains path separator - only directory names are allowed", i, dir,
+ ),
+ )
+ }
+ if strings.HasPrefix(dir, ".") && dir != ".git" && dir != ".vscode" && dir != ".idea" {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("ignoreDirectories[%d] (%s) starts with dot - this may cause unexpected behavior", i, dir),
+ )
+ }
+ }
+
+ return validationErrors
+}
+
+// validateSupportedFormats validates the supported formats setting.
+func validateSupportedFormats() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeySupportedFormats) {
+ return validationErrors
+ }
+
+ supportedFormats := viper.GetStringSlice(shared.ConfigKeySupportedFormats)
+ validFormats := map[string]bool{shared.FormatJSON: true, shared.FormatYAML: true, shared.FormatMarkdown: true}
+ for i, format := range supportedFormats {
+ format = strings.ToLower(strings.TrimSpace(format))
+ if !validFormats[format] {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("supportedFormats[%d] (%s) is not a valid format (json, yaml, markdown)", i, format),
+ )
+ }
+ }
+
+ return validationErrors
+}
+
+// validateConcurrencySettings validates the concurrency settings.
+func validateConcurrencySettings() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyMaxConcurrency) {
+ return validationErrors
+ }
+
+ maxConcurrency := viper.GetInt(shared.ConfigKeyMaxConcurrency)
+ if maxConcurrency < 1 {
+ validationErrors = append(
+ validationErrors, fmt.Sprintf("maxConcurrency (%d) must be at least 1", maxConcurrency),
+ )
+ }
+ if maxConcurrency > 100 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("maxConcurrency (%d) is unreasonably high (max 100)", maxConcurrency),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateFilePatterns validates the file patterns setting.
+func validateFilePatterns() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyFilePatterns) {
+ return validationErrors
+ }
+
+ filePatterns := viper.GetStringSlice(shared.ConfigKeyFilePatterns)
+ for i, pattern := range filePatterns {
+ if errMsg := validateEmptyElement(shared.ConfigKeyFilePatterns, pattern, i); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+
+ continue
+ }
+ pattern = strings.TrimSpace(pattern)
+ // Basic validation - patterns should contain at least one alphanumeric character
+ if !strings.ContainsAny(pattern, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("filePatterns[%d] (%s) appears to be invalid", i, pattern),
+ )
+ }
+ }
+
+ return validationErrors
+}
+
+// validateFileTypeSettings validates file type configuration settings.
+func validateFileTypeSettings() []string {
+ var validationErrors []string
+
+ validationErrors = append(validationErrors, validateCustomImageExtensions()...)
+ validationErrors = append(validationErrors, validateCustomBinaryExtensions()...)
+ validationErrors = append(validationErrors, validateCustomLanguages()...)
+
+ return validationErrors
+}
+
+// validateCustomImageExtensions validates custom image extensions.
+func validateCustomImageExtensions() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyFileTypesCustomImageExtensions) {
+ return validationErrors
+ }
+
+ customImages := viper.GetStringSlice(shared.ConfigKeyFileTypesCustomImageExtensions)
+ for i, ext := range customImages {
+ if errMsg := validateEmptyElement(shared.ConfigKeyFileTypesCustomImageExtensions, ext, i); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+
+ continue
+ }
+ ext = strings.TrimSpace(ext)
+ if errMsg := validateDotPrefix(shared.ConfigKeyFileTypesCustomImageExtensions, ext, i); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+ }
+ }
+
+ return validationErrors
+}
+
+// validateCustomBinaryExtensions validates custom binary extensions.
+func validateCustomBinaryExtensions() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyFileTypesCustomBinaryExtensions) {
+ return validationErrors
+ }
+
+ customBinary := viper.GetStringSlice(shared.ConfigKeyFileTypesCustomBinaryExtensions)
+ for i, ext := range customBinary {
+ if errMsg := validateEmptyElement(shared.ConfigKeyFileTypesCustomBinaryExtensions, ext, i); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+
+ continue
+ }
+ ext = strings.TrimSpace(ext)
+ if errMsg := validateDotPrefix(shared.ConfigKeyFileTypesCustomBinaryExtensions, ext, i); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+ }
+ }
+
+ return validationErrors
+}
+
+// validateCustomLanguages validates custom language mappings.
+func validateCustomLanguages() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyFileTypesCustomLanguages) {
+ return validationErrors
+ }
+
+ customLangs := viper.GetStringMapString(shared.ConfigKeyFileTypesCustomLanguages)
+ for ext, lang := range customLangs {
+ ext = strings.TrimSpace(ext)
+ if ext == "" {
+ validationErrors = append(
+ validationErrors,
+ shared.ConfigKeyFileTypesCustomLanguages+" contains empty extension key",
+ )
+
+ continue
+ }
+ if errMsg := validateDotPrefixMap(shared.ConfigKeyFileTypesCustomLanguages, ext); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+ }
+ if errMsg := validateEmptyMapValue(shared.ConfigKeyFileTypesCustomLanguages, ext, lang); errMsg != "" {
+ validationErrors = append(validationErrors, errMsg)
+ }
+ }
+
+ return validationErrors
+}
+
+// validateBackpressureSettings validates back-pressure configuration settings.
+func validateBackpressureSettings() []string {
+ var validationErrors []string
+
+ validationErrors = append(validationErrors, validateMaxPendingFiles()...)
+ validationErrors = append(validationErrors, validateMaxPendingWrites()...)
+ validationErrors = append(validationErrors, validateMaxMemoryUsage()...)
+ validationErrors = append(validationErrors, validateMemoryCheckInterval()...)
+
+ return validationErrors
+}
+
+// validateMaxPendingFiles validates backpressure.maxPendingFiles setting.
+func validateMaxPendingFiles() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyBackpressureMaxPendingFiles) {
+ return validationErrors
+ }
+
+ maxPendingFiles := viper.GetInt(shared.ConfigKeyBackpressureMaxPendingFiles)
+ if maxPendingFiles < 1 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.maxPendingFiles (%d) must be at least 1", maxPendingFiles),
+ )
+ }
+ if maxPendingFiles > 100000 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.maxPendingFiles (%d) is unreasonably high (max 100000)", maxPendingFiles),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateMaxPendingWrites validates backpressure.maxPendingWrites setting.
+func validateMaxPendingWrites() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyBackpressureMaxPendingWrites) {
+ return validationErrors
+ }
+
+ maxPendingWrites := viper.GetInt(shared.ConfigKeyBackpressureMaxPendingWrites)
+ if maxPendingWrites < 1 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.maxPendingWrites (%d) must be at least 1", maxPendingWrites),
+ )
+ }
+ if maxPendingWrites > 10000 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.maxPendingWrites (%d) is unreasonably high (max 10000)", maxPendingWrites),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateMaxMemoryUsage validates backpressure.maxMemoryUsage setting.
+func validateMaxMemoryUsage() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyBackpressureMaxMemoryUsage) {
+ return validationErrors
+ }
+
+ maxMemoryUsage := viper.GetInt64(shared.ConfigKeyBackpressureMaxMemoryUsage)
+ minMemory := int64(shared.BytesPerMB) // 1MB minimum
+ maxMemory := int64(10 * shared.BytesPerGB) // 10GB maximum
+ if maxMemoryUsage < minMemory {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.maxMemoryUsage (%d) must be at least 1MB (%d bytes)", maxMemoryUsage, minMemory),
+ )
+ }
+ if maxMemoryUsage > maxMemory { // 10GB maximum
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.maxMemoryUsage (%d) is unreasonably high (max 10GB)", maxMemoryUsage),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateMemoryCheckInterval validates backpressure.memoryCheckInterval setting.
+func validateMemoryCheckInterval() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyBackpressureMemoryCheckInt) {
+ return validationErrors
+ }
+
+ interval := viper.GetInt(shared.ConfigKeyBackpressureMemoryCheckInt)
+ if interval < 1 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.memoryCheckInterval (%d) must be at least 1", interval),
+ )
+ }
+ if interval > 100000 {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("backpressure.memoryCheckInterval (%d) is unreasonably high (max 100000)", interval),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateResourceLimitSettings validates resource limit configuration settings.
+func validateResourceLimitSettings() []string {
+ var validationErrors []string
+
+ validationErrors = append(validationErrors, validateMaxFilesLimit()...)
+ validationErrors = append(validationErrors, validateMaxTotalSizeLimit()...)
+ validationErrors = append(validationErrors, validateTimeoutLimits()...)
+ validationErrors = append(validationErrors, validateConcurrencyLimits()...)
+ validationErrors = append(validationErrors, validateMemoryLimits()...)
+
+ return validationErrors
+}
+
+// validateMaxFilesLimit validates resourceLimits.maxFiles setting.
+func validateMaxFilesLimit() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyResourceLimitsMaxFiles) {
+ return validationErrors
+ }
+
+ maxFiles := viper.GetInt(shared.ConfigKeyResourceLimitsMaxFiles)
+ if maxFiles < shared.ConfigMaxFilesMin {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.maxFiles (%d) must be at least %d", maxFiles, shared.ConfigMaxFilesMin),
+ )
+ }
+ if maxFiles > shared.ConfigMaxFilesMax {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.maxFiles (%d) exceeds maximum (%d)", maxFiles, shared.ConfigMaxFilesMax),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateMaxTotalSizeLimit validates resourceLimits.maxTotalSize setting.
+func validateMaxTotalSizeLimit() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyResourceLimitsMaxTotalSize) {
+ return validationErrors
+ }
+
+ maxTotalSize := viper.GetInt64(shared.ConfigKeyResourceLimitsMaxTotalSize)
+ minTotalSize := int64(shared.ConfigMaxTotalSizeMin)
+ maxTotalSizeLimit := int64(shared.ConfigMaxTotalSizeMax)
+ if maxTotalSize < minTotalSize {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.maxTotalSize (%d) must be at least %d", maxTotalSize, minTotalSize),
+ )
+ }
+ if maxTotalSize > maxTotalSizeLimit {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.maxTotalSize (%d) exceeds maximum (%d)", maxTotalSize, maxTotalSizeLimit),
+ )
+ }
+
+ return validationErrors
+}
+
+// validateTimeoutLimits validates timeout-related resource limit settings.
+func validateTimeoutLimits() []string {
+ var validationErrors []string
+
+ if viper.IsSet(shared.ConfigKeyResourceLimitsFileProcessingTO) {
+ timeout := viper.GetInt(shared.ConfigKeyResourceLimitsFileProcessingTO)
+ if timeout < shared.ConfigFileProcessingTimeoutSecMin {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf(
+ "resourceLimits.fileProcessingTimeoutSec (%d) must be at least %d",
+ timeout,
+ shared.ConfigFileProcessingTimeoutSecMin,
+ ),
+ )
+ }
+ if timeout > shared.ConfigFileProcessingTimeoutSecMax {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf(
+ "resourceLimits.fileProcessingTimeoutSec (%d) exceeds maximum (%d)",
+ timeout,
+ shared.ConfigFileProcessingTimeoutSecMax,
+ ),
+ )
+ }
+ }
+
+ if viper.IsSet(shared.ConfigKeyResourceLimitsOverallTO) {
+ timeout := viper.GetInt(shared.ConfigKeyResourceLimitsOverallTO)
+ minTimeout := shared.ConfigOverallTimeoutSecMin
+ maxTimeout := shared.ConfigOverallTimeoutSecMax
+ if timeout < minTimeout {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.overallTimeoutSec (%d) must be at least %d", timeout, minTimeout),
+ )
+ }
+ if timeout > maxTimeout {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.overallTimeoutSec (%d) exceeds maximum (%d)", timeout, maxTimeout),
+ )
+ }
+ }
+
+ return validationErrors
+}
+
+// validateConcurrencyLimits validates concurrency-related resource limit settings.
+func validateConcurrencyLimits() []string {
+ var validationErrors []string
+
+ if viper.IsSet(shared.ConfigKeyResourceLimitsMaxConcurrentReads) {
+ maxReads := viper.GetInt(shared.ConfigKeyResourceLimitsMaxConcurrentReads)
+ minReads := shared.ConfigMaxConcurrentReadsMin
+ maxReadsLimit := shared.ConfigMaxConcurrentReadsMax
+ if maxReads < minReads {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.maxConcurrentReads (%d) must be at least %d", maxReads, minReads),
+ )
+ }
+ if maxReads > maxReadsLimit {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.maxConcurrentReads (%d) exceeds maximum (%d)", maxReads, maxReadsLimit),
+ )
+ }
+ }
+
+ if viper.IsSet(shared.ConfigKeyResourceLimitsRateLimitFilesPerSec) {
+ rateLimit := viper.GetInt(shared.ConfigKeyResourceLimitsRateLimitFilesPerSec)
+ minRate := shared.ConfigRateLimitFilesPerSecMin
+ maxRate := shared.ConfigRateLimitFilesPerSecMax
+ if rateLimit < minRate {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.rateLimitFilesPerSec (%d) must be at least %d", rateLimit, minRate),
+ )
+ }
+ if rateLimit > maxRate {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.rateLimitFilesPerSec (%d) exceeds maximum (%d)", rateLimit, maxRate),
+ )
+ }
+ }
+
+ return validationErrors
+}
+
+// validateMemoryLimits validates memory-related resource limit settings.
+func validateMemoryLimits() []string {
+ var validationErrors []string
+
+ if !viper.IsSet(shared.ConfigKeyResourceLimitsHardMemoryLimitMB) {
+ return validationErrors
+ }
+
+ memLimit := viper.GetInt(shared.ConfigKeyResourceLimitsHardMemoryLimitMB)
+ minMemLimit := shared.ConfigHardMemoryLimitMBMin
+ maxMemLimit := shared.ConfigHardMemoryLimitMBMax
+ if memLimit < minMemLimit {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.hardMemoryLimitMB (%d) must be at least %d", memLimit, minMemLimit),
+ )
+ }
+ if memLimit > maxMemLimit {
+ validationErrors = append(
+ validationErrors,
+ fmt.Sprintf("resourceLimits.hardMemoryLimitMB (%d) exceeds maximum (%d)", memLimit, maxMemLimit),
+ )
+ }
+
+ return validationErrors
+}
+
+// ValidateFileSize checks if a file size is within the configured limit.
+func ValidateFileSize(size int64) error {
+ limit := FileSizeLimit()
+ if size > limit {
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeValidationSize,
+ fmt.Sprintf(shared.FileProcessingMsgSizeExceeds, size, limit),
+ "",
+ map[string]any{"file_size": size, "size_limit": limit},
+ )
+ }
+
return nil
}
// ValidateOutputFormat checks if an output format is valid.
func ValidateOutputFormat(format string) error {
if !IsValidFormat(format) {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeValidationFormat,
fmt.Sprintf("unsupported output format: %s (supported: json, yaml, markdown)", format),
"",
- map[string]interface{}{"format": format},
+ map[string]any{"format": format},
)
}
+
return nil
}
// ValidateConcurrency checks if a concurrency level is valid.
func ValidateConcurrency(concurrency int) error {
if concurrency < 1 {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeValidationFormat,
fmt.Sprintf("concurrency (%d) must be at least 1", concurrency),
"",
- map[string]interface{}{"concurrency": concurrency},
+ map[string]any{"concurrency": concurrency},
)
}
- if viper.IsSet("maxConcurrency") {
- maxConcurrency := GetMaxConcurrency()
+ if viper.IsSet(shared.ConfigKeyMaxConcurrency) {
+ maxConcurrency := MaxConcurrency()
if concurrency > maxConcurrency {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeValidationFormat,
fmt.Sprintf("concurrency (%d) exceeds maximum (%d)", concurrency, maxConcurrency),
"",
- map[string]interface{}{"concurrency": concurrency, "max_concurrency": maxConcurrency},
+ map[string]any{"concurrency": concurrency, "max_concurrency": maxConcurrency},
)
}
}
diff --git a/config/validation_helpers.go b/config/validation_helpers.go
new file mode 100644
index 0000000..8e5376b
--- /dev/null
+++ b/config/validation_helpers.go
@@ -0,0 +1,51 @@
+// Package config handles application configuration management.
+package config
+
+import (
+ "fmt"
+ "strings"
+)
+
+// validateEmptyElement checks if an element in a slice is empty after trimming whitespace.
+// Returns a formatted error message if empty, or empty string if valid.
+func validateEmptyElement(fieldPath, value string, index int) string {
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return fmt.Sprintf("%s[%d] is empty", fieldPath, index)
+ }
+
+ return ""
+}
+
+// validateDotPrefix ensures an extension starts with a dot.
+// Returns a formatted error message if missing dot prefix, or empty string if valid.
+func validateDotPrefix(fieldPath, value string, index int) string {
+ value = strings.TrimSpace(value)
+ if !strings.HasPrefix(value, ".") {
+ return fmt.Sprintf("%s[%d] (%s) must start with a dot", fieldPath, index, value)
+ }
+
+ return ""
+}
+
+// validateDotPrefixMap ensures a map key (extension) starts with a dot.
+// Returns a formatted error message if missing dot prefix, or empty string if valid.
+func validateDotPrefixMap(fieldPath, key string) string {
+ key = strings.TrimSpace(key)
+ if !strings.HasPrefix(key, ".") {
+ return fmt.Sprintf("%s extension (%s) must start with a dot", fieldPath, key)
+ }
+
+ return ""
+}
+
+// validateEmptyMapValue checks if a map value is empty after trimming whitespace.
+// Returns a formatted error message if empty, or empty string if valid.
+func validateEmptyMapValue(fieldPath, key, value string) string {
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return fmt.Sprintf("%s[%s] has empty language value", fieldPath, key)
+ }
+
+ return ""
+}
diff --git a/config/validation_test.go b/config/validation_test.go
index 13bedc8..ef452e3 100644
--- a/config/validation_test.go
+++ b/config/validation_test.go
@@ -8,44 +8,44 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// TestValidateConfig tests the configuration validation functionality.
func TestValidateConfig(t *testing.T) {
tests := []struct {
name string
- config map[string]interface{}
+ config map[string]any
wantErr bool
errContains string
}{
{
name: "valid default config",
- config: map[string]interface{}{
- "fileSizeLimit": config.DefaultFileSizeLimit,
+ config: map[string]any{
+ "fileSizeLimit": shared.ConfigFileSizeLimitDefault,
"ignoreDirectories": []string{"node_modules", ".git"},
},
wantErr: false,
},
{
name: "file size limit too small",
- config: map[string]interface{}{
- "fileSizeLimit": config.MinFileSizeLimit - 1,
+ config: map[string]any{
+ "fileSizeLimit": shared.ConfigFileSizeLimitMin - 1,
},
wantErr: true,
errContains: "fileSizeLimit",
},
{
name: "file size limit too large",
- config: map[string]interface{}{
- "fileSizeLimit": config.MaxFileSizeLimit + 1,
+ config: map[string]any{
+ "fileSizeLimit": shared.ConfigFileSizeLimitMax + 1,
},
wantErr: true,
errContains: "fileSizeLimit",
},
{
name: "empty ignore directory",
- config: map[string]interface{}{
+ config: map[string]any{
"ignoreDirectories": []string{"node_modules", "", ".git"},
},
wantErr: true,
@@ -53,7 +53,7 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "ignore directory with path separator",
- config: map[string]interface{}{
+ config: map[string]any{
"ignoreDirectories": []string{"node_modules", "src/build", ".git"},
},
wantErr: true,
@@ -61,7 +61,7 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "invalid supported format",
- config: map[string]interface{}{
+ config: map[string]any{
"supportedFormats": []string{"json", "xml", "yaml"},
},
wantErr: true,
@@ -69,7 +69,7 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "invalid max concurrency",
- config: map[string]interface{}{
+ config: map[string]any{
"maxConcurrency": 0,
},
wantErr: true,
@@ -77,8 +77,8 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "valid comprehensive config",
- config: map[string]interface{}{
- "fileSizeLimit": config.DefaultFileSizeLimit,
+ config: map[string]any{
+ "fileSizeLimit": shared.ConfigFileSizeLimitDefault,
"ignoreDirectories": []string{"node_modules", ".git", ".vscode"},
"supportedFormats": []string{"json", "yaml", "markdown"},
"maxConcurrency": 8,
@@ -89,157 +89,170 @@ func TestValidateConfig(t *testing.T) {
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- // Reset viper for each test
- viper.Reset()
+ t.Run(
+ tt.name, func(t *testing.T) {
+ // Reset viper for each test
+ viper.Reset()
- // Set test configuration
- for key, value := range tt.config {
- viper.Set(key, value)
- }
-
- // Load defaults for missing values
- config.LoadConfig()
-
- err := config.ValidateConfig()
-
- if tt.wantErr {
- if err == nil {
- t.Errorf("Expected error but got none")
- return
- }
- if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
- t.Errorf("Expected error to contain %q, got %q", tt.errContains, err.Error())
+ // Set test configuration
+ for key, value := range tt.config {
+ viper.Set(key, value)
}
- // Check that it's a structured error
- var structErr *gibidiutils.StructuredError
- if !errorAs(err, &structErr) {
- t.Errorf("Expected structured error, got %T", err)
- return
+ // Set defaults for missing values without touching disk
+ config.SetDefaultConfig()
+
+ err := config.ValidateConfig()
+
+ if tt.wantErr {
+ validateExpectedError(t, err, tt.errContains)
+ } else if err != nil {
+ t.Errorf("Expected no error but got: %v", err)
}
- if structErr.Type != gibidiutils.ErrorTypeConfiguration {
- t.Errorf("Expected error type %v, got %v", gibidiutils.ErrorTypeConfiguration, structErr.Type)
- }
- if structErr.Code != gibidiutils.CodeConfigValidation {
- t.Errorf("Expected error code %v, got %v", gibidiutils.CodeConfigValidation, structErr.Code)
- }
- } else if err != nil {
- t.Errorf("Expected no error but got: %v", err)
- }
- })
+ },
+ )
}
}
-// TestValidationFunctions tests individual validation functions.
-func TestValidationFunctions(t *testing.T) {
- t.Run("IsValidFormat", func(t *testing.T) {
- tests := []struct {
- format string
- valid bool
- }{
- {"json", true},
- {"yaml", true},
- {"markdown", true},
- {"JSON", true},
- {"xml", false},
- {"txt", false},
- {"", false},
- {" json ", true},
+// TestIsValidFormat tests the IsValidFormat function.
+func TestIsValidFormat(t *testing.T) {
+ tests := []struct {
+ format string
+ valid bool
+ }{
+ {"json", true},
+ {"yaml", true},
+ {"markdown", true},
+ {"JSON", true},
+ {"xml", false},
+ {"txt", false},
+ {"", false},
+ {" json ", true},
+ }
+
+ for _, tt := range tests {
+ result := config.IsValidFormat(tt.format)
+ if result != tt.valid {
+ t.Errorf("IsValidFormat(%q) = %v, want %v", tt.format, result, tt.valid)
}
-
- for _, tt := range tests {
- result := config.IsValidFormat(tt.format)
- if result != tt.valid {
- t.Errorf("IsValidFormat(%q) = %v, want %v", tt.format, result, tt.valid)
- }
- }
- })
-
- t.Run("ValidateFileSize", func(t *testing.T) {
- viper.Reset()
- viper.Set("fileSizeLimit", config.DefaultFileSizeLimit)
-
- tests := []struct {
- name string
- size int64
- wantErr bool
- }{
- {"size within limit", config.DefaultFileSizeLimit - 1, false},
- {"size at limit", config.DefaultFileSizeLimit, false},
- {"size exceeds limit", config.DefaultFileSizeLimit + 1, true},
- {"zero size", 0, false},
- }
-
- for _, tt := range tests {
- err := config.ValidateFileSize(tt.size)
- if (err != nil) != tt.wantErr {
- t.Errorf("%s: ValidateFileSize(%d) error = %v, wantErr %v", tt.name, tt.size, err, tt.wantErr)
- }
- }
- })
-
- t.Run("ValidateOutputFormat", func(t *testing.T) {
- tests := []struct {
- format string
- wantErr bool
- }{
- {"json", false},
- {"yaml", false},
- {"markdown", false},
- {"xml", true},
- {"txt", true},
- {"", true},
- }
-
- for _, tt := range tests {
- err := config.ValidateOutputFormat(tt.format)
- if (err != nil) != tt.wantErr {
- t.Errorf("ValidateOutputFormat(%q) error = %v, wantErr %v", tt.format, err, tt.wantErr)
- }
- }
- })
-
- t.Run("ValidateConcurrency", func(t *testing.T) {
- tests := []struct {
- name string
- concurrency int
- maxConcurrency int
- setMax bool
- wantErr bool
- }{
- {"valid concurrency", 4, 0, false, false},
- {"minimum concurrency", 1, 0, false, false},
- {"zero concurrency", 0, 0, false, true},
- {"negative concurrency", -1, 0, false, true},
- {"concurrency within max", 4, 8, true, false},
- {"concurrency exceeds max", 16, 8, true, true},
- }
-
- for _, tt := range tests {
- viper.Reset()
- if tt.setMax {
- viper.Set("maxConcurrency", tt.maxConcurrency)
- }
-
- err := config.ValidateConcurrency(tt.concurrency)
- if (err != nil) != tt.wantErr {
- t.Errorf("%s: ValidateConcurrency(%d) error = %v, wantErr %v", tt.name, tt.concurrency, err, tt.wantErr)
- }
- }
- })
+ }
}
-func errorAs(err error, target interface{}) bool {
+// TestValidateFileSize tests the ValidateFileSize function.
+func TestValidateFileSize(t *testing.T) {
+ viper.Reset()
+ viper.Set("fileSizeLimit", shared.ConfigFileSizeLimitDefault)
+
+ tests := []struct {
+ name string
+ size int64
+ wantErr bool
+ }{
+ {"size within limit", shared.ConfigFileSizeLimitDefault - 1, false},
+ {"size at limit", shared.ConfigFileSizeLimitDefault, false},
+ {"size exceeds limit", shared.ConfigFileSizeLimitDefault + 1, true},
+ {"zero size", 0, false},
+ }
+
+ for _, tt := range tests {
+ err := config.ValidateFileSize(tt.size)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("%s: ValidateFileSize(%d) error = %v, wantErr %v", tt.name, tt.size, err, tt.wantErr)
+ }
+ }
+}
+
+// TestValidateOutputFormat tests the ValidateOutputFormat function.
+func TestValidateOutputFormat(t *testing.T) {
+ tests := []struct {
+ format string
+ wantErr bool
+ }{
+ {"json", false},
+ {"yaml", false},
+ {"markdown", false},
+ {"xml", true},
+ {"txt", true},
+ {"", true},
+ }
+
+ for _, tt := range tests {
+ err := config.ValidateOutputFormat(tt.format)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ValidateOutputFormat(%q) error = %v, wantErr %v", tt.format, err, tt.wantErr)
+ }
+ }
+}
+
+// TestValidateConcurrency tests the ValidateConcurrency function.
+func TestValidateConcurrency(t *testing.T) {
+ tests := []struct {
+ name string
+ concurrency int
+ maxConcurrency int
+ setMax bool
+ wantErr bool
+ }{
+ {"valid concurrency", 4, 0, false, false},
+ {"minimum concurrency", 1, 0, false, false},
+ {"zero concurrency", 0, 0, false, true},
+ {"negative concurrency", -1, 0, false, true},
+ {"concurrency within max", 4, 8, true, false},
+ {"concurrency exceeds max", 16, 8, true, true},
+ }
+
+ for _, tt := range tests {
+ viper.Reset()
+ if tt.setMax {
+ viper.Set("maxConcurrency", tt.maxConcurrency)
+ }
+
+ err := config.ValidateConcurrency(tt.concurrency)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("%s: ValidateConcurrency(%d) error = %v, wantErr %v", tt.name, tt.concurrency, err, tt.wantErr)
+ }
+ }
+}
+
+// validateExpectedError validates that an error occurred and matches expectations.
+func validateExpectedError(t *testing.T, err error, errContains string) {
+ t.Helper()
+ if err == nil {
+ t.Error(shared.TestMsgExpectedError)
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("Expected error to contain %q, got %q", errContains, err.Error())
+ }
+
+ // Check that it's a structured error
+ var structErr *shared.StructuredError
+ if !errorAs(err, &structErr) {
+ t.Errorf("Expected structured error, got %T", err)
+
+ return
+ }
+ if structErr.Type != shared.ErrorTypeConfiguration {
+ t.Errorf("Expected error type %v, got %v", shared.ErrorTypeConfiguration, structErr.Type)
+ }
+ if structErr.Code != shared.CodeConfigValidation {
+ t.Errorf("Expected error code %v, got %v", shared.CodeConfigValidation, structErr.Code)
+ }
+}
+
+func errorAs(err error, target any) bool {
if err == nil {
return false
}
- var structErr *gibidiutils.StructuredError
+ structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
- if ptr, ok := target.(**gibidiutils.StructuredError); ok {
+ if ptr, ok := target.(**shared.StructuredError); ok {
*ptr = structErr
+
return true
}
}
+
return false
}
diff --git a/examples/basic-usage.md b/examples/basic-usage.md
new file mode 100644
index 0000000..53ad76c
--- /dev/null
+++ b/examples/basic-usage.md
@@ -0,0 +1,219 @@
+# Basic Usage Examples
+
+This directory contains practical examples of how to use gibidify for various use cases.
+
+## Simple Code Aggregation
+
+The most basic use case - aggregate all code files from a project into a single output:
+
+```bash
+# Aggregate all files from current directory to markdown
+gibidify -source . -format markdown -destination output.md
+
+# Aggregate specific directory to JSON
+gibidify -source ./src -format json -destination code-dump.json
+
+# Aggregate with custom worker count
+gibidify -source ./project -format yaml -destination project.yaml -concurrency 8
+```
+
+## With Configuration File
+
+For repeatable processing with custom settings:
+
+1. Copy the configuration example:
+```bash
+cp config.example.yaml ~/.config/gibidify/config.yaml
+```
+
+2. Edit the configuration file to your needs, then run:
+```bash
+gibidify -source ./my-project
+```
+
+## Output Formats
+
+### JSON Output
+Best for programmatic processing and data analysis:
+
+```bash
+gibidify -source ./src -format json -destination api-code.json
+```
+
+Example JSON structure:
+```json
+{
+ "files": [
+ {
+ "path": "src/main.go",
+ "content": "package main...",
+ "language": "go",
+ "size": 1024
+ }
+ ],
+ "metadata": {
+ "total_files": 15,
+ "total_size": 45678,
+ "processing_time": "1.2s"
+ }
+}
+```
+
+### Markdown Output
+Great for documentation and code reviews:
+
+```bash
+gibidify -source ./src -format markdown -destination code-review.md
+```
+
+### YAML Output
+Structured and human-readable:
+
+```bash
+gibidify -source ./config -format yaml -destination config-dump.yaml
+```
+
+## Advanced Usage Examples
+
+### Large Codebase Processing
+For processing large projects with performance optimizations:
+
+```bash
+gibidify -source ./large-project \
+ -format json \
+ -destination large-output.json \
+ -concurrency 16 \
+ --verbose
+```
+
+### Memory-Conscious Processing
+For systems with limited memory:
+
+```bash
+gibidify -source ./project \
+ -format markdown \
+ -destination output.md \
+ -concurrency 4
+```
+
+### Filtered Processing
+Process only specific file types (when configured):
+
+```bash
+# Configure file patterns in config.yaml
+filePatterns:
+ - "*.go"
+ - "*.py"
+ - "*.js"
+
+# Then run
+gibidify -source ./mixed-project -destination filtered.json
+```
+
+### CI/CD Integration
+For automated documentation generation:
+
+```bash
+# In your CI pipeline
+gibidify -source . \
+ -format markdown \
+ -destination docs/codebase.md \
+ --no-colors \
+ --no-progress \
+ -concurrency 2
+```
+
+## Error Handling
+
+### Graceful Failure Handling
+The tool handles common issues gracefully:
+
+```bash
+# This will fail gracefully if source doesn't exist
+gibidify -source ./nonexistent -destination out.json
+
+# This will warn about permission issues but continue
+gibidify -source ./restricted-dir -destination out.md --verbose
+```
+
+### Resource Limits
+Configure resource limits in your config file:
+
+```yaml
+resourceLimits:
+ enabled: true
+ maxFiles: 5000
+ maxTotalSize: 1073741824 # 1GB
+ fileProcessingTimeoutSec: 30
+ overallTimeoutSec: 1800 # 30 minutes
+ hardMemoryLimitMB: 512
+```
+
+## Performance Tips
+
+1. **Adjust Concurrency**: Start with number of CPU cores, adjust based on I/O vs CPU bound work
+2. **Use Appropriate Format**: JSON is fastest, Markdown has more overhead
+3. **Configure File Limits**: Set reasonable limits in config.yaml for your use case
+4. **Monitor Memory**: Use `--verbose` to see memory usage during processing
+5. **Use Progress Indicators**: Enable progress bars for long-running operations
+
+## Integration Examples
+
+### With Git Hooks
+Create a pre-commit hook to generate code documentation:
+
+```bash
+#!/bin/sh
+# .git/hooks/pre-commit
+gibidify -source . -format markdown -destination docs/current-code.md
+git add docs/current-code.md
+```
+
+### With Make
+Add to your Makefile:
+
+```makefile
+.PHONY: code-dump
+code-dump:
+ gibidify -source ./src -format json -destination dist/codebase.json
+
+.PHONY: docs
+docs:
+ gibidify -source . -format markdown -destination docs/codebase.md
+```
+
+### Docker Usage
+```dockerfile
+FROM golang:1.25-alpine
+RUN go install github.com/ivuorinen/gibidify@latest
+WORKDIR /workspace
+COPY . .
+RUN gibidify -source . -format json -destination /output/codebase.json
+```
+
+## Common Use Cases
+
+### 1. Code Review Preparation
+```bash
+gibidify -source ./feature-branch -format markdown -destination review.md
+```
+
+### 2. AI Code Analysis
+```bash
+gibidify -source ./src -format json -destination ai-input.json
+```
+
+### 3. Documentation Generation
+```bash
+gibidify -source ./lib -format markdown -destination api-docs.md
+```
+
+### 4. Backup Creation
+```bash
+gibidify -source ./project -format yaml -destination backup-$(date +%Y%m%d).yaml
+```
+
+### 5. Code Migration Prep
+```bash
+gibidify -source ./legacy-code -format json -destination migration-analysis.json
+```
diff --git a/examples/configuration-examples.md b/examples/configuration-examples.md
new file mode 100644
index 0000000..f1e572e
--- /dev/null
+++ b/examples/configuration-examples.md
@@ -0,0 +1,469 @@
+# Configuration Examples
+
+This document provides practical configuration examples for different use cases.
+
+## Basic Configuration
+
+Create `~/.config/gibidify/config.yaml`:
+
+```yaml
+# Basic setup for most projects
+fileSizeLimit: 5242880 # 5MB per file
+maxConcurrency: 8
+
+ignoreDirectories:
+ - vendor
+ - node_modules
+ - .git
+ - dist
+ - target
+
+# Enable file type detection
+fileTypes:
+ enabled: true
+```
+
+## Development Environment Configuration
+
+Optimized for active development with fast feedback:
+
+```yaml
+# ~/.config/gibidify/config.yaml
+fileSizeLimit: 1048576 # 1MB - smaller files for faster processing
+
+ignoreDirectories:
+ - vendor
+ - node_modules
+ - .git
+ - dist
+ - build
+ - tmp
+ - cache
+ - .vscode
+ - .idea
+
+# Conservative resource limits for development
+resourceLimits:
+ enabled: true
+ maxFiles: 1000
+ maxTotalSize: 104857600 # 100MB
+ fileProcessingTimeoutSec: 10
+ overallTimeoutSec: 300 # 5 minutes
+ maxConcurrentReads: 4
+ hardMemoryLimitMB: 256
+
+# Fast backpressure for responsive development
+backpressure:
+ enabled: true
+ maxPendingFiles: 500
+ maxPendingWrites: 50
+ maxMemoryUsage: 52428800 # 50MB
+ memoryCheckInterval: 100
+
+# Simple output for quick reviews
+output:
+ metadata:
+ includeStats: true
+ includeTimestamp: true
+```
+
+## Production/CI Configuration
+
+High-performance setup for automated processing:
+
+```yaml
+# Production configuration
+fileSizeLimit: 10485760 # 10MB per file
+maxConcurrency: 16
+
+ignoreDirectories:
+ - vendor
+ - node_modules
+ - .git
+ - dist
+ - build
+ - target
+ - tmp
+ - cache
+ - coverage
+ - .nyc_output
+ - __pycache__
+
+# High-performance resource limits
+resourceLimits:
+ enabled: true
+ maxFiles: 50000
+ maxTotalSize: 10737418240 # 10GB
+ fileProcessingTimeoutSec: 60
+ overallTimeoutSec: 7200 # 2 hours
+ maxConcurrentReads: 20
+ hardMemoryLimitMB: 2048
+
+# High-throughput backpressure
+backpressure:
+ enabled: true
+ maxPendingFiles: 5000
+ maxPendingWrites: 500
+ maxMemoryUsage: 1073741824 # 1GB
+ memoryCheckInterval: 1000
+
+# Comprehensive output for analysis
+output:
+ metadata:
+ includeStats: true
+ includeTimestamp: true
+ includeFileCount: true
+ includeSourcePath: true
+ includeFileTypes: true
+ includeProcessingTime: true
+ includeTotalSize: true
+ includeMetrics: true
+```
+
+## Security-Focused Configuration
+
+Restrictive settings for untrusted input:
+
+```yaml
+# Security-first configuration
+fileSizeLimit: 1048576 # 1MB maximum
+
+ignoreDirectories:
+ - "**/.*" # All hidden directories
+ - vendor
+ - node_modules
+ - tmp
+ - temp
+ - cache
+
+# Strict resource limits
+resourceLimits:
+ enabled: true
+ maxFiles: 100 # Very restrictive
+ maxTotalSize: 10485760 # 10MB total
+ fileProcessingTimeoutSec: 5
+ overallTimeoutSec: 60 # 1 minute max
+ maxConcurrentReads: 2
+ rateLimitFilesPerSec: 10 # Rate limiting enabled
+ hardMemoryLimitMB: 128 # Low memory limit
+
+# Conservative backpressure
+backpressure:
+ enabled: true
+ maxPendingFiles: 50
+ maxPendingWrites: 10
+ maxMemoryUsage: 10485760 # 10MB
+ memoryCheckInterval: 10 # Frequent checks
+
+# Minimal file type detection
+fileTypes:
+ enabled: true
+ # Disable potentially risky file types
+ disabledLanguageExtensions:
+ - .bat
+ - .cmd
+ - .ps1
+ - .sh
+ disabledBinaryExtensions:
+ - .exe
+ - .dll
+ - .so
+```
+
+## Language-Specific Configuration
+
+### Go Projects
+```yaml
+fileSizeLimit: 5242880
+
+ignoreDirectories:
+ - vendor
+ - .git
+ - bin
+ - pkg
+
+fileTypes:
+ enabled: true
+ customLanguages:
+ .mod: go-mod
+ .sum: go-sum
+
+filePatterns:
+ - "*.go"
+ - "go.mod"
+ - "go.sum"
+ - "*.md"
+```
+
+### JavaScript/Node.js Projects
+```yaml
+fileSizeLimit: 2097152 # 2MB
+
+ignoreDirectories:
+ - node_modules
+ - .git
+ - dist
+ - build
+ - coverage
+ - .nyc_output
+
+fileTypes:
+ enabled: true
+ customLanguages:
+ .vue: vue
+ .svelte: svelte
+ .astro: astro
+
+filePatterns:
+ - "*.js"
+ - "*.ts"
+ - "*.jsx"
+ - "*.tsx"
+ - "*.vue"
+ - "*.json"
+ - "*.md"
+```
+
+### Python Projects
+```yaml
+fileSizeLimit: 5242880
+
+ignoreDirectories:
+ - .git
+ - __pycache__
+ - .pytest_cache
+ - venv
+ - env
+ - .env
+ - dist
+ - build
+ - .tox
+
+fileTypes:
+ enabled: true
+ customLanguages:
+ .pyi: python-interface
+ .ipynb: jupyter-notebook
+
+filePatterns:
+ - "*.py"
+ - "*.pyi"
+ - "requirements*.txt"
+ - "*.toml"
+ - "*.cfg"
+ - "*.ini"
+ - "*.md"
+```
+
+## Output Format Configurations
+
+### Detailed Markdown Output
+```yaml
+output:
+ template: "detailed"
+
+ metadata:
+ includeStats: true
+ includeTimestamp: true
+ includeFileCount: true
+ includeSourcePath: true
+ includeFileTypes: true
+ includeProcessingTime: true
+
+ markdown:
+ useCodeBlocks: true
+ includeLanguage: true
+ headerLevel: 2
+ tableOfContents: true
+ syntaxHighlighting: true
+ lineNumbers: true
+ maxLineLength: 120
+
+ variables:
+ project_name: "My Project"
+ author: "Development Team"
+ version: "1.0.0"
+```
+
+### Compact JSON Output
+```yaml
+output:
+ template: "minimal"
+
+ metadata:
+ includeStats: true
+ includeFileCount: true
+```
+
+### Custom Template Output
+```yaml
+output:
+ template: "custom"
+
+ custom:
+ header: |
+ # {{ .ProjectName }} Code Dump
+ Generated: {{ .Timestamp }}
+ Total Files: {{ .FileCount }}
+
+ footer: |
+ ---
+ Processing completed in {{ .ProcessingTime }}
+
+ fileHeader: |
+ ## {{ .Path }}
+ Language: {{ .Language }} | Size: {{ .Size }} bytes
+
+ fileFooter: ""
+
+ variables:
+ project_name: "Custom Project"
+```
+
+## Environment-Specific Configurations
+
+### Docker Container
+```yaml
+# Optimized for containerized environments
+fileSizeLimit: 5242880
+maxConcurrency: 4 # Conservative for containers
+
+resourceLimits:
+ enabled: true
+ hardMemoryLimitMB: 512
+ maxFiles: 5000
+ overallTimeoutSec: 1800
+
+backpressure:
+ enabled: true
+ maxMemoryUsage: 268435456 # 256MB
+```
+
+### GitHub Actions
+```yaml
+# CI/CD optimized configuration
+fileSizeLimit: 2097152
+maxConcurrency: 2 # Conservative for shared runners
+
+ignoreDirectories:
+ - .git
+ - .github
+ - node_modules
+ - vendor
+ - dist
+ - build
+
+resourceLimits:
+ enabled: true
+ maxFiles: 2000
+ overallTimeoutSec: 900 # 15 minutes
+ hardMemoryLimitMB: 1024
+```
+
+### Local Development
+```yaml
+# Developer-friendly settings
+fileSizeLimit: 10485760 # 10MB
+maxConcurrency: 8
+
+# Show progress and verbose output
+output:
+ metadata:
+ includeStats: true
+ includeTimestamp: true
+ includeProcessingTime: true
+ includeMetrics: true
+
+ markdown:
+ useCodeBlocks: true
+ includeLanguage: true
+ syntaxHighlighting: true
+```
+
+## Template Examples
+
+### Custom API Documentation Template
+```yaml
+output:
+ template: "custom"
+
+ custom:
+ header: |
+ # {{ .Variables.api_name }} API Documentation
+ Version: {{ .Variables.version }}
+ Generated: {{ .Timestamp }}
+
+ ## Overview
+ This document contains the complete source code for the {{ .Variables.api_name }} API.
+
+ ## Statistics
+ - Total Files: {{ .FileCount }}
+ - Total Size: {{ .TotalSize | formatSize }}
+ - Processing Time: {{ .ProcessingTime }}
+
+ ---
+
+ fileHeader: |
+ ### {{ .Path }}
+
+ **Type:** {{ .Language }}
+ **Size:** {{ .Size | formatSize }}
+
+ ```{{ .Language }}
+
+ fileFooter: |
+ ```
+
+ ---
+
+ footer: |
+ ## Summary
+
+ Documentation generated with [gibidify](https://github.com/ivuorinen/gibidify)
+
+ variables:
+ api_name: "My API"
+ version: "v1.2.3"
+```
+
+### Code Review Template
+```yaml
+output:
+ template: "custom"
+
+ custom:
+ header: |
+ # Code Review: {{ .Variables.pr_title }}
+
+ **PR Number:** #{{ .Variables.pr_number }}
+ **Author:** {{ .Variables.author }}
+ **Date:** {{ .Timestamp }}
+
+ ## Files Changed ({{ .FileCount }})
+
+ fileHeader: |
+ ## 📄 {{ .Path }}
+
+
+ {{ .Language | upper }} • {{ .Size | formatSize }}
+
+ ```{{ .Language }}
+
+ fileFooter: |
+ ```
+
+
+
+ footer: |
+ ---
+
+ **Review Summary:**
+ - Files reviewed: {{ .FileCount }}
+ - Total size: {{ .TotalSize | formatSize }}
+ - Generated in: {{ .ProcessingTime }}
+
+ variables:
+ pr_title: "Feature Implementation"
+ pr_number: "123"
+ author: "developer@example.com"
+```
diff --git a/fileproc/backpressure.go b/fileproc/backpressure.go
index 4426424..07a02ce 100644
--- a/fileproc/backpressure.go
+++ b/fileproc/backpressure.go
@@ -3,16 +3,13 @@ package fileproc
import (
"context"
- "math"
"runtime"
"sync"
"sync/atomic"
"time"
- "github.com/sirupsen/logrus"
-
"github.com/ivuorinen/gibidify/config"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// BackpressureManager manages memory usage and applies back-pressure when needed.
@@ -31,11 +28,11 @@ type BackpressureManager struct {
// NewBackpressureManager creates a new back-pressure manager with configuration.
func NewBackpressureManager() *BackpressureManager {
return &BackpressureManager{
- enabled: config.GetBackpressureEnabled(),
- maxMemoryUsage: config.GetMaxMemoryUsage(),
- memoryCheckInterval: config.GetMemoryCheckInterval(),
- maxPendingFiles: config.GetMaxPendingFiles(),
- maxPendingWrites: config.GetMaxPendingWrites(),
+ enabled: config.BackpressureEnabled(),
+ maxMemoryUsage: config.MaxMemoryUsage(),
+ memoryCheckInterval: config.MemoryCheckInterval(),
+ maxPendingFiles: config.MaxPendingFiles(),
+ maxPendingWrites: config.MaxPendingWrites(),
lastMemoryCheck: time.Now(),
}
}
@@ -45,38 +42,52 @@ func (bp *BackpressureManager) CreateChannels() (chan string, chan WriteRequest)
var fileCh chan string
var writeCh chan WriteRequest
+ logger := shared.GetLogger()
if bp.enabled {
// Use buffered channels with configured limits
fileCh = make(chan string, bp.maxPendingFiles)
writeCh = make(chan WriteRequest, bp.maxPendingWrites)
- logrus.Debugf("Created buffered channels: files=%d, writes=%d", bp.maxPendingFiles, bp.maxPendingWrites)
+ logger.Debugf("Created buffered channels: files=%d, writes=%d", bp.maxPendingFiles, bp.maxPendingWrites)
} else {
// Use unbuffered channels (default behavior)
fileCh = make(chan string)
writeCh = make(chan WriteRequest)
- logrus.Debug("Created unbuffered channels (back-pressure disabled)")
+ logger.Debug("Created unbuffered channels (back-pressure disabled)")
}
return fileCh, writeCh
}
// ShouldApplyBackpressure checks if back-pressure should be applied.
-func (bp *BackpressureManager) ShouldApplyBackpressure(_ context.Context) bool {
+func (bp *BackpressureManager) ShouldApplyBackpressure(ctx context.Context) bool {
+ // Check for context cancellation first
+ select {
+ case <-ctx.Done():
+ return false // No need for backpressure if canceled
+ default:
+ }
+
if !bp.enabled {
return false
}
// Check if we should evaluate memory usage
filesProcessed := atomic.AddInt64(&bp.filesProcessed, 1)
- // Avoid divide by zero - if interval is 0, check every file
- if bp.memoryCheckInterval > 0 && int(filesProcessed)%bp.memoryCheckInterval != 0 {
+
+ // Guard against zero or negative interval to avoid modulo-by-zero panic
+ interval := bp.memoryCheckInterval
+ if interval <= 0 {
+ interval = 1
+ }
+
+ if int(filesProcessed)%interval != 0 {
return false
}
// Get current memory usage
var m runtime.MemStats
runtime.ReadMemStats(&m)
- currentMemory := gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, math.MaxInt64)
+ currentMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
bp.mu.Lock()
defer bp.mu.Unlock()
@@ -84,18 +95,22 @@ func (bp *BackpressureManager) ShouldApplyBackpressure(_ context.Context) bool {
bp.lastMemoryCheck = time.Now()
// Check if we're over the memory limit
+ logger := shared.GetLogger()
if currentMemory > bp.maxMemoryUsage {
if !bp.memoryWarningLogged {
- logrus.Warnf("Memory usage (%d bytes) exceeds limit (%d bytes), applying back-pressure",
- currentMemory, bp.maxMemoryUsage)
+ logger.Warnf(
+ "Memory usage (%d bytes) exceeds limit (%d bytes), applying back-pressure",
+ currentMemory, bp.maxMemoryUsage,
+ )
bp.memoryWarningLogged = true
}
+
return true
}
// Reset warning flag if we're back under the limit
if bp.memoryWarningLogged && currentMemory < bp.maxMemoryUsage*8/10 { // 80% of limit
- logrus.Infof("Memory usage normalized (%d bytes), removing back-pressure", currentMemory)
+ logger.Infof("Memory usage normalized (%d bytes), removing back-pressure", currentMemory)
bp.memoryWarningLogged = false
}
@@ -108,14 +123,6 @@ func (bp *BackpressureManager) ApplyBackpressure(ctx context.Context) {
return
}
- // Check for context cancellation before doing expensive operations
- select {
- case <-ctx.Done():
- return
- default:
- // Continue with backpressure logic
- }
-
// Force garbage collection to free up memory
runtime.GC()
@@ -130,11 +137,12 @@ func (bp *BackpressureManager) ApplyBackpressure(ctx context.Context) {
// Log memory usage after GC
var m runtime.MemStats
runtime.ReadMemStats(&m)
- logrus.Debugf("Applied back-pressure: memory after GC = %d bytes", m.Alloc)
+ logger := shared.GetLogger()
+ logger.Debugf("Applied back-pressure: memory after GC = %d bytes", m.Alloc)
}
-// GetStats returns current back-pressure statistics.
-func (bp *BackpressureManager) GetStats() BackpressureStats {
+// Stats returns current back-pressure statistics.
+func (bp *BackpressureManager) Stats() BackpressureStats {
bp.mu.RLock()
defer bp.mu.RUnlock()
@@ -144,7 +152,7 @@ func (bp *BackpressureManager) GetStats() BackpressureStats {
return BackpressureStats{
Enabled: bp.enabled,
FilesProcessed: atomic.LoadInt64(&bp.filesProcessed),
- CurrentMemoryUsage: gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, math.MaxInt64),
+ CurrentMemoryUsage: shared.SafeUint64ToInt64WithDefault(m.Alloc, 0),
MaxMemoryUsage: bp.maxMemoryUsage,
MemoryWarningActive: bp.memoryWarningLogged,
LastMemoryCheck: bp.lastMemoryCheck,
@@ -171,9 +179,11 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
return
}
- // Check if file channel is getting full (>=90% capacity)
- if bp.maxPendingFiles > 0 && len(fileCh) >= bp.maxPendingFiles*9/10 {
- logrus.Debugf("File channel is %d%% full, waiting for space", len(fileCh)*100/bp.maxPendingFiles)
+ logger := shared.GetLogger()
+ // Check if file channel is getting full (>90% capacity)
+ fileCap := cap(fileCh)
+ if fileCap > 0 && len(fileCh) > fileCap*9/10 {
+ logger.Debugf("File channel is %d%% full, waiting for space", len(fileCh)*100/fileCap)
// Wait a bit for the channel to drain
select {
@@ -183,9 +193,10 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
}
}
- // Check if write channel is getting full (>=90% capacity)
- if bp.maxPendingWrites > 0 && len(writeCh) >= bp.maxPendingWrites*9/10 {
- logrus.Debugf("Write channel is %d%% full, waiting for space", len(writeCh)*100/bp.maxPendingWrites)
+ // Check if write channel is getting full (>90% capacity)
+ writeCap := cap(writeCh)
+ if writeCap > 0 && len(writeCh) > writeCap*9/10 {
+ logger.Debugf("Write channel is %d%% full, waiting for space", len(writeCh)*100/writeCap)
// Wait a bit for the channel to drain
select {
@@ -198,10 +209,13 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
// LogBackpressureInfo logs back-pressure configuration and status.
func (bp *BackpressureManager) LogBackpressureInfo() {
+ logger := shared.GetLogger()
if bp.enabled {
- logrus.Infof("Back-pressure enabled: maxMemory=%dMB, fileBuffer=%d, writeBuffer=%d, checkInterval=%d",
- bp.maxMemoryUsage/1024/1024, bp.maxPendingFiles, bp.maxPendingWrites, bp.memoryCheckInterval)
+ logger.Infof(
+ "Back-pressure enabled: maxMemory=%dMB, fileBuffer=%d, writeBuffer=%d, checkInterval=%d",
+ bp.maxMemoryUsage/int64(shared.BytesPerMB), bp.maxPendingFiles, bp.maxPendingWrites, bp.memoryCheckInterval,
+ )
} else {
- logrus.Info("Back-pressure disabled")
+ logger.Info("Back-pressure disabled")
}
}
diff --git a/fileproc/backpressure_behavior_test.go b/fileproc/backpressure_behavior_test.go
deleted file mode 100644
index 3308b8e..0000000
--- a/fileproc/backpressure_behavior_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package fileproc
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestBackpressureManagerShouldApplyBackpressure(t *testing.T) {
- ctx := context.Background()
-
- t.Run("returns false when disabled", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = false
-
- shouldApply := bm.ShouldApplyBackpressure(ctx)
- assert.False(t, shouldApply)
- })
-
- t.Run("checks memory at intervals", func(_ *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true
- bm.memoryCheckInterval = 10
-
- // Should not check memory on most calls
- for i := 1; i < 10; i++ {
- shouldApply := bm.ShouldApplyBackpressure(ctx)
- // Can't predict result, but shouldn't panic
- _ = shouldApply
- }
-
- // Should check memory on 10th call
- shouldApply := bm.ShouldApplyBackpressure(ctx)
- // Result depends on actual memory usage
- _ = shouldApply
- })
-
- t.Run("detects high memory usage", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true
- bm.memoryCheckInterval = 1
- bm.maxMemoryUsage = 1 // Set very low limit to trigger
-
- shouldApply := bm.ShouldApplyBackpressure(ctx)
- // Should detect high memory usage
- assert.True(t, shouldApply)
- })
-}
-
-func TestBackpressureManagerApplyBackpressure(t *testing.T) {
- ctx := context.Background()
-
- t.Run("does nothing when disabled", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = false
-
- // Use a channel to verify the function returns quickly
- done := make(chan struct{})
- go func() {
- bm.ApplyBackpressure(ctx)
- close(done)
- }()
-
- // Should complete quickly when disabled
- select {
- case <-done:
- // Success - function returned
- case <-time.After(50 * time.Millisecond):
- t.Fatal("ApplyBackpressure did not return quickly when disabled")
- }
- })
-
- t.Run("applies delay when enabled", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true
-
- // Use a channel to verify the function blocks for some time
- done := make(chan struct{})
- started := make(chan struct{})
- go func() {
- close(started)
- bm.ApplyBackpressure(ctx)
- close(done)
- }()
-
- // Wait for goroutine to start
- <-started
-
- // Should NOT complete immediately - verify it blocks for at least 5ms
- select {
- case <-done:
- t.Fatal("ApplyBackpressure returned too quickly when enabled")
- case <-time.After(5 * time.Millisecond):
- // Good - it's blocking as expected
- }
-
- // Now wait for it to complete (should finish within reasonable time)
- select {
- case <-done:
- // Success - function eventually returned
- case <-time.After(500 * time.Millisecond):
- t.Fatal("ApplyBackpressure did not complete within timeout")
- }
- })
-
- t.Run("respects context cancellation", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true
-
- ctx, cancel := context.WithCancel(context.Background())
- cancel() // Cancel immediately
-
- start := time.Now()
- bm.ApplyBackpressure(ctx)
- duration := time.Since(start)
-
- // Should return quickly when context is cancelled
- assert.Less(t, duration, 5*time.Millisecond)
- })
-}
-
-func TestBackpressureManagerLogBackpressureInfo(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true // Ensure enabled so filesProcessed is incremented
-
- // Apply some operations
- ctx := context.Background()
- bm.ShouldApplyBackpressure(ctx)
- bm.ApplyBackpressure(ctx)
-
- // This should not panic
- bm.LogBackpressureInfo()
-
- stats := bm.GetStats()
- assert.Greater(t, stats.FilesProcessed, int64(0))
-}
-
-func TestBackpressureManagerMemoryLimiting(t *testing.T) {
- t.Run("triggers on low memory limit", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true
- bm.memoryCheckInterval = 1 // Check every file
- bm.maxMemoryUsage = 1 // Very low limit to guarantee trigger
-
- ctx := context.Background()
-
- // Should detect memory over limit
- shouldApply := bm.ShouldApplyBackpressure(ctx)
- assert.True(t, shouldApply)
- stats := bm.GetStats()
- assert.True(t, stats.MemoryWarningActive)
- })
-
- t.Run("resets warning when memory normalizes", func(t *testing.T) {
- bm := NewBackpressureManager()
- bm.enabled = true
- bm.memoryCheckInterval = 1
- // Simulate warning by first triggering high memory usage
- bm.maxMemoryUsage = 1 // Very low to trigger warning
- ctx := context.Background()
- _ = bm.ShouldApplyBackpressure(ctx)
- stats := bm.GetStats()
- assert.True(t, stats.MemoryWarningActive)
-
- // Now set high limit so we're under it
- bm.maxMemoryUsage = 1024 * 1024 * 1024 * 10 // 10GB
-
- shouldApply := bm.ShouldApplyBackpressure(ctx)
- assert.False(t, shouldApply)
-
- // Warning should be reset (via public API)
- stats = bm.GetStats()
- assert.False(t, stats.MemoryWarningActive)
- })
-}
diff --git a/fileproc/backpressure_channels_test.go b/fileproc/backpressure_channels_test.go
deleted file mode 100644
index 2882918..0000000
--- a/fileproc/backpressure_channels_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package fileproc
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
-)
-
-const (
- // CI-safe timeout constants
- fastOpTimeout = 100 * time.Millisecond // Operations that should complete quickly
- slowOpMinTime = 10 * time.Millisecond // Minimum time for blocking operations
-)
-
-// cleanupViperConfig is a test helper that captures and restores viper configuration.
-// It takes a testing.T and a list of config keys to save/restore.
-// Returns a cleanup function that should be called via t.Cleanup.
-func cleanupViperConfig(t *testing.T, keys ...string) {
- t.Helper()
- // Capture original values
- origValues := make(map[string]interface{})
- for _, key := range keys {
- origValues[key] = viper.Get(key)
- }
- // Register cleanup to restore values
- t.Cleanup(func() {
- for key, val := range origValues {
- if val != nil {
- viper.Set(key, val)
- }
- }
- })
-}
-
-func TestBackpressureManagerCreateChannels(t *testing.T) {
- t.Run("creates buffered channels when enabled", func(t *testing.T) {
- // Capture and restore viper config
- cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxFiles, testBackpressureMaxWrites)
-
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxFiles, 10)
- viper.Set(testBackpressureMaxWrites, 10)
- bm := NewBackpressureManager()
-
- fileCh, writeCh := bm.CreateChannels()
- assert.NotNil(t, fileCh)
- assert.NotNil(t, writeCh)
-
- // Test that channels have buffer capacity
- assert.Greater(t, cap(fileCh), 0)
- assert.Greater(t, cap(writeCh), 0)
-
- // Test sending and receiving
- fileCh <- "test.go"
- val := <-fileCh
- assert.Equal(t, "test.go", val)
-
- writeCh <- WriteRequest{Content: "test content"}
- writeReq := <-writeCh
- assert.Equal(t, "test content", writeReq.Content)
-
- close(fileCh)
- close(writeCh)
- })
-
- t.Run("creates unbuffered channels when disabled", func(t *testing.T) {
- // Use viper to configure instead of direct field access
- cleanupViperConfig(t, testBackpressureEnabled)
-
- viper.Set(testBackpressureEnabled, false)
- bm := NewBackpressureManager()
-
- fileCh, writeCh := bm.CreateChannels()
- assert.NotNil(t, fileCh)
- assert.NotNil(t, writeCh)
-
- // Unbuffered channels have capacity 0
- assert.Equal(t, 0, cap(fileCh))
- assert.Equal(t, 0, cap(writeCh))
-
- close(fileCh)
- close(writeCh)
- })
-}
-
-func TestBackpressureManagerWaitForChannelSpace(t *testing.T) {
- t.Run("does nothing when disabled", func(t *testing.T) {
- // Use viper to configure instead of direct field access
- cleanupViperConfig(t, testBackpressureEnabled)
-
- viper.Set(testBackpressureEnabled, false)
- bm := NewBackpressureManager()
-
- fileCh := make(chan string, 1)
- writeCh := make(chan WriteRequest, 1)
-
- // Use context with timeout instead of measuring elapsed time
- ctx, cancel := context.WithTimeout(context.Background(), fastOpTimeout)
- defer cancel()
-
- done := make(chan struct{})
- go func() {
- bm.WaitForChannelSpace(ctx, fileCh, writeCh)
- close(done)
- }()
-
- // Should return immediately (before timeout)
- select {
- case <-done:
- // Success - operation completed quickly
- case <-ctx.Done():
- t.Fatal("WaitForChannelSpace should return immediately when disabled")
- }
-
- close(fileCh)
- close(writeCh)
- })
-
- t.Run("waits when file channel is nearly full", func(t *testing.T) {
- // Use viper to configure instead of direct field access
- cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxFiles)
-
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxFiles, 10)
- bm := NewBackpressureManager()
-
- // Create channel with exact capacity
- fileCh := make(chan string, 10)
- writeCh := make(chan WriteRequest, 10)
-
- // Fill file channel to >90% (with minimum of 1)
- target := max(1, int(float64(cap(fileCh))*0.9))
- for i := 0; i < target; i++ {
- fileCh <- "file.txt"
- }
-
- // Test that it blocks by verifying it doesn't complete immediately
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- done := make(chan struct{})
- start := time.Now()
- go func() {
- bm.WaitForChannelSpace(ctx, fileCh, writeCh)
- close(done)
- }()
-
- // Verify it doesn't complete immediately (within first millisecond)
- select {
- case <-done:
- t.Fatal("WaitForChannelSpace should block when channel is nearly full")
- case <-time.After(1 * time.Millisecond):
- // Good - it's blocking as expected
- }
-
- // Wait for it to complete
- <-done
- duration := time.Since(start)
- // Just verify it took some measurable time (very lenient for CI)
- assert.GreaterOrEqual(t, duration, 1*time.Millisecond)
-
- // Clean up
- for i := 0; i < target; i++ {
- <-fileCh
- }
- close(fileCh)
- close(writeCh)
- })
-
- t.Run("waits when write channel is nearly full", func(t *testing.T) {
- // Use viper to configure instead of direct field access
- cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxWrites)
-
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxWrites, 10)
- bm := NewBackpressureManager()
-
- fileCh := make(chan string, 10)
- writeCh := make(chan WriteRequest, 10)
-
- // Fill write channel to >90% (with minimum of 1)
- target := max(1, int(float64(cap(writeCh))*0.9))
- for i := 0; i < target; i++ {
- writeCh <- WriteRequest{}
- }
-
- // Test that it blocks by verifying it doesn't complete immediately
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- done := make(chan struct{})
- start := time.Now()
- go func() {
- bm.WaitForChannelSpace(ctx, fileCh, writeCh)
- close(done)
- }()
-
- // Verify it doesn't complete immediately (within first millisecond)
- select {
- case <-done:
- t.Fatal("WaitForChannelSpace should block when channel is nearly full")
- case <-time.After(1 * time.Millisecond):
- // Good - it's blocking as expected
- }
-
- // Wait for it to complete
- <-done
- duration := time.Since(start)
- // Just verify it took some measurable time (very lenient for CI)
- assert.GreaterOrEqual(t, duration, 1*time.Millisecond)
-
- // Clean up
- for i := 0; i < target; i++ {
- <-writeCh
- }
- close(fileCh)
- close(writeCh)
- })
-
- t.Run("respects context cancellation", func(t *testing.T) {
- // Use viper to configure instead of direct field access
- cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxFiles)
-
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxFiles, 10)
- bm := NewBackpressureManager()
-
- fileCh := make(chan string, 10)
- writeCh := make(chan WriteRequest, 10)
-
- // Fill channel
- for i := 0; i < 10; i++ {
- fileCh <- "file.txt"
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- cancel() // Cancel immediately
-
- // Use timeout to verify it returns quickly
- done := make(chan struct{})
- go func() {
- bm.WaitForChannelSpace(ctx, fileCh, writeCh)
- close(done)
- }()
-
- // Should return quickly when context is cancelled
- select {
- case <-done:
- // Success - returned due to cancellation
- case <-time.After(fastOpTimeout):
- t.Fatal("WaitForChannelSpace should return immediately when context is cancelled")
- }
-
- // Clean up
- for i := 0; i < 10; i++ {
- <-fileCh
- }
- close(fileCh)
- close(writeCh)
- })
-}
diff --git a/fileproc/backpressure_concurrency_test.go b/fileproc/backpressure_concurrency_test.go
deleted file mode 100644
index ad4b897..0000000
--- a/fileproc/backpressure_concurrency_test.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package fileproc
-
-import (
- "context"
- "sync"
- "testing"
- "time"
-
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestBackpressureManagerConcurrency(t *testing.T) {
- // Configure via viper instead of direct field access
- origEnabled := viper.Get(testBackpressureEnabled)
- t.Cleanup(func() {
- if origEnabled != nil {
- viper.Set(testBackpressureEnabled, origEnabled)
- }
- })
- viper.Set(testBackpressureEnabled, true)
-
- bm := NewBackpressureManager()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- var wg sync.WaitGroup
-
- // Multiple goroutines checking backpressure
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- bm.ShouldApplyBackpressure(ctx)
- }()
- }
-
- // Multiple goroutines applying backpressure
- for i := 0; i < 5; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- bm.ApplyBackpressure(ctx)
- }()
- }
-
- // Multiple goroutines getting stats
- for i := 0; i < 5; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- bm.GetStats()
- }()
- }
-
- // Multiple goroutines creating channels
- // Note: CreateChannels returns new channels each time, caller owns them
- type channelResult struct {
- fileCh chan string
- writeCh chan WriteRequest
- }
- results := make(chan channelResult, 3)
- for i := 0; i < 3; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- fileCh, writeCh := bm.CreateChannels()
- results <- channelResult{fileCh, writeCh}
- }()
- }
-
- wg.Wait()
- close(results)
-
- // Verify channels are created and have expected properties
- for result := range results {
- assert.NotNil(t, result.fileCh)
- assert.NotNil(t, result.writeCh)
- // Close channels to prevent resource leak (caller owns them)
- close(result.fileCh)
- close(result.writeCh)
- }
-
- // Verify stats are consistent
- stats := bm.GetStats()
- assert.GreaterOrEqual(t, stats.FilesProcessed, int64(10))
-}
-
-func TestBackpressureManagerIntegration(t *testing.T) {
- // Configure via viper instead of direct field access
- origEnabled := viper.Get(testBackpressureEnabled)
- origMaxFiles := viper.Get(testBackpressureMaxFiles)
- origMaxWrites := viper.Get(testBackpressureMaxWrites)
- origCheckInterval := viper.Get(testBackpressureMemoryCheck)
- origMaxMemory := viper.Get(testBackpressureMaxMemory)
- t.Cleanup(func() {
- if origEnabled != nil {
- viper.Set(testBackpressureEnabled, origEnabled)
- }
- if origMaxFiles != nil {
- viper.Set(testBackpressureMaxFiles, origMaxFiles)
- }
- if origMaxWrites != nil {
- viper.Set(testBackpressureMaxWrites, origMaxWrites)
- }
- if origCheckInterval != nil {
- viper.Set(testBackpressureMemoryCheck, origCheckInterval)
- }
- if origMaxMemory != nil {
- viper.Set(testBackpressureMaxMemory, origMaxMemory)
- }
- })
-
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxFiles, 10)
- viper.Set(testBackpressureMaxWrites, 10)
- viper.Set(testBackpressureMemoryCheck, 10)
- viper.Set(testBackpressureMaxMemory, 100*1024*1024) // 100MB
-
- bm := NewBackpressureManager()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- // Create channels - caller owns these channels and is responsible for closing them
- fileCh, writeCh := bm.CreateChannels()
- require.NotNil(t, fileCh)
- require.NotNil(t, writeCh)
- require.Greater(t, cap(fileCh), 0, "fileCh should be buffered")
- require.Greater(t, cap(writeCh), 0, "writeCh should be buffered")
-
- // Simulate file processing
- var wg sync.WaitGroup
-
- // Producer
- wg.Add(1)
- go func() {
- defer wg.Done()
- for i := 0; i < 100; i++ {
- // Check for backpressure
- if bm.ShouldApplyBackpressure(ctx) {
- bm.ApplyBackpressure(ctx)
- }
-
- // Wait for channel space if needed
- bm.WaitForChannelSpace(ctx, fileCh, writeCh)
-
- select {
- case fileCh <- "file.txt":
- // File sent
- case <-ctx.Done():
- return
- }
- }
- }()
-
- // Consumer
- wg.Add(1)
- go func() {
- defer wg.Done()
- for i := 0; i < 100; i++ {
- select {
- case <-fileCh:
- // Process file (do not manually increment filesProcessed)
- case <-ctx.Done():
- return
- }
- }
- }()
-
- // Wait for completion
- done := make(chan struct{})
- go func() {
- wg.Wait()
- close(done)
- }()
-
- select {
- case <-done:
- // Success
- case <-time.After(5 * time.Second):
- t.Fatal("Integration test timeout")
- }
-
- // Log final info
- bm.LogBackpressureInfo()
-
- // Check final stats
- stats := bm.GetStats()
- assert.GreaterOrEqual(t, stats.FilesProcessed, int64(100))
-
- // Clean up - caller owns the channels, safe to close now that goroutines have finished
- close(fileCh)
- close(writeCh)
-}
diff --git a/fileproc/backpressure_init_test.go b/fileproc/backpressure_init_test.go
deleted file mode 100644
index 3660392..0000000
--- a/fileproc/backpressure_init_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package fileproc
-
-import (
- "context"
- "testing"
-
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
-)
-
-// setupViperCleanup is a test helper that captures and restores viper configuration.
-// It takes a testing.T and a list of config keys to save/restore.
-func setupViperCleanup(t *testing.T, keys []string) {
- t.Helper()
- // Capture original values and track which keys existed
- origValues := make(map[string]interface{})
- keysExisted := make(map[string]bool)
- for _, key := range keys {
- val := viper.Get(key)
- origValues[key] = val
- keysExisted[key] = viper.IsSet(key)
- }
- // Register cleanup to restore values
- t.Cleanup(func() {
- for _, key := range keys {
- if keysExisted[key] {
- viper.Set(key, origValues[key])
- } else {
- // Key didn't exist originally, so remove it
- allSettings := viper.AllSettings()
- delete(allSettings, key)
- viper.Reset()
- for k, v := range allSettings {
- viper.Set(k, v)
- }
- }
- }
- })
-}
-
-func TestNewBackpressureManager(t *testing.T) {
- keys := []string{
- testBackpressureEnabled,
- testBackpressureMaxMemory,
- testBackpressureMemoryCheck,
- testBackpressureMaxFiles,
- testBackpressureMaxWrites,
- }
- setupViperCleanup(t, keys)
-
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxMemory, 100)
- viper.Set(testBackpressureMemoryCheck, 10)
- viper.Set(testBackpressureMaxFiles, 10)
- viper.Set(testBackpressureMaxWrites, 10)
-
- bm := NewBackpressureManager()
- assert.NotNil(t, bm)
- assert.True(t, bm.enabled)
- assert.Greater(t, bm.maxMemoryUsage, int64(0))
- assert.Greater(t, bm.memoryCheckInterval, 0)
- assert.Greater(t, bm.maxPendingFiles, 0)
- assert.Greater(t, bm.maxPendingWrites, 0)
- assert.Equal(t, int64(0), bm.filesProcessed)
-}
-
-func TestBackpressureStatsStructure(t *testing.T) {
- // Behavioral test that exercises BackpressureManager and validates stats
- keys := []string{
- testBackpressureEnabled,
- testBackpressureMaxMemory,
- testBackpressureMemoryCheck,
- testBackpressureMaxFiles,
- testBackpressureMaxWrites,
- }
- setupViperCleanup(t, keys)
-
- // Configure backpressure with realistic settings
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMaxMemory, 100*1024*1024) // 100MB
- viper.Set(testBackpressureMemoryCheck, 1) // Check every file
- viper.Set(testBackpressureMaxFiles, 1000)
- viper.Set(testBackpressureMaxWrites, 500)
-
- bm := NewBackpressureManager()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- // Simulate processing files
- initialStats := bm.GetStats()
- assert.True(t, initialStats.Enabled, "backpressure should be enabled")
- assert.Equal(t, int64(0), initialStats.FilesProcessed, "initially no files processed")
-
- // Capture initial timestamp to verify it gets updated
- initialLastCheck := initialStats.LastMemoryCheck
-
- // Process some files to trigger memory checks
- for i := 0; i < 5; i++ {
- bm.ShouldApplyBackpressure(ctx)
- }
-
- // Verify stats reflect the operations
- stats := bm.GetStats()
- assert.True(t, stats.Enabled, "enabled flag should be set")
- assert.Equal(t, int64(5), stats.FilesProcessed, "should have processed 5 files")
- assert.Greater(t, stats.CurrentMemoryUsage, int64(0), "memory usage should be tracked")
- assert.Equal(t, int64(100*1024*1024), stats.MaxMemoryUsage, "max memory should match config")
- assert.Equal(t, 1000, stats.MaxPendingFiles, "maxPendingFiles should match config")
- assert.Equal(t, 500, stats.MaxPendingWrites, "maxPendingWrites should match config")
- assert.True(t, stats.LastMemoryCheck.After(initialLastCheck) || stats.LastMemoryCheck.Equal(initialLastCheck),
- "lastMemoryCheck should be updated or remain initialized")
-}
-
-func TestBackpressureManagerGetStats(t *testing.T) {
- keys := []string{
- testBackpressureEnabled,
- testBackpressureMemoryCheck,
- }
- setupViperCleanup(t, keys)
-
- // Ensure config enables backpressure and checks every call
- viper.Set(testBackpressureEnabled, true)
- viper.Set(testBackpressureMemoryCheck, 1)
-
- bm := NewBackpressureManager()
-
- // Capture initial timestamp to verify it gets updated
- initialStats := bm.GetStats()
- initialLastCheck := initialStats.LastMemoryCheck
-
- // Process some files to update stats
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- for i := 0; i < 5; i++ {
- bm.ShouldApplyBackpressure(ctx)
- }
-
- stats := bm.GetStats()
-
- assert.True(t, stats.Enabled)
- assert.Equal(t, int64(5), stats.FilesProcessed)
- assert.Greater(t, stats.CurrentMemoryUsage, int64(0))
- assert.Equal(t, bm.maxMemoryUsage, stats.MaxMemoryUsage)
- assert.Equal(t, bm.maxPendingFiles, stats.MaxPendingFiles)
- assert.Equal(t, bm.maxPendingWrites, stats.MaxPendingWrites)
-
- // LastMemoryCheck should be updated after processing files (memoryCheckInterval=1)
- assert.True(t, stats.LastMemoryCheck.After(initialLastCheck),
- "lastMemoryCheck should be updated after memory checks")
-}
diff --git a/fileproc/backpressure_test.go b/fileproc/backpressure_test.go
new file mode 100644
index 0000000..096fa47
--- /dev/null
+++ b/fileproc/backpressure_test.go
@@ -0,0 +1,344 @@
+package fileproc_test
+
+import (
+ "context"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/ivuorinen/gibidify/fileproc"
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
+)
+
+func TestNewBackpressureManager(t *testing.T) {
+ // Test creating a new backpressure manager
+ bp := fileproc.NewBackpressureManager()
+
+ if bp == nil {
+ t.Error("Expected backpressure manager to be created, got nil")
+ }
+
+ // The backpressure manager should be initialized with config values
+ // We can't test the internal values directly since they're private,
+ // but we can test that it was created successfully
+}
+
+func TestBackpressureManagerCreateChannels(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+
+ // Test creating channels
+ fileCh, writeCh := bp.CreateChannels()
+
+ // Verify channels are created
+ if fileCh == nil {
+ t.Error("Expected file channel to be created, got nil")
+ }
+ if writeCh == nil {
+ t.Error("Expected write channel to be created, got nil")
+ }
+
+ // Test that channels can be used
+ select {
+ case fileCh <- "test-file":
+ // Successfully sent to channel
+ default:
+ t.Error("Unable to send to file channel")
+ }
+
+ // Read from channel
+ select {
+ case file := <-fileCh:
+ if file != "test-file" {
+ t.Errorf("Expected 'test-file', got %s", file)
+ }
+ case <-time.After(100 * time.Millisecond):
+ t.Error("Timeout reading from file channel")
+ }
+}
+
+func TestBackpressureManagerShouldApplyBackpressure(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ // Test backpressure decision
+ shouldApply := bp.ShouldApplyBackpressure(ctx)
+
+ // Since we're using default config, backpressure behavior depends on settings
+ // We just test that the method returns without error
+ // shouldApply is a valid boolean value
+ _ = shouldApply
+}
+
+func TestBackpressureManagerApplyBackpressure(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ // Test applying backpressure
+ bp.ApplyBackpressure(ctx)
+
+ // ApplyBackpressure is a void method that should not panic
+ // If we reach here, the method executed successfully
+}
+
+func TestBackpressureManagerApplyBackpressureWithCancellation(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+
+ // Create canceled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ // Test applying backpressure with canceled context
+ bp.ApplyBackpressure(ctx)
+
+ // ApplyBackpressure doesn't return errors, but should handle cancellation gracefully
+ // If we reach here without hanging, the method handled cancellation properly
+}
+
+func TestBackpressureManagerGetStats(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+
+ // Test getting stats
+ stats := bp.Stats()
+
+ // Stats should contain relevant information
+ if stats.FilesProcessed < 0 {
+ t.Error("Expected non-negative files processed count")
+ }
+
+ if stats.CurrentMemoryUsage < 0 {
+ t.Error("Expected non-negative memory usage")
+ }
+
+ if stats.MaxMemoryUsage < 0 {
+ t.Error("Expected non-negative max memory usage")
+ }
+
+ // Test that stats have reasonable values
+ if stats.MaxPendingFiles < 0 || stats.MaxPendingWrites < 0 {
+ t.Error("Expected non-negative channel buffer sizes")
+ }
+}
+
+func TestBackpressureManagerWaitForChannelSpace(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ // Create test channels
+ fileCh, writeCh := bp.CreateChannels()
+
+ // Test waiting for channel space
+ bp.WaitForChannelSpace(ctx, fileCh, writeCh)
+
+ // WaitForChannelSpace is void method that should complete without hanging
+ // If we reach here, the method executed successfully
+}
+
+func TestBackpressureManagerWaitForChannelSpaceWithCancellation(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+
+ // Create canceled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ // Create test channels
+ fileCh, writeCh := bp.CreateChannels()
+
+ // Test waiting for channel space with canceled context
+ bp.WaitForChannelSpace(ctx, fileCh, writeCh)
+
+ // WaitForChannelSpace should handle cancellation gracefully without hanging
+ // If we reach here, the method handled cancellation properly
+}
+
+func TestBackpressureManagerLogBackpressureInfo(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ bp := fileproc.NewBackpressureManager()
+
+ // Test logging backpressure info
+ // This method primarily logs information, so we test it executes without panic
+ bp.LogBackpressureInfo()
+
+ // If we reach here without panic, the method worked
+}
+
+// BenchmarkBackpressureManager benchmarks backpressure operations.
+func BenchmarkBackpressureManagerCreateChannels(b *testing.B) {
+ bp := fileproc.NewBackpressureManager()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fileCh, writeCh := bp.CreateChannels()
+
+ // Use channels to prevent optimization
+ _ = fileCh
+ _ = writeCh
+
+ runtime.GC() // Force GC to measure memory impact
+ }
+}
+
+func BenchmarkBackpressureManagerShouldApplyBackpressure(b *testing.B) {
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ shouldApply := bp.ShouldApplyBackpressure(ctx)
+ _ = shouldApply // Prevent optimization
+ }
+}
+
+func BenchmarkBackpressureManagerApplyBackpressure(b *testing.B) {
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bp.ApplyBackpressure(ctx)
+ }
+}
+
+func BenchmarkBackpressureManagerGetStats(b *testing.B) {
+ bp := fileproc.NewBackpressureManager()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ stats := bp.Stats()
+ _ = stats // Prevent optimization
+ }
+}
+
+// TestBackpressureManager_ShouldApplyBackpressure_EdgeCases tests various edge cases for backpressure decision.
+func TestBackpressureManagerShouldApplyBackpressureEdgeCases(t *testing.T) {
+ testutil.ApplyBackpressureOverrides(t, map[string]any{
+ shared.ConfigKeyBackpressureEnabled: true,
+ "backpressure.memory_check_interval": 2,
+ "backpressure.memory_limit_mb": 1,
+ })
+
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ // Test multiple calls to trigger memory check interval logic
+ for i := 0; i < 10; i++ {
+ shouldApply := bp.ShouldApplyBackpressure(ctx)
+ _ = shouldApply
+ }
+
+ // At this point, memory checking should have triggered multiple times
+ // The actual decision depends on memory usage, but we're testing the paths
+}
+
+// TestBackpressureManager_CreateChannels_EdgeCases tests edge cases in channel creation.
+func TestBackpressureManagerCreateChannelsEdgeCases(t *testing.T) {
+ // Test with custom configuration that might trigger different buffer sizes
+ testutil.ApplyBackpressureOverrides(t, map[string]any{
+ "backpressure.file_buffer_size": 50,
+ "backpressure.write_buffer_size": 25,
+ })
+
+ bp := fileproc.NewBackpressureManager()
+
+ // Create multiple channel sets to test resource management
+ for i := 0; i < 5; i++ {
+ fileCh, writeCh := bp.CreateChannels()
+
+ // Verify channels work correctly
+ select {
+ case fileCh <- "test":
+ // Good - channel accepted value
+ default:
+ // This is also acceptable if buffer is full
+ }
+
+ // Test write channel
+ select {
+ case writeCh <- fileproc.WriteRequest{Path: "test", Content: "content"}:
+ // Good - channel accepted value
+ default:
+ // This is also acceptable if buffer is full
+ }
+ }
+}
+
+// TestBackpressureManager_WaitForChannelSpace_EdgeCases tests edge cases in channel space waiting.
+func TestBackpressureManagerWaitForChannelSpaceEdgeCases(t *testing.T) {
+ testutil.ApplyBackpressureOverrides(t, map[string]any{
+ shared.ConfigKeyBackpressureEnabled: true,
+ "backpressure.wait_timeout_ms": 10,
+ })
+
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ // Create channels with small buffers
+ fileCh, writeCh := bp.CreateChannels()
+
+ // Fill up the channels to create pressure
+ go func() {
+ for i := 0; i < 100; i++ {
+ select {
+ case fileCh <- "file":
+ case <-time.After(1 * time.Millisecond):
+ }
+ }
+ }()
+
+ go func() {
+ for i := 0; i < 100; i++ {
+ select {
+ case writeCh <- fileproc.WriteRequest{Path: "test", Content: "content"}:
+ case <-time.After(1 * time.Millisecond):
+ }
+ }
+ }()
+
+ // Wait for channel space - should handle the full channels
+ bp.WaitForChannelSpace(ctx, fileCh, writeCh)
+}
+
+// TestBackpressureManager_MemoryPressure tests behavior under simulated memory pressure.
+func TestBackpressureManagerMemoryPressure(t *testing.T) {
+ // Test with very low memory limit to trigger backpressure
+ testutil.ApplyBackpressureOverrides(t, map[string]any{
+ shared.ConfigKeyBackpressureEnabled: true,
+ "backpressure.memory_limit_mb": 0.001,
+ "backpressure.memory_check_interval": 1,
+ })
+
+ bp := fileproc.NewBackpressureManager()
+ ctx := context.Background()
+
+ // Allocate some memory to potentially trigger limits
+ largeBuffer := make([]byte, 1024*1024) // 1MB
+ _ = largeBuffer[0]
+
+ // Test backpressure decision under memory pressure
+ for i := 0; i < 5; i++ {
+ shouldApply := bp.ShouldApplyBackpressure(ctx)
+ if shouldApply {
+ // Test applying backpressure when needed
+ bp.ApplyBackpressure(ctx)
+ t.Log("Backpressure applied due to memory pressure")
+ }
+ }
+
+ // Test logging
+ bp.LogBackpressureInfo()
+}
diff --git a/fileproc/cache.go b/fileproc/cache.go
index ab3ad60..c206508 100644
--- a/fileproc/cache.go
+++ b/fileproc/cache.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
// getNormalizedExtension efficiently extracts and normalizes the file extension with caching.
@@ -6,6 +7,7 @@ func (r *FileTypeRegistry) getNormalizedExtension(filename string) string {
r.cacheMutex.RLock()
if ext, exists := r.extCache[filename]; exists {
r.cacheMutex.RUnlock()
+
return ext
}
r.cacheMutex.RUnlock()
@@ -42,6 +44,7 @@ func (r *FileTypeRegistry) getFileTypeResult(filename string) FileTypeResult {
r.updateStats(func() {
r.stats.CacheHits++
})
+
return result
}
r.cacheMutex.RUnlock()
diff --git a/fileproc/collector.go b/fileproc/collector.go
index 6091c98..5113165 100644
--- a/fileproc/collector.go
+++ b/fileproc/collector.go
@@ -5,5 +5,6 @@ package fileproc
// and returns a slice of file paths.
func CollectFiles(root string) ([]string, error) {
w := NewProdWalker()
+
return w.Walk(root)
}
diff --git a/fileproc/collector_test.go b/fileproc/collector_test.go
index 55740c1..9ff85f9 100644
--- a/fileproc/collector_test.go
+++ b/fileproc/collector_test.go
@@ -2,6 +2,7 @@ package fileproc_test
import (
"os"
+ "path/filepath"
"testing"
"github.com/ivuorinen/gibidify/fileproc"
@@ -47,3 +48,70 @@ func TestCollectFilesError(t *testing.T) {
t.Fatal("Expected an error, got nil")
}
}
+
+// TestCollectFiles tests the actual CollectFiles function with a real directory.
+func TestCollectFiles(t *testing.T) {
+ // Create a temporary directory with test files
+ tmpDir := t.TempDir()
+
+ // Create test files with known supported extensions
+ testFiles := map[string]string{
+ "test1.go": "package main\n\nfunc main() {\n\t// Go file\n}",
+ "test2.py": "# Python file\nprint('hello world')",
+ "test3.js": "// JavaScript file\nconsole.log('hello');",
+ }
+
+ for name, content := range testFiles {
+ filePath := filepath.Join(tmpDir, name)
+ if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil {
+ t.Fatalf("Failed to create test file %s: %v", name, err)
+ }
+ }
+
+ // Test CollectFiles
+ files, err := fileproc.CollectFiles(tmpDir)
+ if err != nil {
+ t.Fatalf("CollectFiles failed: %v", err)
+ }
+
+ // Verify we got the expected number of files
+ if len(files) != len(testFiles) {
+ t.Errorf("Expected %d files, got %d", len(testFiles), len(files))
+ }
+
+ // Verify all expected files are found
+ foundFiles := make(map[string]bool)
+ for _, file := range files {
+ foundFiles[file] = true
+ }
+
+ for expectedFile := range testFiles {
+ expectedPath := filepath.Join(tmpDir, expectedFile)
+ if !foundFiles[expectedPath] {
+ t.Errorf("Expected file %s not found in results", expectedPath)
+ }
+ }
+}
+
+// TestCollectFiles_NonExistentDirectory tests CollectFiles with a non-existent directory.
+func TestCollectFilesNonExistentDirectory(t *testing.T) {
+ _, err := fileproc.CollectFiles("/non/existent/directory")
+ if err == nil {
+ t.Error("Expected error for non-existent directory, got nil")
+ }
+}
+
+// TestCollectFiles_EmptyDirectory tests CollectFiles with an empty directory.
+func TestCollectFilesEmptyDirectory(t *testing.T) {
+ tmpDir := t.TempDir()
+ // Don't create any files
+
+ files, err := fileproc.CollectFiles(tmpDir)
+ if err != nil {
+ t.Fatalf("CollectFiles failed on empty directory: %v", err)
+ }
+
+ if len(files) != 0 {
+ t.Errorf("Expected 0 files in empty directory, got %d", len(files))
+ }
+}
diff --git a/fileproc/config.go b/fileproc/config.go
index 06f13e6..72470c5 100644
--- a/fileproc/config.go
+++ b/fileproc/config.go
@@ -1,156 +1,7 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
-import (
- "fmt"
- "path/filepath"
- "strings"
-)
-
-const (
- // MaxRegistryEntries is the maximum number of entries allowed in registry config slices/maps.
- MaxRegistryEntries = 1000
- // MaxExtensionLength is the maximum length for a single extension string.
- MaxExtensionLength = 100
-)
-
-// RegistryConfig holds configuration for file type registry.
-// All paths must be relative without path traversal (no ".." or leading "/").
-// Extensions in CustomLanguages keys must start with "." or be alphanumeric with underscore/hyphen.
-type RegistryConfig struct {
- // CustomImages: file extensions to treat as images (e.g., ".svg", ".webp").
- // Must be relative paths without ".." or leading separators.
- CustomImages []string
-
- // CustomBinary: file extensions to treat as binary (e.g., ".bin", ".dat").
- // Must be relative paths without ".." or leading separators.
- CustomBinary []string
-
- // CustomLanguages: maps file extensions to language names (e.g., {".tsx": "TypeScript"}).
- // Keys must start with "." or be alphanumeric with underscore/hyphen.
- CustomLanguages map[string]string
-
- // DisabledImages: image extensions to disable from default registry.
- DisabledImages []string
-
- // DisabledBinary: binary extensions to disable from default registry.
- DisabledBinary []string
-
- // DisabledLanguages: language extensions to disable from default registry.
- DisabledLanguages []string
-}
-
-// Validate checks the RegistryConfig for invalid entries and enforces limits.
-func (c *RegistryConfig) Validate() error {
- // Validate CustomImages
- if err := validateExtensionSlice(c.CustomImages, "CustomImages"); err != nil {
- return err
- }
-
- // Validate CustomBinary
- if err := validateExtensionSlice(c.CustomBinary, "CustomBinary"); err != nil {
- return err
- }
-
- // Validate CustomLanguages
- if len(c.CustomLanguages) > MaxRegistryEntries {
- return fmt.Errorf(
- "CustomLanguages exceeds maximum entries (%d > %d)",
- len(c.CustomLanguages),
- MaxRegistryEntries,
- )
- }
- for ext, lang := range c.CustomLanguages {
- if err := validateExtension(ext, "CustomLanguages key"); err != nil {
- return err
- }
- if len(lang) > MaxExtensionLength {
- return fmt.Errorf(
- "CustomLanguages value %q exceeds maximum length (%d > %d)",
- lang,
- len(lang),
- MaxExtensionLength,
- )
- }
- }
-
- // Validate Disabled slices
- if err := validateExtensionSlice(c.DisabledImages, "DisabledImages"); err != nil {
- return err
- }
- if err := validateExtensionSlice(c.DisabledBinary, "DisabledBinary"); err != nil {
- return err
- }
-
- return validateExtensionSlice(c.DisabledLanguages, "DisabledLanguages")
-}
-
-// validateExtensionSlice validates a slice of extensions for path safety and limits.
-func validateExtensionSlice(slice []string, fieldName string) error {
- if len(slice) > MaxRegistryEntries {
- return fmt.Errorf("%s exceeds maximum entries (%d > %d)", fieldName, len(slice), MaxRegistryEntries)
- }
- for _, ext := range slice {
- if err := validateExtension(ext, fieldName); err != nil {
- return err
- }
- }
- return nil
-}
-
-// validateExtension validates a single extension for path safety.
-//
-//revive:disable-next-line:cyclomatic
-func validateExtension(ext, context string) error {
- // Reject empty strings
- if ext == "" {
- return fmt.Errorf("%s entry cannot be empty", context)
- }
-
- if len(ext) > MaxExtensionLength {
- return fmt.Errorf(
- "%s entry %q exceeds maximum length (%d > %d)",
- context, ext, len(ext), MaxExtensionLength,
- )
- }
-
- // Reject absolute paths
- if filepath.IsAbs(ext) {
- return fmt.Errorf("%s entry %q is an absolute path (not allowed)", context, ext)
- }
-
- // Reject path traversal
- if strings.Contains(ext, "..") {
- return fmt.Errorf("%s entry %q contains path traversal (not allowed)", context, ext)
- }
-
- // For extensions, verify they start with "." or are alphanumeric
- if strings.HasPrefix(ext, ".") {
- // Reject extensions containing path separators
- if strings.ContainsRune(ext, filepath.Separator) || strings.ContainsRune(ext, '/') ||
- strings.ContainsRune(ext, '\\') {
- return fmt.Errorf("%s entry %q contains path separators (not allowed)", context, ext)
- }
- // Valid extension format
- return nil
- }
-
- // Check if purely alphanumeric (for bare names)
- for _, r := range ext {
- isValid := (r >= 'a' && r <= 'z') ||
- (r >= 'A' && r <= 'Z') ||
- (r >= '0' && r <= '9') ||
- r == '_' || r == '-'
- if !isValid {
- return fmt.Errorf(
- "%s entry %q contains invalid characters (must start with '.' or be alphanumeric/_/-)",
- context,
- ext,
- )
- }
- }
-
- return nil
-}
+import "strings"
// ApplyCustomExtensions applies custom extensions from configuration.
func (r *FileTypeRegistry) ApplyCustomExtensions(
@@ -182,24 +33,12 @@ func (r *FileTypeRegistry) addExtensions(extensions []string, adder func(string)
// ConfigureFromSettings applies configuration settings to the registry.
// This function is called from main.go after config is loaded to avoid circular imports.
-// It validates the configuration before applying it.
-func ConfigureFromSettings(config RegistryConfig) error {
- // Validate configuration first
- if err := config.Validate(); err != nil {
- return err
- }
-
- registry := GetDefaultRegistry()
-
- // Only apply custom extensions if they are non-empty (len() for nil slices/maps is zero)
- if len(config.CustomImages) > 0 || len(config.CustomBinary) > 0 || len(config.CustomLanguages) > 0 {
- registry.ApplyCustomExtensions(config.CustomImages, config.CustomBinary, config.CustomLanguages)
- }
-
- // Only disable extensions if they are non-empty
- if len(config.DisabledImages) > 0 || len(config.DisabledBinary) > 0 || len(config.DisabledLanguages) > 0 {
- registry.DisableExtensions(config.DisabledImages, config.DisabledBinary, config.DisabledLanguages)
- }
-
- return nil
+func ConfigureFromSettings(
+ customImages, customBinary []string,
+ customLanguages map[string]string,
+ disabledImages, disabledBinary, disabledLanguages []string,
+) {
+ registry := DefaultRegistry()
+ registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
+ registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
}
diff --git a/fileproc/detection.go b/fileproc/detection.go
index f4e2929..173ce3c 100644
--- a/fileproc/detection.go
+++ b/fileproc/detection.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import "strings"
@@ -14,9 +15,9 @@ func IsBinary(filename string) bool {
return getRegistry().IsBinary(filename)
}
-// GetLanguage returns the language identifier for the given filename based on its extension.
-func GetLanguage(filename string) string {
- return getRegistry().GetLanguage(filename)
+// Language returns the language identifier for the given filename based on its extension.
+func Language(filename string) string {
+ return getRegistry().Language(filename)
}
// Registry methods for detection
@@ -24,21 +25,24 @@ func GetLanguage(filename string) string {
// IsImage checks if the file extension indicates an image file.
func (r *FileTypeRegistry) IsImage(filename string) bool {
result := r.getFileTypeResult(filename)
+
return result.IsImage
}
// IsBinary checks if the file extension indicates a binary file.
func (r *FileTypeRegistry) IsBinary(filename string) bool {
result := r.getFileTypeResult(filename)
+
return result.IsBinary
}
-// GetLanguage returns the language identifier for the given filename based on its extension.
-func (r *FileTypeRegistry) GetLanguage(filename string) string {
+// Language returns the language identifier for the given filename based on its extension.
+func (r *FileTypeRegistry) Language(filename string) string {
if len(filename) < minExtensionLength {
return ""
}
result := r.getFileTypeResult(filename)
+
return result.Language
}
diff --git a/fileproc/extensions.go b/fileproc/extensions.go
index 602f107..b15a05c 100644
--- a/fileproc/extensions.go
+++ b/fileproc/extensions.go
@@ -1,5 +1,8 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
+import "github.com/ivuorinen/gibidify/shared"
+
// getImageExtensions returns the default image file extensions.
func getImageExtensions() map[string]bool {
return map[string]bool{
@@ -130,15 +133,15 @@ func getLanguageMap() map[string]string {
".cmd": "batch",
// Data formats
- ".json": "json",
- ".yaml": "yaml",
- ".yml": "yaml",
+ ".json": shared.FormatJSON,
+ ".yaml": shared.FormatYAML,
+ ".yml": shared.FormatYAML,
".toml": "toml",
".xml": "xml",
".sql": "sql",
// Documentation
- ".md": "markdown",
+ ".md": shared.FormatMarkdown,
".rst": "rst",
".tex": "latex",
diff --git a/fileproc/fake_walker.go b/fileproc/fake_walker.go
index f809717..226ae02 100644
--- a/fileproc/fake_walker.go
+++ b/fileproc/fake_walker.go
@@ -12,5 +12,6 @@ func (fw FakeWalker) Walk(_ string) ([]string, error) {
if fw.Err != nil {
return nil, fw.Err
}
+
return fw.Files, nil
}
diff --git a/fileproc/file_filters.go b/fileproc/file_filters.go
index 995d98f..1008d33 100644
--- a/fileproc/file_filters.go
+++ b/fileproc/file_filters.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -15,8 +16,8 @@ type FileFilter struct {
// NewFileFilter creates a new file filter with current configuration.
func NewFileFilter() *FileFilter {
return &FileFilter{
- ignoredDirs: config.GetIgnoredDirectories(),
- sizeLimit: config.GetFileSizeLimit(),
+ ignoredDirs: config.IgnoredDirectories(),
+ sizeLimit: config.FileSizeLimit(),
}
}
@@ -40,6 +41,7 @@ func (f *FileFilter) shouldSkipDirectory(entry os.DirEntry) bool {
return true
}
}
+
return false
}
diff --git a/fileproc/filetypes_concurrency_test.go b/fileproc/filetypes_concurrency_test.go
index 817d4b6..5792657 100644
--- a/fileproc/filetypes_concurrency_test.go
+++ b/fileproc/filetypes_concurrency_test.go
@@ -1,105 +1,200 @@
package fileproc
import (
+ "errors"
"fmt"
"sync"
"testing"
+
+ "github.com/ivuorinen/gibidify/shared"
)
-// TestFileTypeRegistry_ThreadSafety tests thread safety of the FileTypeRegistry.
-func TestFileTypeRegistry_ThreadSafety(t *testing.T) {
- const numGoroutines = 100
- const numOperationsPerGoroutine = 100
+const (
+ numGoroutines = 100
+ numOperationsPerGoroutine = 100
+)
+// TestFileTypeRegistryConcurrentReads tests concurrent read operations.
+// This test verifies thread-safety of registry reads under concurrent access.
+// For race condition detection, run with: go test -race
+func TestFileTypeRegistryConcurrentReads(t *testing.T) {
+ var wg sync.WaitGroup
+ errChan := make(chan error, numGoroutines)
+
+ for i := 0; i < numGoroutines; i++ {
+ wg.Go(func() {
+ if err := performConcurrentReads(); err != nil {
+ errChan <- err
+ }
+ })
+ }
+ wg.Wait()
+ close(errChan)
+
+ // Check for any errors from goroutines
+ for err := range errChan {
+ t.Errorf("Concurrent read operation failed: %v", err)
+ }
+}
+
+// TestFileTypeRegistryConcurrentRegistryAccess tests concurrent registry access.
+func TestFileTypeRegistryConcurrentRegistryAccess(t *testing.T) {
+ // Reset the registry to test concurrent initialization
+ ResetRegistryForTesting()
+ t.Cleanup(func() {
+ ResetRegistryForTesting()
+ })
+
+ registries := make([]*FileTypeRegistry, numGoroutines)
var wg sync.WaitGroup
- // Test concurrent read operations
- t.Run("ConcurrentReads", func(_ *testing.T) {
- for i := 0; i < numGoroutines; i++ {
- wg.Add(1)
- go func(_ int) {
- defer wg.Done()
- registry := GetDefaultRegistry()
+ for i := 0; i < numGoroutines; i++ {
+ idx := i // capture for closure
+ wg.Go(func() {
+ registries[idx] = DefaultRegistry()
+ })
+ }
+ wg.Wait()
- for j := 0; j < numOperationsPerGoroutine; j++ {
- // Test various file detection operations
- _ = registry.IsImage("test.png")
- _ = registry.IsBinary("test.exe")
- _ = registry.GetLanguage("test.go")
+ verifySameRegistryInstance(t, registries)
+}
- // Test global functions too
- _ = IsImage("image.jpg")
- _ = IsBinary("binary.dll")
- _ = GetLanguage("script.py")
- }
- }(i)
+// TestFileTypeRegistryConcurrentModifications tests concurrent modifications.
+func TestFileTypeRegistryConcurrentModifications(t *testing.T) {
+ var wg sync.WaitGroup
+
+ for i := 0; i < numGoroutines; i++ {
+ id := i // capture for closure
+ wg.Go(func() {
+ performConcurrentModifications(t, id)
+ })
+ }
+ wg.Wait()
+}
+
+// performConcurrentReads performs concurrent read operations on the registry.
+// Returns an error if any operation produces unexpected results.
+func performConcurrentReads() error {
+ registry := DefaultRegistry()
+
+ for j := 0; j < numOperationsPerGoroutine; j++ {
+ // Test various file detection operations with expected results
+ if !registry.IsImage(shared.TestFilePNG) {
+ return errors.New("expected .png to be detected as image")
}
- wg.Wait()
- })
-
- // Test concurrent registry access (singleton creation)
- t.Run("ConcurrentRegistryAccess", func(t *testing.T) {
- // Reset the registry to test concurrent initialization
- // Note: This is not safe in a real application, but needed for testing
- registryOnce = sync.Once{}
- registry = nil
-
- registries := make([]*FileTypeRegistry, numGoroutines)
-
- for i := 0; i < numGoroutines; i++ {
- wg.Add(1)
- go func(id int) {
- defer wg.Done()
- registries[id] = GetDefaultRegistry()
- }(i)
+ if !registry.IsBinary(shared.TestFileEXE) {
+ return errors.New("expected .exe to be detected as binary")
}
- wg.Wait()
-
- // Verify all goroutines got the same registry instance
- firstRegistry := registries[0]
- for i := 1; i < numGoroutines; i++ {
- if registries[i] != firstRegistry {
- t.Errorf("Registry %d is different from registry 0", i)
- }
+ if lang := registry.Language(shared.TestFileGo); lang != "go" {
+ return fmt.Errorf("expected .go to have language 'go', got %q", lang)
}
- })
- // Test concurrent modifications on separate registry instances
- t.Run("ConcurrentModifications", func(t *testing.T) {
- // Create separate registry instances for each goroutine to test modification thread safety
- for i := 0; i < numGoroutines; i++ {
- wg.Add(1)
- go func(id int) {
- defer wg.Done()
-
- // Create a new registry instance for this goroutine
- registry := &FileTypeRegistry{
- imageExts: make(map[string]bool),
- binaryExts: make(map[string]bool),
- languageMap: make(map[string]string),
- }
-
- for j := 0; j < numOperationsPerGoroutine; j++ {
- // Add unique extensions for this goroutine
- extSuffix := fmt.Sprintf("_%d_%d", id, j)
-
- registry.AddImageExtension(".img" + extSuffix)
- registry.AddBinaryExtension(".bin" + extSuffix)
- registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)
-
- // Verify the additions worked
- if !registry.IsImage("test.img" + extSuffix) {
- t.Errorf("Failed to add image extension .img%s", extSuffix)
- }
- if !registry.IsBinary("test.bin" + extSuffix) {
- t.Errorf("Failed to add binary extension .bin%s", extSuffix)
- }
- if registry.GetLanguage("test.lang"+extSuffix) != "lang"+extSuffix {
- t.Errorf("Failed to add language mapping .lang%s", extSuffix)
- }
- }
- }(i)
+ // Test global functions with expected results
+ if !IsImage(shared.TestFileImageJPG) {
+ return errors.New("expected .jpg to be detected as image")
+ }
+ if !IsBinary(shared.TestFileBinaryDLL) {
+ return errors.New("expected .dll to be detected as binary")
+ }
+ if lang := Language(shared.TestFileScriptPy); lang != "python" {
+ return fmt.Errorf("expected .py to have language 'python', got %q", lang)
+ }
+ }
+ return nil
+}
+
+// verifySameRegistryInstance verifies all goroutines got the same registry instance.
+func verifySameRegistryInstance(t *testing.T, registries []*FileTypeRegistry) {
+ t.Helper()
+
+ firstRegistry := registries[0]
+ for i := 1; i < numGoroutines; i++ {
+ if registries[i] != firstRegistry {
+ t.Errorf("Registry %d is different from registry 0", i)
+ }
+ }
+}
+
+// performConcurrentModifications performs concurrent modifications on separate registry instances.
+func performConcurrentModifications(t *testing.T, id int) {
+ t.Helper()
+
+ // Create a new registry instance for this goroutine
+ registry := createConcurrencyTestRegistry()
+
+ for j := 0; j < numOperationsPerGoroutine; j++ {
+ extSuffix := fmt.Sprintf("_%d_%d", id, j)
+
+ addTestExtensions(registry, extSuffix)
+ verifyTestExtensions(t, registry, extSuffix)
+ }
+}
+
+// createConcurrencyTestRegistry creates a new registry instance for concurrency testing.
+func createConcurrencyTestRegistry() *FileTypeRegistry {
+ return &FileTypeRegistry{
+ imageExts: make(map[string]bool),
+ binaryExts: make(map[string]bool),
+ languageMap: make(map[string]string),
+ extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
+ resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
+ maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
+ }
+}
+
+// addTestExtensions adds test extensions to the registry.
+func addTestExtensions(registry *FileTypeRegistry, extSuffix string) {
+ registry.AddImageExtension(".img" + extSuffix)
+ registry.AddBinaryExtension(".bin" + extSuffix)
+ registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)
+}
+
+// verifyTestExtensions verifies that test extensions were added correctly.
+func verifyTestExtensions(t *testing.T, registry *FileTypeRegistry, extSuffix string) {
+ t.Helper()
+
+ if !registry.IsImage("test.img" + extSuffix) {
+ t.Errorf("Failed to add image extension .img%s", extSuffix)
+ }
+ if !registry.IsBinary("test.bin" + extSuffix) {
+ t.Errorf("Failed to add binary extension .bin%s", extSuffix)
+ }
+ if registry.Language("test.lang"+extSuffix) != "lang"+extSuffix {
+ t.Errorf("Failed to add language mapping .lang%s", extSuffix)
+ }
+}
+
+// Benchmarks for concurrency performance
+
+// BenchmarkConcurrentReads benchmarks concurrent read operations on the registry.
+func BenchmarkConcurrentReads(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _ = performConcurrentReads()
}
- wg.Wait()
})
}
+
+// BenchmarkConcurrentRegistryAccess benchmarks concurrent registry singleton access.
+func BenchmarkConcurrentRegistryAccess(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _ = DefaultRegistry()
+ }
+ })
+}
+
+// BenchmarkConcurrentModifications benchmarks sequential registry modifications.
+// Note: Concurrent modifications to the same registry require external synchronization.
+// This benchmark measures the cost of modification operations themselves.
+func BenchmarkConcurrentModifications(b *testing.B) {
+ for b.Loop() {
+ registry := createConcurrencyTestRegistry()
+ for i := 0; i < 10; i++ {
+ extSuffix := fmt.Sprintf("_bench_%d", i)
+ registry.AddImageExtension(".img" + extSuffix)
+ registry.AddBinaryExtension(".bin" + extSuffix)
+ registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)
+ }
+ }
+}
diff --git a/fileproc/filetypes_config_test.go b/fileproc/filetypes_config_test.go
index df1e8c5..7c45814 100644
--- a/fileproc/filetypes_config_test.go
+++ b/fileproc/filetypes_config_test.go
@@ -3,218 +3,264 @@ package fileproc
import (
"testing"
- "github.com/stretchr/testify/require"
+ "github.com/ivuorinen/gibidify/shared"
)
-// TestFileTypeRegistry_Configuration tests the configuration functionality.
-func TestFileTypeRegistry_Configuration(t *testing.T) {
- // Create a new registry instance for testing
- registry := &FileTypeRegistry{
- imageExts: make(map[string]bool),
- binaryExts: make(map[string]bool),
- languageMap: make(map[string]string),
+const (
+ zigLang = "zig"
+)
+
+// TestFileTypeRegistryApplyCustomExtensions tests applying custom extensions.
+func TestFileTypeRegistryApplyCustomExtensions(t *testing.T) {
+ registry := createEmptyTestRegistry()
+
+ customImages := []string{".webp", ".avif", ".heic"}
+ customBinary := []string{".custom", ".mybin"}
+ customLanguages := map[string]string{
+ ".zig": zigLang,
+ ".odin": "odin",
+ ".v": "vlang",
}
- // Test ApplyCustomExtensions
- t.Run("ApplyCustomExtensions", func(t *testing.T) {
- customImages := []string{".webp", ".avif", ".heic"}
- customBinary := []string{".custom", ".mybin"}
- customLanguages := map[string]string{
- ".zig": "zig",
- ".odin": "odin",
- ".v": "vlang",
- }
+ registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
- registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
+ verifyCustomExtensions(t, registry, customImages, customBinary, customLanguages)
+}
- // Test custom image extensions
- for _, ext := range customImages {
- if !registry.IsImage("test" + ext) {
- t.Errorf("Expected %s to be recognized as image", ext)
- }
- }
+// TestFileTypeRegistryDisableExtensions tests disabling extensions.
+func TestFileTypeRegistryDisableExtensions(t *testing.T) {
+ registry := createEmptyTestRegistry()
- // Test custom binary extensions
- for _, ext := range customBinary {
- if !registry.IsBinary("test" + ext) {
- t.Errorf("Expected %s to be recognized as binary", ext)
- }
- }
+ // Add some extensions first
+ setupRegistryExtensions(registry)
- // Test custom language mappings
- for ext, expectedLang := range customLanguages {
- if lang := registry.GetLanguage("test" + ext); lang != expectedLang {
- t.Errorf("Expected %s to map to %s, got %s", ext, expectedLang, lang)
- }
- }
- })
+ // Verify they work before disabling
+ verifyExtensionsEnabled(t, registry)
- // Test DisableExtensions
- t.Run("DisableExtensions", func(t *testing.T) {
- // Add some extensions first
- registry.AddImageExtension(".png")
- registry.AddImageExtension(".jpg")
- registry.AddBinaryExtension(".exe")
- registry.AddBinaryExtension(".dll")
- registry.AddLanguageMapping(".go", "go")
- registry.AddLanguageMapping(".py", "python")
+ // Disable some extensions
+ disabledImages := []string{".png"}
+ disabledBinary := []string{".exe"}
+ disabledLanguages := []string{".go"}
- // Verify they work
- if !registry.IsImage("test.png") {
- t.Error("Expected .png to be image before disabling")
- }
- if !registry.IsBinary("test.exe") {
- t.Error("Expected .exe to be binary before disabling")
- }
- if registry.GetLanguage("test.go") != "go" {
- t.Error("Expected .go to map to go before disabling")
- }
+ registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
- // Disable some extensions
- disabledImages := []string{".png"}
- disabledBinary := []string{".exe"}
- disabledLanguages := []string{".go"}
+ // Verify disabled and remaining extensions
+ verifyExtensionsDisabled(t, registry)
+ verifyRemainingExtensions(t, registry)
+}
- registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
+// TestFileTypeRegistryEmptyValuesHandling tests handling of empty values.
+func TestFileTypeRegistryEmptyValuesHandling(t *testing.T) {
+ registry := createEmptyTestRegistry()
- // Test that disabled extensions no longer work
- if registry.IsImage("test.png") {
- t.Error("Expected .png to not be image after disabling")
- }
- if registry.IsBinary("test.exe") {
- t.Error("Expected .exe to not be binary after disabling")
- }
- if registry.GetLanguage("test.go") != "" {
- t.Error("Expected .go to not map to language after disabling")
- }
+ customImages := []string{"", shared.TestExtensionValid, ""}
+ customBinary := []string{"", shared.TestExtensionValid}
+ customLanguages := map[string]string{
+ "": "invalid",
+ shared.TestExtensionValid: "",
+ ".good": "good",
+ }
- // Test that non-disabled extensions still work
- if !registry.IsImage("test.jpg") {
- t.Error("Expected .jpg to still be image after disabling .png")
- }
- if !registry.IsBinary("test.dll") {
- t.Error("Expected .dll to still be binary after disabling .exe")
- }
- if registry.GetLanguage("test.py") != "python" {
- t.Error("Expected .py to still map to python after disabling .go")
- }
- })
+ registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
- // Test empty values handling
- t.Run("EmptyValuesHandling", func(t *testing.T) {
- registry := &FileTypeRegistry{
- imageExts: make(map[string]bool),
- binaryExts: make(map[string]bool),
- languageMap: make(map[string]string),
- }
+ verifyEmptyValueHandling(t, registry)
+}
- // Test with empty values
- customImages := []string{"", ".valid", ""}
- customBinary := []string{"", ".valid"}
- customLanguages := map[string]string{
- "": "invalid",
- ".valid": "",
- ".good": "good",
- }
+// TestFileTypeRegistryCaseInsensitiveHandling tests case insensitive handling.
+func TestFileTypeRegistryCaseInsensitiveHandling(t *testing.T) {
+ registry := createEmptyTestRegistry()
- registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
+ customImages := []string{".WEBP", ".Avif"}
+ customBinary := []string{".CUSTOM", ".MyBin"}
+ customLanguages := map[string]string{
+ ".ZIG": zigLang,
+ ".Odin": "odin",
+ }
- // Only valid entries should be added
- if registry.IsImage("test.") {
- t.Error("Expected empty extension to not be added as image")
- }
- if !registry.IsImage("test.valid") {
- t.Error("Expected .valid to be added as image")
- }
- if registry.IsBinary("test.") {
- t.Error("Expected empty extension to not be added as binary")
- }
- if !registry.IsBinary("test.valid") {
- t.Error("Expected .valid to be added as binary")
- }
- if registry.GetLanguage("test.") != "" {
- t.Error("Expected empty extension to not be added as language")
- }
- if registry.GetLanguage("test.valid") != "" {
- t.Error("Expected .valid with empty language to not be added")
- }
- if registry.GetLanguage("test.good") != "good" {
- t.Error("Expected .good to map to good")
- }
- })
+ registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
- // Test case-insensitive handling
- t.Run("CaseInsensitiveHandling", func(t *testing.T) {
- registry := &FileTypeRegistry{
- imageExts: make(map[string]bool),
- binaryExts: make(map[string]bool),
- languageMap: make(map[string]string),
- }
+ verifyCaseInsensitiveHandling(t, registry)
+}
- customImages := []string{".WEBP", ".Avif"}
- customBinary := []string{".CUSTOM", ".MyBin"}
- customLanguages := map[string]string{
- ".ZIG": "zig",
- ".Odin": "odin",
- }
+// createEmptyTestRegistry creates a new empty test registry instance for config testing.
+func createEmptyTestRegistry() *FileTypeRegistry {
+ return &FileTypeRegistry{
+ imageExts: make(map[string]bool),
+ binaryExts: make(map[string]bool),
+ languageMap: make(map[string]string),
+ extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
+ resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
+ maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
+ }
+}
- registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
+// verifyCustomExtensions verifies that custom extensions are applied correctly.
+func verifyCustomExtensions(
+ t *testing.T,
+ registry *FileTypeRegistry,
+ customImages, customBinary []string,
+ customLanguages map[string]string,
+) {
+ t.Helper()
- // Test that both upper and lower case work
- if !registry.IsImage("test.webp") {
- t.Error("Expected .webp (lowercase) to work after adding .WEBP")
+ // Test custom image extensions
+ for _, ext := range customImages {
+ if !registry.IsImage("test" + ext) {
+ t.Errorf("Expected %s to be recognized as image", ext)
}
- if !registry.IsImage("test.WEBP") {
- t.Error("Expected .WEBP (uppercase) to work")
+ }
+
+ // Test custom binary extensions
+ for _, ext := range customBinary {
+ if !registry.IsBinary("test" + ext) {
+ t.Errorf("Expected %s to be recognized as binary", ext)
}
- if !registry.IsBinary("test.custom") {
- t.Error("Expected .custom (lowercase) to work after adding .CUSTOM")
+ }
+
+ // Test custom language mappings
+ for ext, expectedLang := range customLanguages {
+ if lang := registry.Language("test" + ext); lang != expectedLang {
+ t.Errorf("Expected %s to map to %s, got %s", ext, expectedLang, lang)
}
- if !registry.IsBinary("test.CUSTOM") {
- t.Error("Expected .CUSTOM (uppercase) to work")
- }
- if registry.GetLanguage("test.zig") != "zig" {
- t.Error("Expected .zig (lowercase) to work after adding .ZIG")
- }
- if registry.GetLanguage("test.ZIG") != "zig" {
- t.Error("Expected .ZIG (uppercase) to work")
- }
- })
+ }
+}
+
+// setupRegistryExtensions adds test extensions to the registry.
+func setupRegistryExtensions(registry *FileTypeRegistry) {
+ registry.AddImageExtension(".png")
+ registry.AddImageExtension(".jpg")
+ registry.AddBinaryExtension(".exe")
+ registry.AddBinaryExtension(".dll")
+ registry.AddLanguageMapping(".go", "go")
+ registry.AddLanguageMapping(".py", "python")
+}
+
+// verifyExtensionsEnabled verifies that extensions are enabled before disabling.
+func verifyExtensionsEnabled(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ if !registry.IsImage(shared.TestFilePNG) {
+ t.Error("Expected .png to be image before disabling")
+ }
+ if !registry.IsBinary(shared.TestFileEXE) {
+ t.Error("Expected .exe to be binary before disabling")
+ }
+ if registry.Language(shared.TestFileGo) != "go" {
+ t.Error("Expected .go to map to go before disabling")
+ }
+}
+
+// verifyExtensionsDisabled verifies that disabled extensions no longer work.
+func verifyExtensionsDisabled(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ if registry.IsImage(shared.TestFilePNG) {
+ t.Error("Expected .png to not be image after disabling")
+ }
+ if registry.IsBinary(shared.TestFileEXE) {
+ t.Error("Expected .exe to not be binary after disabling")
+ }
+ if registry.Language(shared.TestFileGo) != "" {
+ t.Error("Expected .go to not map to language after disabling")
+ }
+}
+
+// verifyRemainingExtensions verifies that non-disabled extensions still work.
+func verifyRemainingExtensions(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ if !registry.IsImage(shared.TestFileJPG) {
+ t.Error("Expected .jpg to still be image after disabling .png")
+ }
+ if !registry.IsBinary(shared.TestFileDLL) {
+ t.Error("Expected .dll to still be binary after disabling .exe")
+ }
+ if registry.Language(shared.TestFilePy) != "python" {
+ t.Error("Expected .py to still map to python after disabling .go")
+ }
+}
+
+// verifyEmptyValueHandling verifies handling of empty values.
+func verifyEmptyValueHandling(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ if registry.IsImage("test") {
+ t.Error("Expected empty extension to not be added as image")
+ }
+ if !registry.IsImage(shared.TestFileValid) {
+ t.Error("Expected .valid to be added as image")
+ }
+ if registry.IsBinary("test") {
+ t.Error("Expected empty extension to not be added as binary")
+ }
+ if !registry.IsBinary(shared.TestFileValid) {
+ t.Error("Expected .valid to be added as binary")
+ }
+ if registry.Language("test") != "" {
+ t.Error("Expected empty extension to not be added as language")
+ }
+ if registry.Language(shared.TestFileValid) != "" {
+ t.Error("Expected .valid with empty language to not be added")
+ }
+ if registry.Language("test.good") != "good" {
+ t.Error("Expected .good to map to good")
+ }
+}
+
+// verifyCaseInsensitiveHandling verifies case insensitive handling.
+func verifyCaseInsensitiveHandling(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ if !registry.IsImage(shared.TestFileWebP) {
+ t.Error("Expected .webp (lowercase) to work after adding .WEBP")
+ }
+ if !registry.IsImage("test.WEBP") {
+ t.Error("Expected .WEBP (uppercase) to work")
+ }
+ if !registry.IsBinary("test.custom") {
+ t.Error("Expected .custom (lowercase) to work after adding .CUSTOM")
+ }
+ if !registry.IsBinary("test.CUSTOM") {
+ t.Error("Expected .CUSTOM (uppercase) to work")
+ }
+ if registry.Language("test.zig") != zigLang {
+ t.Error("Expected .zig (lowercase) to work after adding .ZIG")
+ }
+ if registry.Language("test.ZIG") != zigLang {
+ t.Error("Expected .ZIG (uppercase) to work")
+ }
}
// TestConfigureFromSettings tests the global configuration function.
func TestConfigureFromSettings(t *testing.T) {
// Reset registry to ensure clean state
ResetRegistryForTesting()
- // Ensure cleanup runs even if test fails
- t.Cleanup(ResetRegistryForTesting)
// Test configuration application
customImages := []string{".webp", ".avif"}
customBinary := []string{".custom"}
- customLanguages := map[string]string{".zig": "zig"}
+ customLanguages := map[string]string{".zig": zigLang}
disabledImages := []string{".gif"} // Disable default extension
disabledBinary := []string{".exe"} // Disable default extension
disabledLanguages := []string{".rb"} // Disable default extension
- err := ConfigureFromSettings(RegistryConfig{
- CustomImages: customImages,
- CustomBinary: customBinary,
- CustomLanguages: customLanguages,
- DisabledImages: disabledImages,
- DisabledBinary: disabledBinary,
- DisabledLanguages: disabledLanguages,
- })
- require.NoError(t, err)
+ ConfigureFromSettings(
+ customImages,
+ customBinary,
+ customLanguages,
+ disabledImages,
+ disabledBinary,
+ disabledLanguages,
+ )
// Test that custom extensions work
- if !IsImage("test.webp") {
+ if !IsImage(shared.TestFileWebP) {
t.Error("Expected custom image extension .webp to work")
}
if !IsBinary("test.custom") {
t.Error("Expected custom binary extension .custom to work")
}
- if GetLanguage("test.zig") != "zig" {
+ if Language("test.zig") != zigLang {
t.Error("Expected custom language .zig to work")
}
@@ -222,41 +268,43 @@ func TestConfigureFromSettings(t *testing.T) {
if IsImage("test.gif") {
t.Error("Expected disabled image extension .gif to not work")
}
- if IsBinary("test.exe") {
+ if IsBinary(shared.TestFileEXE) {
t.Error("Expected disabled binary extension .exe to not work")
}
- if GetLanguage("test.rb") != "" {
+ if Language("test.rb") != "" {
t.Error("Expected disabled language extension .rb to not work")
}
// Test that non-disabled defaults still work
- if !IsImage("test.png") {
+ if !IsImage(shared.TestFilePNG) {
t.Error("Expected non-disabled image extension .png to still work")
}
- if !IsBinary("test.dll") {
+ if !IsBinary(shared.TestFileDLL) {
t.Error("Expected non-disabled binary extension .dll to still work")
}
- if GetLanguage("test.go") != "go" {
+ if Language(shared.TestFileGo) != "go" {
t.Error("Expected non-disabled language extension .go to still work")
}
// Test multiple calls don't override previous configuration
- err = ConfigureFromSettings(RegistryConfig{
- CustomImages: []string{".extra"},
- CustomBinary: []string{},
- CustomLanguages: map[string]string{},
- DisabledImages: []string{},
- DisabledBinary: []string{},
- DisabledLanguages: []string{},
- })
- require.NoError(t, err)
+ ConfigureFromSettings(
+ []string{".extra"},
+ []string{},
+ map[string]string{},
+ []string{},
+ []string{},
+ []string{},
+ )
// Previous configuration should still work
- if !IsImage("test.webp") {
+ if !IsImage(shared.TestFileWebP) {
t.Error("Expected previous configuration to persist")
}
// New configuration should also work
if !IsImage("test.extra") {
t.Error("Expected new configuration to be applied")
}
+
+ // Reset registry after test to avoid affecting other tests
+ ResetRegistryForTesting()
}
diff --git a/fileproc/filetypes_detection_test.go b/fileproc/filetypes_detection_test.go
index 5364367..312e9bd 100644
--- a/fileproc/filetypes_detection_test.go
+++ b/fileproc/filetypes_detection_test.go
@@ -2,31 +2,34 @@ package fileproc
import (
"testing"
+
+ "github.com/ivuorinen/gibidify/shared"
)
-// newTestRegistry creates a fresh registry instance for testing to avoid global state pollution.
-func newTestRegistry() *FileTypeRegistry {
+// createTestRegistry creates a fresh FileTypeRegistry instance for testing.
+// This helper reduces code duplication and ensures consistent registry initialization.
+func createTestRegistry() *FileTypeRegistry {
return &FileTypeRegistry{
imageExts: getImageExtensions(),
binaryExts: getBinaryExtensions(),
languageMap: getLanguageMap(),
- extCache: make(map[string]string, 1000),
- resultCache: make(map[string]FileTypeResult, 500),
- maxCacheSize: 500,
+ extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
+ resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
+ maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
}
}
// TestFileTypeRegistry_LanguageDetection tests the language detection functionality.
-func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
- registry := newTestRegistry()
+func TestFileTypeRegistryLanguageDetection(t *testing.T) {
+ registry := createTestRegistry()
tests := []struct {
filename string
expected string
}{
// Programming languages
- {"main.go", "go"},
- {"script.py", "python"},
+ {shared.TestFileMainGo, "go"},
+ {shared.TestFileScriptPy, "python"},
{"app.js", "javascript"},
{"component.tsx", "typescript"},
{"service.ts", "typescript"},
@@ -96,17 +99,17 @@ func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
for _, tt := range tests {
t.Run(tt.filename, func(t *testing.T) {
- result := registry.GetLanguage(tt.filename)
+ result := registry.Language(tt.filename)
if result != tt.expected {
- t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
+ t.Errorf("Language(%q) = %q, expected %q", tt.filename, result, tt.expected)
}
})
}
}
// TestFileTypeRegistry_ImageDetection tests the image detection functionality.
-func TestFileTypeRegistry_ImageDetection(t *testing.T) {
- registry := newTestRegistry()
+func TestFileTypeRegistryImageDetection(t *testing.T) {
+ registry := createTestRegistry()
tests := []struct {
filename string
@@ -114,7 +117,7 @@ func TestFileTypeRegistry_ImageDetection(t *testing.T) {
}{
// Common image formats
{"photo.png", true},
- {"image.jpg", true},
+ {shared.TestFileImageJPG, true},
{"picture.jpeg", true},
{"animation.gif", true},
{"bitmap.bmp", true},
@@ -155,8 +158,8 @@ func TestFileTypeRegistry_ImageDetection(t *testing.T) {
}
// TestFileTypeRegistry_BinaryDetection tests the binary detection functionality.
-func TestFileTypeRegistry_BinaryDetection(t *testing.T) {
- registry := newTestRegistry()
+func TestFileTypeRegistryBinaryDetection(t *testing.T) {
+ registry := createTestRegistry()
tests := []struct {
filename string
@@ -214,7 +217,7 @@ func TestFileTypeRegistry_BinaryDetection(t *testing.T) {
// Non-binary files
{"document.txt", false},
- {"script.py", false},
+ {shared.TestFileScriptPy, false},
{"config.json", false},
{"style.css", false},
{"page.html", false},
diff --git a/fileproc/filetypes_edge_cases_test.go b/fileproc/filetypes_edge_cases_test.go
index 10eb083..c234a4e 100644
--- a/fileproc/filetypes_edge_cases_test.go
+++ b/fileproc/filetypes_edge_cases_test.go
@@ -2,11 +2,13 @@ package fileproc
import (
"testing"
+
+ "github.com/ivuorinen/gibidify/shared"
)
// TestFileTypeRegistry_EdgeCases tests edge cases and boundary conditions.
-func TestFileTypeRegistry_EdgeCases(t *testing.T) {
- registry := GetDefaultRegistry()
+func TestFileTypeRegistryEdgeCases(t *testing.T) {
+ registry := DefaultRegistry()
// Test various edge cases for filename handling
edgeCases := []struct {
@@ -35,19 +37,19 @@ func TestFileTypeRegistry_EdgeCases(t *testing.T) {
// These should not panic
_ = registry.IsImage(tc.filename)
_ = registry.IsBinary(tc.filename)
- _ = registry.GetLanguage(tc.filename)
+ _ = registry.Language(tc.filename)
// Global functions should also not panic
_ = IsImage(tc.filename)
_ = IsBinary(tc.filename)
- _ = GetLanguage(tc.filename)
+ _ = Language(tc.filename)
})
}
}
// TestFileTypeRegistry_MinimumExtensionLength tests the minimum extension length requirement.
-func TestFileTypeRegistry_MinimumExtensionLength(t *testing.T) {
- registry := GetDefaultRegistry()
+func TestFileTypeRegistryMinimumExtensionLength(t *testing.T) {
+ registry := DefaultRegistry()
tests := []struct {
filename string
@@ -65,18 +67,18 @@ func TestFileTypeRegistry_MinimumExtensionLength(t *testing.T) {
for _, tt := range tests {
t.Run(tt.filename, func(t *testing.T) {
- result := registry.GetLanguage(tt.filename)
+ result := registry.Language(tt.filename)
if result != tt.expected {
- t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
+ t.Errorf("Language(%q) = %q, expected %q", tt.filename, result, tt.expected)
}
})
}
}
-// Benchmark tests for performance validation
-func BenchmarkFileTypeRegistry_IsImage(b *testing.B) {
- registry := GetDefaultRegistry()
- filename := "test.png"
+// Benchmark tests for performance validation.
+func BenchmarkFileTypeRegistryIsImage(b *testing.B) {
+ registry := DefaultRegistry()
+ filename := shared.TestFilePNG
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -84,9 +86,9 @@ func BenchmarkFileTypeRegistry_IsImage(b *testing.B) {
}
}
-func BenchmarkFileTypeRegistry_IsBinary(b *testing.B) {
- registry := GetDefaultRegistry()
- filename := "test.exe"
+func BenchmarkFileTypeRegistryIsBinary(b *testing.B) {
+ registry := DefaultRegistry()
+ filename := shared.TestFileEXE
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -94,35 +96,35 @@ func BenchmarkFileTypeRegistry_IsBinary(b *testing.B) {
}
}
-func BenchmarkFileTypeRegistry_GetLanguage(b *testing.B) {
- registry := GetDefaultRegistry()
- filename := "test.go"
+func BenchmarkFileTypeRegistryLanguage(b *testing.B) {
+ registry := DefaultRegistry()
+ filename := shared.TestFileGo
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _ = registry.GetLanguage(filename)
+ _ = registry.Language(filename)
}
}
-func BenchmarkFileTypeRegistry_GlobalFunctions(b *testing.B) {
- filename := "test.go"
+func BenchmarkFileTypeRegistryGlobalFunctions(b *testing.B) {
+ filename := shared.TestFileGo
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = IsImage(filename)
_ = IsBinary(filename)
- _ = GetLanguage(filename)
+ _ = Language(filename)
}
}
-func BenchmarkFileTypeRegistry_ConcurrentAccess(b *testing.B) {
- filename := "test.go"
+func BenchmarkFileTypeRegistryConcurrentAccess(b *testing.B) {
+ filename := shared.TestFileGo
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_ = IsImage(filename)
_ = IsBinary(filename)
- _ = GetLanguage(filename)
+ _ = Language(filename)
}
})
}
diff --git a/fileproc/filetypes_registry_test.go b/fileproc/filetypes_registry_test.go
index 57b80fb..336f267 100644
--- a/fileproc/filetypes_registry_test.go
+++ b/fileproc/filetypes_registry_test.go
@@ -2,136 +2,254 @@ package fileproc
import (
"testing"
+
+ "github.com/ivuorinen/gibidify/shared"
)
-// TestFileTypeRegistry_ModificationMethods tests the modification methods of FileTypeRegistry.
-func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
- // Create a new registry instance for testing
- registry := &FileTypeRegistry{
- imageExts: make(map[string]bool),
- binaryExts: make(map[string]bool),
- languageMap: make(map[string]string),
- }
+// TestFileTypeRegistryAddImageExtension tests adding image extensions.
+func TestFileTypeRegistryAddImageExtension(t *testing.T) {
+ registry := createModificationTestRegistry()
- // Test AddImageExtension
- t.Run("AddImageExtension", func(t *testing.T) {
- // Add a new image extension
- registry.AddImageExtension(".webp")
- if !registry.IsImage("test.webp") {
- t.Errorf("Expected .webp to be recognized as image after adding")
- }
-
- // Test case-insensitive addition
- registry.AddImageExtension(".AVIF")
- if !registry.IsImage("test.avif") {
- t.Errorf("Expected .avif to be recognized as image after adding .AVIF")
- }
- if !registry.IsImage("test.AVIF") {
- t.Errorf("Expected .AVIF to be recognized as image")
- }
-
- // Test with dot prefix
- registry.AddImageExtension("heic")
- if registry.IsImage("test.heic") {
- t.Errorf("Expected extension without dot to not work")
- }
-
- // Test with proper dot prefix
- registry.AddImageExtension(".heic")
- if !registry.IsImage("test.heic") {
- t.Errorf("Expected .heic to be recognized as image")
- }
- })
-
- // Test AddBinaryExtension
- t.Run("AddBinaryExtension", func(t *testing.T) {
- // Add a new binary extension
- registry.AddBinaryExtension(".custom")
- if !registry.IsBinary("file.custom") {
- t.Errorf("Expected .custom to be recognized as binary after adding")
- }
-
- // Test case-insensitive addition
- registry.AddBinaryExtension(".SPECIAL")
- if !registry.IsBinary("file.special") {
- t.Errorf("Expected .special to be recognized as binary after adding .SPECIAL")
- }
- if !registry.IsBinary("file.SPECIAL") {
- t.Errorf("Expected .SPECIAL to be recognized as binary")
- }
-
- // Test with dot prefix
- registry.AddBinaryExtension("bin")
- if registry.IsBinary("file.bin") {
- t.Errorf("Expected extension without dot to not work")
- }
-
- // Test with proper dot prefix
- registry.AddBinaryExtension(".bin")
- if !registry.IsBinary("file.bin") {
- t.Errorf("Expected .bin to be recognized as binary")
- }
- })
-
- // Test AddLanguageMapping
- t.Run("AddLanguageMapping", func(t *testing.T) {
- // Add a new language mapping
- registry.AddLanguageMapping(".xyz", "CustomLang")
- if lang := registry.GetLanguage("file.xyz"); lang != "CustomLang" {
- t.Errorf("Expected CustomLang, got %s", lang)
- }
-
- // Test case-insensitive addition
- registry.AddLanguageMapping(".ABC", "UpperLang")
- if lang := registry.GetLanguage("file.abc"); lang != "UpperLang" {
- t.Errorf("Expected UpperLang, got %s", lang)
- }
- if lang := registry.GetLanguage("file.ABC"); lang != "UpperLang" {
- t.Errorf("Expected UpperLang for uppercase, got %s", lang)
- }
-
- // Test with dot prefix
- registry.AddLanguageMapping("nolang", "NoLang")
- if lang := registry.GetLanguage("file.nolang"); lang == "NoLang" {
- t.Errorf("Expected extension without dot to not work")
- }
-
- // Test with proper dot prefix
- registry.AddLanguageMapping(".nolang", "NoLang")
- if lang := registry.GetLanguage("file.nolang"); lang != "NoLang" {
- t.Errorf("Expected NoLang, got %s", lang)
- }
-
- // Test overriding existing mapping
- registry.AddLanguageMapping(".xyz", "NewCustomLang")
- if lang := registry.GetLanguage("file.xyz"); lang != "NewCustomLang" {
- t.Errorf("Expected NewCustomLang after override, got %s", lang)
- }
- })
+ testImageExtensionModifications(t, registry)
}
-// TestFileTypeRegistry_DefaultRegistryConsistency tests default registry behavior.
-func TestFileTypeRegistry_DefaultRegistryConsistency(t *testing.T) {
- registry := GetDefaultRegistry()
+// TestFileTypeRegistryAddBinaryExtension tests adding binary extensions.
+func TestFileTypeRegistryAddBinaryExtension(t *testing.T) {
+ registry := createModificationTestRegistry()
+
+ testBinaryExtensionModifications(t, registry)
+}
+
+// TestFileTypeRegistryAddLanguageMapping tests adding language mappings.
+func TestFileTypeRegistryAddLanguageMapping(t *testing.T) {
+ registry := createModificationTestRegistry()
+
+ testLanguageMappingModifications(t, registry)
+}
+
+// createModificationTestRegistry creates a registry for modification tests.
+func createModificationTestRegistry() *FileTypeRegistry {
+ return &FileTypeRegistry{
+ imageExts: make(map[string]bool),
+ binaryExts: make(map[string]bool),
+ languageMap: make(map[string]string),
+ extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
+ resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
+ maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
+ }
+}
+
+// testImageExtensionModifications tests image extension modifications.
+func testImageExtensionModifications(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ // Add a new image extension
+ registry.AddImageExtension(".webp")
+ verifyImageExtension(t, registry, ".webp", shared.TestFileWebP, true)
+
+ // Test case-insensitive addition
+ registry.AddImageExtension(".AVIF")
+ verifyImageExtension(t, registry, ".AVIF", "test.avif", true)
+ verifyImageExtension(t, registry, ".AVIF", "test.AVIF", true)
+
+ // Test with dot prefix
+ registry.AddImageExtension("heic")
+ verifyImageExtension(t, registry, "heic", "test.heic", false)
+
+ // Test with proper dot prefix
+ registry.AddImageExtension(".heic")
+ verifyImageExtension(t, registry, ".heic", "test.heic", true)
+}
+
+// testBinaryExtensionModifications tests binary extension modifications.
+func testBinaryExtensionModifications(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ // Add a new binary extension
+ registry.AddBinaryExtension(".custom")
+ verifyBinaryExtension(t, registry, ".custom", "file.custom", true)
+
+ // Test case-insensitive addition
+ registry.AddBinaryExtension(shared.TestExtensionSpecial)
+ verifyBinaryExtension(t, registry, shared.TestExtensionSpecial, "file.special", true)
+ verifyBinaryExtension(t, registry, shared.TestExtensionSpecial, "file.SPECIAL", true)
+
+ // Test with dot prefix
+ registry.AddBinaryExtension("bin")
+ verifyBinaryExtension(t, registry, "bin", "file.bin", false)
+
+ // Test with proper dot prefix
+ registry.AddBinaryExtension(".bin")
+ verifyBinaryExtension(t, registry, ".bin", "file.bin", true)
+}
+
+// testLanguageMappingModifications tests language mapping modifications.
+func testLanguageMappingModifications(t *testing.T, registry *FileTypeRegistry) {
+ t.Helper()
+
+ // Add a new language mapping
+ registry.AddLanguageMapping(".xyz", "CustomLang")
+ verifyLanguageMapping(t, registry, "file.xyz", "CustomLang")
+
+ // Test case-insensitive addition
+ registry.AddLanguageMapping(".ABC", "UpperLang")
+ verifyLanguageMapping(t, registry, "file.abc", "UpperLang")
+ verifyLanguageMapping(t, registry, "file.ABC", "UpperLang")
+
+ // Test with dot prefix (should not work)
+ registry.AddLanguageMapping("nolang", "NoLang")
+ verifyLanguageMappingAbsent(t, registry, "nolang", "file.nolang")
+
+ // Test with proper dot prefix
+ registry.AddLanguageMapping(".nolang", "NoLang")
+ verifyLanguageMapping(t, registry, "file.nolang", "NoLang")
+
+ // Test overriding existing mapping
+ registry.AddLanguageMapping(".xyz", "NewCustomLang")
+ verifyLanguageMapping(t, registry, "file.xyz", "NewCustomLang")
+}
+
+// verifyImageExtension verifies image extension behavior.
+func verifyImageExtension(t *testing.T, registry *FileTypeRegistry, ext, filename string, expected bool) {
+ t.Helper()
+
+ if registry.IsImage(filename) != expected {
+ if expected {
+ t.Errorf("Expected %s to be recognized as image after adding %s", filename, ext)
+ } else {
+ t.Errorf(shared.TestMsgExpectedExtensionWithoutDot)
+ }
+ }
+}
+
+// verifyBinaryExtension verifies binary extension behavior.
+func verifyBinaryExtension(t *testing.T, registry *FileTypeRegistry, ext, filename string, expected bool) {
+ t.Helper()
+
+ if registry.IsBinary(filename) != expected {
+ if expected {
+ t.Errorf("Expected %s to be recognized as binary after adding %s", filename, ext)
+ } else {
+ t.Errorf(shared.TestMsgExpectedExtensionWithoutDot)
+ }
+ }
+}
+
+// verifyLanguageMapping verifies language mapping behavior.
+func verifyLanguageMapping(t *testing.T, registry *FileTypeRegistry, filename, expectedLang string) {
+ t.Helper()
+
+ lang := registry.Language(filename)
+ if lang != expectedLang {
+ t.Errorf("Expected %s, got %s for %s", expectedLang, lang, filename)
+ }
+}
+
+// verifyLanguageMappingAbsent verifies that a language mapping is absent.
+func verifyLanguageMappingAbsent(t *testing.T, registry *FileTypeRegistry, _ string, filename string) {
+ t.Helper()
+
+ lang := registry.Language(filename)
+ if lang != "" {
+ t.Errorf(shared.TestMsgExpectedExtensionWithoutDot+", but got %s", lang)
+ }
+}
+
+// TestFileTypeRegistryDefaultRegistryConsistency tests default registry behavior.
+func TestFileTypeRegistryDefaultRegistryConsistency(t *testing.T) {
+ registry := DefaultRegistry()
// Test that registry methods work consistently
- if !registry.IsImage("test.png") {
+ if !registry.IsImage(shared.TestFilePNG) {
t.Error("Expected .png to be recognized as image")
}
- if !registry.IsBinary("test.exe") {
+ if !registry.IsBinary(shared.TestFileEXE) {
t.Error("Expected .exe to be recognized as binary")
}
- if lang := registry.GetLanguage("test.go"); lang != "go" {
+ if lang := registry.Language(shared.TestFileGo); lang != "go" {
t.Errorf("Expected go, got %s", lang)
}
// Test that multiple calls return consistent results
for i := 0; i < 5; i++ {
- if !registry.IsImage("test.jpg") {
+ if !registry.IsImage(shared.TestFileJPG) {
t.Errorf("Iteration %d: Expected .jpg to be recognized as image", i)
}
- if registry.IsBinary("test.txt") {
+ if registry.IsBinary(shared.TestFileTXT) {
t.Errorf("Iteration %d: Expected .txt to not be recognized as binary", i)
}
}
}
+
+// TestFileTypeRegistryGetStats tests the GetStats method.
+func TestFileTypeRegistryGetStats(t *testing.T) {
+ // Ensure clean, isolated state
+ ResetRegistryForTesting()
+ t.Cleanup(ResetRegistryForTesting)
+ registry := DefaultRegistry()
+
+ // Call some methods to populate cache and update stats
+ registry.IsImage(shared.TestFilePNG)
+ registry.IsBinary(shared.TestFileEXE)
+ registry.Language(shared.TestFileGo)
+ // Repeat to generate cache hits
+ registry.IsImage(shared.TestFilePNG)
+ registry.IsBinary(shared.TestFileEXE)
+ registry.Language(shared.TestFileGo)
+
+ // Get stats
+ stats := registry.Stats()
+
+ // Verify stats structure - all values are uint64 and therefore non-negative by definition
+ // We can verify they exist and are properly initialized
+
+ // Test that stats include our calls
+ if stats.TotalLookups < 6 { // We made at least 6 calls above
+ t.Errorf("Expected at least 6 total lookups, got %d", stats.TotalLookups)
+ }
+
+ // Total lookups should equal hits + misses
+ if stats.TotalLookups != stats.CacheHits+stats.CacheMisses {
+ t.Errorf("Total lookups (%d) should equal hits (%d) + misses (%d)",
+ stats.TotalLookups, stats.CacheHits, stats.CacheMisses)
+ }
+ // With repeated lookups we should see some cache hits
+ if stats.CacheHits == 0 {
+ t.Error("Expected some cache hits after repeated lookups")
+ }
+}
+
+// TestFileTypeRegistryGetCacheInfo tests the GetCacheInfo method.
+func TestFileTypeRegistryGetCacheInfo(t *testing.T) {
+ // Ensure clean, isolated state
+ ResetRegistryForTesting()
+ t.Cleanup(ResetRegistryForTesting)
+ registry := DefaultRegistry()
+
+ // Call some methods to populate cache
+ registry.IsImage("test1.png")
+ registry.IsBinary("test2.exe")
+ registry.Language("test3.go")
+ registry.IsImage("test4.jpg")
+ registry.IsBinary("test5.dll")
+
+ // Get cache info
+ extCacheSize, resultCacheSize, maxCacheSize := registry.CacheInfo()
+
+ // Verify cache info
+ if extCacheSize < 0 {
+ t.Error("Expected non-negative extension cache size")
+ }
+ if resultCacheSize < 0 {
+ t.Error("Expected non-negative result cache size")
+ }
+ if maxCacheSize <= 0 {
+ t.Error("Expected positive max cache size")
+ }
+
+ // We should have some cache entries from our calls
+ totalCacheSize := extCacheSize + resultCacheSize
+ if totalCacheSize == 0 {
+ t.Error("Expected some cache entries after multiple calls")
+ }
+}
diff --git a/fileproc/formats.go b/fileproc/formats.go
index 86795af..89f95de 100644
--- a/fileproc/formats.go
+++ b/fileproc/formats.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
// FileData represents a single file's path and content.
@@ -23,6 +24,7 @@ type FormatWriter interface {
// detectLanguage tries to infer the code block language from the file extension.
func detectLanguage(filePath string) string {
- registry := GetDefaultRegistry()
- return registry.GetLanguage(filePath)
+ registry := DefaultRegistry()
+
+ return registry.Language(filePath)
}
diff --git a/fileproc/ignore_rules.go b/fileproc/ignore_rules.go
index 827a0f0..842bc56 100644
--- a/fileproc/ignore_rules.go
+++ b/fileproc/ignore_rules.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -34,6 +35,7 @@ func loadIgnoreRules(currentDir string, parentRules []ignoreRule) []ignoreRule {
func tryLoadIgnoreFile(dir, fileName string) *ignoreRule {
ignorePath := filepath.Join(dir, fileName)
if info, err := os.Stat(ignorePath); err == nil && !info.IsDir() {
+ //nolint:errcheck // Regex compile error handled by validation, safe to ignore here
if gi, err := ignore.CompileIgnoreFile(ignorePath); err == nil {
return &ignoreRule{
base: dir,
@@ -41,6 +43,7 @@ func tryLoadIgnoreFile(dir, fileName string) *ignoreRule {
}
}
}
+
return nil
}
@@ -51,6 +54,7 @@ func matchesIgnoreRules(fullPath string, rules []ignoreRule) bool {
return true
}
}
+
return false
}
diff --git a/fileproc/json_writer.go b/fileproc/json_writer.go
index 59abb2b..3fa7cf3 100644
--- a/fileproc/json_writer.go
+++ b/fileproc/json_writer.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -6,7 +7,7 @@ import (
"io"
"os"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// JSONWriter handles JSON format output with streaming support.
@@ -27,42 +28,27 @@ func NewJSONWriter(outFile *os.File) *JSONWriter {
func (w *JSONWriter) Start(prefix, suffix string) error {
// Start JSON structure
if _, err := w.outFile.WriteString(`{"prefix":"`); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write JSON start",
- )
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON start")
}
// Write escaped prefix
- escapedPrefix := gibidiutils.EscapeForJSON(prefix)
- if err := gibidiutils.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
- return err
+ escapedPrefix := shared.EscapeForJSON(prefix)
+ if err := shared.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
+ return fmt.Errorf("writing JSON prefix: %w", err)
}
if _, err := w.outFile.WriteString(`","suffix":"`); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write JSON middle",
- )
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON middle")
}
// Write escaped suffix
- escapedSuffix := gibidiutils.EscapeForJSON(suffix)
- if err := gibidiutils.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
- return err
+ escapedSuffix := shared.EscapeForJSON(suffix)
+ if err := shared.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
+ return fmt.Errorf("writing JSON suffix: %w", err)
}
if _, err := w.outFile.WriteString(`","files":[`); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write JSON files start",
- )
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON files start")
}
return nil
@@ -72,12 +58,7 @@ func (w *JSONWriter) Start(prefix, suffix string) error {
func (w *JSONWriter) WriteFile(req WriteRequest) error {
if !w.firstFile {
if _, err := w.outFile.WriteString(","); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write JSON separator",
- )
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON separator")
}
}
w.firstFile = false
@@ -85,6 +66,7 @@ func (w *JSONWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
}
+
return w.writeInline(req)
}
@@ -92,22 +74,25 @@ func (w *JSONWriter) WriteFile(req WriteRequest) error {
func (w *JSONWriter) Close() error {
// Close JSON structure
if _, err := w.outFile.WriteString("]}"); err != nil {
- return gibidiutils.WrapError(err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite, "failed to write JSON end")
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON end")
}
+
return nil
}
// writeStreaming writes a large file as JSON in streaming chunks.
func (w *JSONWriter) writeStreaming(req WriteRequest) error {
- defer gibidiutils.SafeCloseReader(req.Reader, req.Path)
+ defer shared.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write file start
- escapedPath := gibidiutils.EscapeForJSON(req.Path)
+ escapedPath := shared.EscapeForJSON(req.Path)
if _, err := fmt.Fprintf(w.outFile, `{"path":"%s","language":"%s","content":"`, escapedPath, language); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write JSON file start",
).WithFilePath(req.Path)
}
@@ -119,8 +104,10 @@ func (w *JSONWriter) writeStreaming(req WriteRequest) error {
// Write file end
if _, err := w.outFile.WriteString(`"}`); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write JSON file end",
).WithFilePath(req.Path)
}
@@ -139,50 +126,44 @@ func (w *JSONWriter) writeInline(req WriteRequest) error {
encoded, err := json.Marshal(fileData)
if err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingEncode,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingEncode,
"failed to marshal JSON",
).WithFilePath(req.Path)
}
if _, err := w.outFile.Write(encoded); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write JSON file",
).WithFilePath(req.Path)
}
+
return nil
}
// streamJSONContent streams content with JSON escaping.
func (w *JSONWriter) streamJSONContent(reader io.Reader, path string) error {
- return gibidiutils.StreamContent(reader, w.outFile, StreamChunkSize, path, func(chunk []byte) []byte {
- escaped := gibidiutils.EscapeForJSON(string(chunk))
- return []byte(escaped)
- })
+ if err := shared.StreamContent(
+ reader, w.outFile, shared.FileProcessingStreamChunkSize, path, func(chunk []byte) []byte {
+ escaped := shared.EscapeForJSON(string(chunk))
+
+ return []byte(escaped)
+ },
+ ); err != nil {
+ return fmt.Errorf("streaming JSON content: %w", err)
+ }
+
+ return nil
}
// startJSONWriter handles JSON format output with streaming support.
func startJSONWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
- defer close(done)
-
- writer := NewJSONWriter(outFile)
-
- // Start writing
- if err := writer.Start(prefix, suffix); err != nil {
- gibidiutils.LogError("Failed to write JSON start", err)
- return
- }
-
- // Process files
- for req := range writeCh {
- if err := writer.WriteFile(req); err != nil {
- gibidiutils.LogError("Failed to write JSON file", err)
- }
- }
-
- // Close writer
- if err := writer.Close(); err != nil {
- gibidiutils.LogError("Failed to write JSON end", err)
- }
+ startFormatWriter(outFile, writeCh, done, prefix, suffix, func(f *os.File) FormatWriter {
+ return NewJSONWriter(f)
+ })
}
diff --git a/fileproc/markdown_writer.go b/fileproc/markdown_writer.go
index 0f5ebd2..09c86f7 100644
--- a/fileproc/markdown_writer.go
+++ b/fileproc/markdown_writer.go
@@ -1,18 +1,17 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"fmt"
- "io"
"os"
- "path/filepath"
- "strings"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// MarkdownWriter handles Markdown format output with streaming support.
type MarkdownWriter struct {
outFile *os.File
+ suffix string
}
// NewMarkdownWriter creates a new markdown writer.
@@ -20,18 +19,17 @@ func NewMarkdownWriter(outFile *os.File) *MarkdownWriter {
return &MarkdownWriter{outFile: outFile}
}
-// Start writes the markdown header.
-func (w *MarkdownWriter) Start(prefix, _ string) error {
+// Start writes the markdown header and stores the suffix for later use.
+func (w *MarkdownWriter) Start(prefix, suffix string) error {
+ // Store suffix for use in Close method
+ w.suffix = suffix
+
if prefix != "" {
if _, err := fmt.Fprintf(w.outFile, "# %s\n\n", prefix); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write prefix",
- )
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write prefix")
}
}
+
return nil
}
@@ -40,71 +38,15 @@ func (w *MarkdownWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
}
+
return w.writeInline(req)
}
-// Close writes the markdown footer.
-func (w *MarkdownWriter) Close(suffix string) error {
- if suffix != "" {
- if _, err := fmt.Fprintf(w.outFile, "\n# %s\n", suffix); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write suffix",
- )
- }
- }
- return nil
-}
-
-// validateMarkdownPath validates a file path for markdown output.
-func validateMarkdownPath(path string) error {
- trimmed := strings.TrimSpace(path)
- if trimmed == "" {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationRequired,
- "file path cannot be empty",
- "",
- nil,
- )
- }
-
- // Reject absolute paths
- if filepath.IsAbs(trimmed) {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "absolute paths are not allowed",
- trimmed,
- map[string]any{"path": trimmed},
- )
- }
-
- // Clean and validate path components
- cleaned := filepath.Clean(trimmed)
- if filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, "/") {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path must be relative",
- trimmed,
- map[string]any{"path": trimmed, "cleaned": cleaned},
- )
- }
-
- // Check for path traversal in components
- components := strings.Split(filepath.ToSlash(cleaned), "/")
- for _, component := range components {
- if component == ".." {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path traversal not allowed",
- trimmed,
- map[string]any{"path": trimmed, "cleaned": cleaned},
- )
+// Close writes the markdown footer using the suffix stored in Start.
+func (w *MarkdownWriter) Close() error {
+ if w.suffix != "" {
+ if _, err := fmt.Fprintf(w.outFile, "\n# %s\n", w.suffix); err != nil {
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write suffix")
}
}
@@ -113,44 +55,32 @@ func validateMarkdownPath(path string) error {
// writeStreaming writes a large file in streaming chunks.
func (w *MarkdownWriter) writeStreaming(req WriteRequest) error {
- // Validate path before use
- if err := validateMarkdownPath(req.Path); err != nil {
- return err
- }
-
- // Check for nil reader
- if req.Reader == nil {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationRequired,
- "nil reader in write request",
- "",
- nil,
- ).WithFilePath(req.Path)
- }
-
- defer gibidiutils.SafeCloseReader(req.Reader, req.Path)
+ defer shared.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write file header
- safePath := gibidiutils.EscapeForMarkdown(req.Path)
- if _, err := fmt.Fprintf(w.outFile, "## File: `%s`\n```%s\n", safePath, language); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ if _, err := fmt.Fprintf(w.outFile, "## File: `%s`\n```%s\n", req.Path, language); err != nil {
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write file header",
).WithFilePath(req.Path)
}
// Stream file content in chunks
- if err := w.streamContent(req.Reader, req.Path); err != nil {
- return err
+ chunkSize := shared.FileProcessingStreamChunkSize
+ if err := shared.StreamContent(req.Reader, w.outFile, chunkSize, req.Path, nil); err != nil {
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "streaming content for markdown file")
}
// Write file footer
if _, err := w.outFile.WriteString("\n```\n\n"); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write file footer",
).WithFilePath(req.Path)
}
@@ -160,55 +90,24 @@ func (w *MarkdownWriter) writeStreaming(req WriteRequest) error {
// writeInline writes a small file directly from content.
func (w *MarkdownWriter) writeInline(req WriteRequest) error {
- // Validate path before use
- if err := validateMarkdownPath(req.Path); err != nil {
- return err
- }
-
language := detectLanguage(req.Path)
- safePath := gibidiutils.EscapeForMarkdown(req.Path)
- formatted := fmt.Sprintf("## File: `%s`\n```%s\n%s\n```\n\n", safePath, language, req.Content)
+ formatted := fmt.Sprintf("## File: `%s`\n```%s\n%s\n```\n\n", req.Path, language, req.Content)
if _, err := w.outFile.WriteString(formatted); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write inline content",
).WithFilePath(req.Path)
}
+
return nil
}
-// streamContent streams file content in chunks.
-func (w *MarkdownWriter) streamContent(reader io.Reader, path string) error {
- return gibidiutils.StreamContent(reader, w.outFile, StreamChunkSize, path, nil)
-}
-
// startMarkdownWriter handles Markdown format output with streaming support.
-func startMarkdownWriter(
- outFile *os.File,
- writeCh <-chan WriteRequest,
- done chan<- struct{},
- prefix, suffix string,
-) {
- defer close(done)
-
- writer := NewMarkdownWriter(outFile)
-
- // Start writing
- if err := writer.Start(prefix, suffix); err != nil {
- gibidiutils.LogError("Failed to write markdown prefix", err)
- return
- }
-
- // Process files
- for req := range writeCh {
- if err := writer.WriteFile(req); err != nil {
- gibidiutils.LogError("Failed to write markdown file", err)
- }
- }
-
- // Close writer
- if err := writer.Close(suffix); err != nil {
- gibidiutils.LogError("Failed to write markdown suffix", err)
- }
+func startMarkdownWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
+ startFormatWriter(outFile, writeCh, done, prefix, suffix, func(f *os.File) FormatWriter {
+ return NewMarkdownWriter(f)
+ })
}
diff --git a/fileproc/processor.go b/fileproc/processor.go
index bfa7e8d..97a89b9 100644
--- a/fileproc/processor.go
+++ b/fileproc/processor.go
@@ -9,21 +9,11 @@ import (
"os"
"path/filepath"
"strings"
+ "sync"
"time"
- "github.com/sirupsen/logrus"
-
"github.com/ivuorinen/gibidify/config"
- "github.com/ivuorinen/gibidify/gibidiutils"
-)
-
-const (
- // StreamChunkSize is the size of chunks when streaming large files (64KB).
- StreamChunkSize = 65536
- // StreamThreshold is the file size above which we use streaming (1MB).
- StreamThreshold = 1048576
- // MaxMemoryBuffer is the maximum memory to use for buffering content (10MB).
- MaxMemoryBuffer = 10485760
+ "github.com/ivuorinen/gibidify/shared"
)
// WriteRequest represents the content to be written.
@@ -32,26 +22,7 @@ type WriteRequest struct {
Content string
IsStream bool
Reader io.Reader
-}
-
-// multiReaderCloser wraps an io.Reader with a Close method that closes underlying closers.
-type multiReaderCloser struct {
- reader io.Reader
- closers []io.Closer
-}
-
-func (m *multiReaderCloser) Read(p []byte) (n int, err error) {
- return m.reader.Read(p)
-}
-
-func (m *multiReaderCloser) Close() error {
- var firstErr error
- for _, c := range m.closers {
- if err := c.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- return firstErr
+ Size int64 // File size for streaming files
}
// FileProcessor handles file processing operations.
@@ -65,7 +36,7 @@ type FileProcessor struct {
func NewFileProcessor(rootPath string) *FileProcessor {
return &FileProcessor{
rootPath: rootPath,
- sizeLimit: config.GetFileSizeLimit(),
+ sizeLimit: config.FileSizeLimit(),
resourceMonitor: NewResourceMonitor(),
}
}
@@ -74,45 +45,19 @@ func NewFileProcessor(rootPath string) *FileProcessor {
func NewFileProcessorWithMonitor(rootPath string, monitor *ResourceMonitor) *FileProcessor {
return &FileProcessor{
rootPath: rootPath,
- sizeLimit: config.GetFileSizeLimit(),
+ sizeLimit: config.FileSizeLimit(),
resourceMonitor: monitor,
}
}
-// checkContextCancellation checks if context is cancelled and logs an error if so.
-// Returns true if context is cancelled, false otherwise.
-func (p *FileProcessor) checkContextCancellation(ctx context.Context, filePath, stage string) bool {
- select {
- case <-ctx.Done():
- // Format stage with leading space if provided
- stageMsg := stage
- if stage != "" {
- stageMsg = " " + stage
- }
- gibidiutils.LogErrorf(
- gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitTimeout,
- fmt.Sprintf("file processing cancelled%s", stageMsg),
- filePath,
- nil,
- ),
- "File processing cancelled%s: %s",
- stageMsg,
- filePath,
- )
- return true
- default:
- return false
- }
-}
-
// ProcessFile reads the file at filePath and sends a formatted output to outCh.
// It automatically chooses between loading the entire file or streaming based on file size.
func ProcessFile(filePath string, outCh chan<- WriteRequest, rootPath string) {
processor := NewFileProcessor(rootPath)
ctx := context.Background()
- processor.ProcessWithContext(ctx, filePath, outCh)
+ if err := processor.ProcessWithContext(ctx, filePath, outCh); err != nil {
+ shared.LogErrorf(err, shared.FileProcessingMsgFailedToProcess, filePath)
+ }
}
// ProcessFileWithMonitor processes a file using a shared resource monitor.
@@ -122,19 +67,25 @@ func ProcessFileWithMonitor(
outCh chan<- WriteRequest,
rootPath string,
monitor *ResourceMonitor,
-) {
+) error {
+ if monitor == nil {
+ monitor = NewResourceMonitor()
+ }
processor := NewFileProcessorWithMonitor(rootPath, monitor)
- processor.ProcessWithContext(ctx, filePath, outCh)
+
+ return processor.ProcessWithContext(ctx, filePath, outCh)
}
// Process handles file processing with the configured settings.
func (p *FileProcessor) Process(filePath string, outCh chan<- WriteRequest) {
ctx := context.Background()
- p.ProcessWithContext(ctx, filePath, outCh)
+ if err := p.ProcessWithContext(ctx, filePath, outCh); err != nil {
+ shared.LogErrorf(err, shared.FileProcessingMsgFailedToProcess, filePath)
+ }
}
// ProcessWithContext handles file processing with context and resource monitoring.
-func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string, outCh chan<- WriteRequest) {
+func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string, outCh chan<- WriteRequest) error {
// Create file processing context with timeout
fileCtx, fileCancel := p.resourceMonitor.CreateFileProcessingContext(ctx)
defer fileCancel()
@@ -142,50 +93,51 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
// Wait for rate limiting
if err := p.resourceMonitor.WaitForRateLimit(fileCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
- gibidiutils.LogErrorf(
- gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitTimeout,
- "file processing timeout during rate limiting",
- filePath,
- nil,
- ),
- "File processing timeout during rate limiting: %s",
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "file processing timeout during rate limiting",
filePath,
+ nil,
)
+ shared.LogErrorf(structErr, "File processing timeout during rate limiting: %s", filePath)
+
+ return structErr
}
- return
+
+ return err
}
// Validate file and check resource limits
fileInfo, err := p.validateFileWithLimits(fileCtx, filePath)
if err != nil {
- return // Error already logged
+ return err // Error already logged
}
// Acquire read slot for concurrent processing
if err := p.resourceMonitor.AcquireReadSlot(fileCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
- gibidiutils.LogErrorf(
- gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitTimeout,
- "file processing timeout waiting for read slot",
- filePath,
- nil,
- ),
- "File processing timeout waiting for read slot: %s",
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "file processing timeout waiting for read slot",
filePath,
+ nil,
)
+ shared.LogErrorf(structErr, "File processing timeout waiting for read slot: %s", filePath)
+
+ return structErr
}
- return
+
+ return err
}
defer p.resourceMonitor.ReleaseReadSlot()
// Check hard memory limits before processing
if err := p.resourceMonitor.CheckHardMemoryLimit(); err != nil {
- gibidiutils.LogErrorf(err, "Hard memory limit check failed for file: %s", filePath)
- return
+ shared.LogErrorf(err, "Hard memory limit check failed for file: %s", filePath)
+
+ return err
}
// Get relative path
@@ -193,61 +145,69 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
// Process file with timeout
processStart := time.Now()
- defer func() {
- // Record successful processing
- p.resourceMonitor.RecordFileProcessed(fileInfo.Size())
- logrus.Debugf("File processed in %v: %s", time.Since(processStart), filePath)
- }()
// Choose processing strategy based on file size
- if fileInfo.Size() <= StreamThreshold {
- p.processInMemoryWithContext(fileCtx, filePath, relPath, outCh)
+ if fileInfo.Size() <= shared.FileProcessingStreamThreshold {
+ err = p.processInMemoryWithContext(fileCtx, filePath, relPath, outCh)
} else {
- p.processStreamingWithContext(fileCtx, filePath, relPath, outCh)
+ err = p.processStreamingWithContext(fileCtx, filePath, relPath, outCh, fileInfo.Size())
}
+
+ // Only record success if processing completed without error
+ if err != nil {
+ return err
+ }
+
+ // Record successful processing only on success path
+ p.resourceMonitor.RecordFileProcessed(fileInfo.Size())
+ logger := shared.GetLogger()
+ logger.Debugf("File processed in %v: %s", time.Since(processStart), filePath)
+
+ return nil
}
// validateFileWithLimits checks if the file can be processed with resource limits.
func (p *FileProcessor) validateFileWithLimits(ctx context.Context, filePath string) (os.FileInfo, error) {
// Check context cancellation
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
+ if err := shared.CheckContextCancellation(ctx, "file validation"); err != nil {
+ return nil, fmt.Errorf("context check during file validation: %w", err)
}
fileInfo, err := os.Stat(filePath)
if err != nil {
- structErr := gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeFileSystem, gibidiutils.CodeFSAccess,
+ structErr := shared.WrapError(
+ err,
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSAccess,
"failed to stat file",
).WithFilePath(filePath)
- gibidiutils.LogErrorf(structErr, "Failed to stat file %s", filePath)
+ shared.LogErrorf(structErr, "Failed to stat file %s", filePath)
+
return nil, structErr
}
// Check traditional size limit
if fileInfo.Size() > p.sizeLimit {
- filesizeContext := map[string]interface{}{
+ c := map[string]any{
"file_size": fileInfo.Size(),
"size_limit": p.sizeLimit,
}
- gibidiutils.LogErrorf(
- gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationSize,
- fmt.Sprintf("file size (%d bytes) exceeds limit (%d bytes)", fileInfo.Size(), p.sizeLimit),
- filePath,
- filesizeContext,
- ),
- "Skipping large file %s", filePath,
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeValidationSize,
+ fmt.Sprintf(shared.FileProcessingMsgSizeExceeds, fileInfo.Size(), p.sizeLimit),
+ filePath,
+ c,
)
- return nil, fmt.Errorf("file too large")
+ shared.LogErrorf(structErr, "Skipping large file %s", filePath)
+
+ return nil, structErr
}
// Check resource limits
if err := p.resourceMonitor.ValidateFileProcessing(filePath, fileInfo.Size()); err != nil {
- gibidiutils.LogErrorf(err, "Resource limit validation failed for file: %s", filePath)
+ shared.LogErrorf(err, "Resource limit validation failed for file: %s", filePath)
+
return nil, err
}
@@ -260,6 +220,7 @@ func (p *FileProcessor) getRelativePath(filePath string) string {
if err != nil {
return filePath // Fallback
}
+
return relPath
}
@@ -268,38 +229,74 @@ func (p *FileProcessor) processInMemoryWithContext(
ctx context.Context,
filePath, relPath string,
outCh chan<- WriteRequest,
-) {
+) error {
// Check context before reading
- if p.checkContextCancellation(ctx, filePath, "") {
- return
+ select {
+ case <-ctx.Done():
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "file processing canceled",
+ filePath,
+ nil,
+ )
+ shared.LogErrorf(structErr, "File processing canceled: %s", filePath)
+
+ return structErr
+ default:
}
- // #nosec G304 - filePath is validated by walker
- content, err := os.ReadFile(filePath)
+ content, err := os.ReadFile(filePath) // #nosec G304 - filePath is validated by walker
if err != nil {
- structErr := gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingFileRead,
+ structErr := shared.WrapError(
+ err,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingFileRead,
"failed to read file",
).WithFilePath(filePath)
- gibidiutils.LogErrorf(structErr, "Failed to read file %s", filePath)
- return
+ shared.LogErrorf(structErr, "Failed to read file %s", filePath)
+
+ return structErr
}
// Check context again after reading
- if p.checkContextCancellation(ctx, filePath, "after read") {
- return
+ select {
+ case <-ctx.Done():
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "file processing canceled after read",
+ filePath,
+ nil,
+ )
+ shared.LogErrorf(structErr, "File processing canceled after read: %s", filePath)
+
+ return structErr
+ default:
}
- // Check context before sending output
- if p.checkContextCancellation(ctx, filePath, "before output") {
- return
- }
+ // Try to send the result, but respect context cancellation
+ select {
+ case <-ctx.Done():
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "file processing canceled before output",
+ filePath,
+ nil,
+ )
+ shared.LogErrorf(structErr, "File processing canceled before output: %s", filePath)
- outCh <- WriteRequest{
+ return structErr
+ case outCh <- WriteRequest{
Path: relPath,
Content: p.formatContent(relPath, string(content)),
IsStream: false,
+ Size: int64(len(content)),
+ }:
}
+
+ return nil
}
// processStreamingWithContext creates a streaming reader for large files with context awareness.
@@ -307,58 +304,87 @@ func (p *FileProcessor) processStreamingWithContext(
ctx context.Context,
filePath, relPath string,
outCh chan<- WriteRequest,
-) {
+ size int64,
+) error {
// Check context before creating reader
- if p.checkContextCancellation(ctx, filePath, "before streaming") {
- return
+ select {
+ case <-ctx.Done():
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "streaming processing canceled",
+ filePath,
+ nil,
+ )
+ shared.LogErrorf(structErr, "Streaming processing canceled: %s", filePath)
+
+ return structErr
+ default:
}
reader := p.createStreamReaderWithContext(ctx, filePath, relPath)
if reader == nil {
- return // Error already logged
+ // Error already logged, create and return error
+ return shared.NewStructuredError(
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingFileRead,
+ "failed to create stream reader",
+ filePath,
+ nil,
+ )
}
- // Check context before sending output
- if p.checkContextCancellation(ctx, filePath, "before streaming output") {
- // Close the reader to prevent file descriptor leak
- if closer, ok := reader.(io.Closer); ok {
- _ = closer.Close()
- }
- return
- }
+ // Try to send the result, but respect context cancellation
+ select {
+ case <-ctx.Done():
+ structErr := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
+ "streaming processing canceled before output",
+ filePath,
+ nil,
+ )
+ shared.LogErrorf(structErr, "Streaming processing canceled before output: %s", filePath)
- outCh <- WriteRequest{
+ return structErr
+ case outCh <- WriteRequest{
Path: relPath,
Content: "", // Empty since content is in Reader
IsStream: true,
Reader: reader,
+ Size: size,
+ }:
}
+
+ return nil
}
// createStreamReaderWithContext creates a reader that combines header and file content with context awareness.
-func (p *FileProcessor) createStreamReaderWithContext(ctx context.Context, filePath, relPath string) io.Reader {
+func (p *FileProcessor) createStreamReaderWithContext(
+ ctx context.Context, filePath, relPath string,
+) io.Reader {
// Check context before opening file
- if p.checkContextCancellation(ctx, filePath, "before opening file") {
+ select {
+ case <-ctx.Done():
return nil
+ default:
}
- // #nosec G304 - filePath is validated by walker
- file, err := os.Open(filePath)
+ file, err := os.Open(filePath) // #nosec G304 - filePath is validated by walker
if err != nil {
- structErr := gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingFileRead,
+ structErr := shared.WrapError(
+ err,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingFileRead,
"failed to open file for streaming",
).WithFilePath(filePath)
- gibidiutils.LogErrorf(structErr, "Failed to open file for streaming %s", filePath)
+ shared.LogErrorf(structErr, "Failed to open file for streaming %s", filePath)
+
return nil
}
-
header := p.formatHeader(relPath)
- // Wrap in multiReaderCloser to ensure file is closed even on cancellation
- return &multiReaderCloser{
- reader: io.MultiReader(header, file),
- closers: []io.Closer{file},
- }
+
+ return newHeaderFileReader(header, file)
}
// formatContent formats the file content with header.
@@ -370,3 +396,66 @@ func (p *FileProcessor) formatContent(relPath, content string) string {
func (p *FileProcessor) formatHeader(relPath string) io.Reader {
return strings.NewReader(fmt.Sprintf("\n---\n%s\n", relPath))
}
+
+// headerFileReader wraps a MultiReader and closes the file when EOF is reached.
+type headerFileReader struct {
+ reader io.Reader
+ file *os.File
+ mu sync.Mutex
+ closed bool
+}
+
+// newHeaderFileReader creates a new headerFileReader.
+func newHeaderFileReader(header io.Reader, file *os.File) *headerFileReader {
+ return &headerFileReader{
+ reader: io.MultiReader(header, file),
+ file: file,
+ }
+}
+
+// Read implements io.Reader and closes the file on EOF.
+func (r *headerFileReader) Read(p []byte) (n int, err error) {
+ n, err = r.reader.Read(p)
+ if err == io.EOF {
+ r.closeFile()
+ // EOF is a sentinel value that must be passed through unchanged for io.Reader interface
+ return n, err //nolint:wrapcheck // EOF must not be wrapped
+ }
+ if err != nil {
+ return n, shared.WrapError(
+ err, shared.ErrorTypeIO, shared.CodeIORead,
+ "failed to read from header file reader",
+ )
+ }
+
+ return n, nil
+}
+
+// closeFile closes the file once.
+func (r *headerFileReader) closeFile() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if !r.closed && r.file != nil {
+ if err := r.file.Close(); err != nil {
+ shared.LogError("Failed to close file", err)
+ }
+ r.closed = true
+ }
+}
+
+// Close implements io.Closer and ensures the underlying file is closed.
+// This allows explicit cleanup when consumers stop reading before EOF.
+func (r *headerFileReader) Close() error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.closed || r.file == nil {
+ return nil
+ }
+ err := r.file.Close()
+ if err != nil {
+ shared.LogError("Failed to close file", err)
+ }
+ r.closed = true
+
+ return err
+}
diff --git a/fileproc/processor_test.go b/fileproc/processor_test.go
index e825399..d3ed849 100644
--- a/fileproc/processor_test.go
+++ b/fileproc/processor_test.go
@@ -1,15 +1,84 @@
package fileproc_test
import (
+ "context"
+ "fmt"
+ "io"
"os"
+ "path/filepath"
"strings"
"sync"
"testing"
+ "time"
+ "github.com/spf13/viper"
+
+ "github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
+ "github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
+// writeTempConfig creates a temporary config file with the given YAML content
+// and returns the directory path containing the config file.
+func writeTempConfig(t *testing.T, content string) string {
+ t.Helper()
+ dir := t.TempDir()
+ configPath := filepath.Join(dir, "config.yaml")
+ if err := os.WriteFile(configPath, []byte(content), 0o600); err != nil {
+ t.Fatalf("Failed to create temp config: %v", err)
+ }
+ return dir
+}
+
+// collectWriteRequests runs a processing function and collects all WriteRequests.
+// This helper wraps the common pattern of channel + goroutine + WaitGroup.
+func collectWriteRequests(t *testing.T, process func(ch chan fileproc.WriteRequest)) []fileproc.WriteRequest {
+ t.Helper()
+
+ ch := make(chan fileproc.WriteRequest, 10)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ process(ch)
+ })
+
+ results := make([]fileproc.WriteRequest, 0)
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ return results
+}
+
+// collectWriteRequestsWithContext runs a processing function with context and collects all WriteRequests.
+func collectWriteRequestsWithContext(
+ ctx context.Context,
+ t *testing.T,
+ process func(ctx context.Context, ch chan fileproc.WriteRequest) error,
+) ([]fileproc.WriteRequest, error) {
+ t.Helper()
+
+ ch := make(chan fileproc.WriteRequest, 10)
+ var processErr error
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ processErr = process(ctx, ch)
+ })
+
+ results := make([]fileproc.WriteRequest, 0)
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ return results, processErr
+}
+
func TestProcessFile(t *testing.T) {
// Reset and load default config to ensure proper file size limits
testutil.ResetViperConfig(t, "")
@@ -32,23 +101,20 @@ func TestProcessFile(t *testing.T) {
errTmpFile := tmpFile.Close()
if errTmpFile != nil {
t.Fatal(errTmpFile)
- return
}
ch := make(chan fileproc.WriteRequest, 1)
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
+ defer close(ch)
fileproc.ProcessFile(tmpFile.Name(), ch, "")
- }()
- wg.Wait()
- close(ch)
+ })
var result string
for req := range ch {
result = req.Content
}
+ wg.Wait()
if !strings.Contains(result, tmpFile.Name()) {
t.Errorf("Output does not contain file path: %s", tmpFile.Name())
@@ -57,3 +123,686 @@ func TestProcessFile(t *testing.T) {
t.Errorf("Output does not contain file content: %s", content)
}
}
+
+// TestNewFileProcessorWithMonitor tests processor creation with resource monitor.
+func TestNewFileProcessorWithMonitor(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Create a resource monitor
+ monitor := fileproc.NewResourceMonitor()
+ defer monitor.Close()
+
+ processor := fileproc.NewFileProcessorWithMonitor("test_source", monitor)
+ if processor == nil {
+ t.Error("Expected processor but got nil")
+ }
+
+ // Exercise the processor to verify monitor integration
+ tmpFile, err := os.CreateTemp(t.TempDir(), "monitor_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(tmpFile.Name())
+
+ if _, err := tmpFile.WriteString("test content"); err != nil {
+ t.Fatal(err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ ctx := context.Background()
+ writeCh := make(chan fileproc.WriteRequest, 1)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(writeCh)
+ if err := processor.ProcessWithContext(ctx, tmpFile.Name(), writeCh); err != nil {
+ t.Errorf("ProcessWithContext failed: %v", err)
+ }
+ })
+
+ // Drain channel first to avoid deadlock if producer sends multiple requests
+ requestCount := 0
+ for range writeCh {
+ requestCount++
+ }
+
+ // Wait for goroutine to finish after channel is drained
+ wg.Wait()
+
+ if requestCount == 0 {
+ t.Error("Expected at least one write request from processor")
+ }
+}
+
+// TestProcessFileWithMonitor tests file processing with resource monitoring.
+func TestProcessFileWithMonitor(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Create temporary file
+ tmpFile, err := os.CreateTemp(t.TempDir(), "testfile_monitor_*")
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+ defer func() {
+ if err := os.Remove(tmpFile.Name()); err != nil {
+ t.Logf("Failed to remove temp file: %v", err)
+ }
+ }()
+
+ content := "Test content with monitor"
+ if _, err := tmpFile.WriteString(content); err != nil {
+ t.Fatalf(shared.TestMsgFailedToWriteContent, err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ t.Fatalf(shared.TestMsgFailedToCloseFile, err)
+ }
+
+ // Create resource monitor
+ monitor := fileproc.NewResourceMonitor()
+ defer monitor.Close()
+
+ ch := make(chan fileproc.WriteRequest, 1)
+ ctx := context.Background()
+
+ // Test ProcessFileWithMonitor
+ var wg sync.WaitGroup
+ var result string
+
+ // Start reader goroutine first to prevent deadlock
+ wg.Go(func() {
+ for req := range ch {
+ result = req.Content
+ }
+ })
+
+ // Process the file
+ err = fileproc.ProcessFileWithMonitor(ctx, tmpFile.Name(), ch, "", monitor)
+ close(ch)
+
+ if err != nil {
+ t.Fatalf("ProcessFileWithMonitor failed: %v", err)
+ }
+
+ // Wait for reader to finish
+ wg.Wait()
+
+ if !strings.Contains(result, content) {
+ t.Error("Expected content not found in processed result")
+ }
+}
+
+const testContent = "package main\nfunc main() {}\n"
+
+// TestProcess tests the basic Process function.
+func TestProcess(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Create temporary directory
+ tmpDir := t.TempDir()
+
+ // Create test file with .go extension
+ testFile := filepath.Join(tmpDir, "test.go")
+ content := testContent
+ if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
+ }
+
+ processor := fileproc.NewFileProcessor(tmpDir)
+ ch := make(chan fileproc.WriteRequest, 10)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ // Process the specific file, not the directory
+ processor.Process(testFile, ch)
+ })
+
+ // Collect results
+ results := make([]fileproc.WriteRequest, 0, 1) // Pre-allocate with expected capacity
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ if len(results) == 0 {
+ t.Error("Expected at least one processed file")
+
+ return
+ }
+
+ // Find our test file in results
+ found := false
+ for _, req := range results {
+ if strings.Contains(req.Path, shared.TestFileGo) && strings.Contains(req.Content, content) {
+ found = true
+
+ break
+ }
+ }
+
+ if !found {
+ t.Error("Test file not found in processed results")
+ }
+}
+
+// createLargeTestFile creates a large test file for streaming tests.
+func createLargeTestFile(t *testing.T) *os.File {
+ t.Helper()
+
+ tmpFile, err := os.CreateTemp(t.TempDir(), "large_file_*.go")
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ lineContent := "// Repeated comment line to exceed streaming threshold\n"
+ repeatCount := (1048576 / len(lineContent)) + 1000
+ largeContent := strings.Repeat(lineContent, repeatCount)
+
+ if _, err := tmpFile.WriteString(largeContent); err != nil {
+ t.Fatalf(shared.TestMsgFailedToWriteContent, err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ t.Fatalf(shared.TestMsgFailedToCloseFile, err)
+ }
+
+ t.Logf("Created test file size: %d bytes", len(largeContent))
+
+ return tmpFile
+}
+
+// processFileForStreaming processes a file and returns streaming/inline requests.
+func processFileForStreaming(t *testing.T, filePath string) (streamingReq, inlineReq *fileproc.WriteRequest) {
+ t.Helper()
+
+ ch := make(chan fileproc.WriteRequest, 1)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ fileproc.ProcessFile(filePath, ch, "")
+ })
+
+ var streamingRequest *fileproc.WriteRequest
+ var inlineRequest *fileproc.WriteRequest
+
+ for req := range ch {
+ if req.IsStream {
+ reqCopy := req
+ streamingRequest = &reqCopy
+ } else {
+ reqCopy := req
+ inlineRequest = &reqCopy
+ }
+ }
+ wg.Wait()
+
+ return streamingRequest, inlineRequest
+}
+
+// validateStreamingRequest validates a streaming request.
+func validateStreamingRequest(t *testing.T, streamingRequest *fileproc.WriteRequest, tmpFile *os.File) {
+ t.Helper()
+
+ if streamingRequest.Reader == nil {
+ t.Error("Expected reader in streaming request")
+ }
+ if streamingRequest.Content != "" {
+ t.Error("Expected empty content for streaming request")
+ }
+
+ buffer := make([]byte, 1024)
+ n, err := streamingRequest.Reader.Read(buffer)
+ if err != nil && err != io.EOF {
+ t.Errorf("Failed to read from streaming request: %v", err)
+ }
+
+ content := string(buffer[:n])
+ if !strings.Contains(content, tmpFile.Name()) {
+ t.Error("Expected file path in streamed header content")
+ }
+
+ t.Log("Successfully triggered streaming for large file and tested reader")
+}
+
+// TestProcessorStreamingIntegration tests streaming functionality in processor.
+func TestProcessorStreamingIntegration(t *testing.T) {
+ configDir := writeTempConfig(t, `
+max_file_size_mb: 0.001
+streaming_threshold_mb: 0.0001
+`)
+ testutil.ResetViperConfig(t, configDir)
+
+ tmpFile := createLargeTestFile(t)
+ defer func() {
+ if err := os.Remove(tmpFile.Name()); err != nil {
+ t.Logf("Failed to remove temp file: %v", err)
+ }
+ }()
+
+ streamingRequest, inlineRequest := processFileForStreaming(t, tmpFile.Name())
+
+ if streamingRequest == nil && inlineRequest == nil {
+ t.Error("Expected either streaming or inline request but got none")
+ }
+
+ if streamingRequest != nil {
+ validateStreamingRequest(t, streamingRequest, tmpFile)
+ } else {
+ t.Log("File processed inline instead of streaming")
+ }
+}
+
+// TestProcessorContextCancellation tests context cancellation during processing.
+func TestProcessorContextCancellation(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Create temporary directory with files
+ tmpDir := t.TempDir()
+
+ // Create multiple test files
+ for i := 0; i < 5; i++ {
+ testFile := filepath.Join(tmpDir, fmt.Sprintf("test%d.go", i))
+ content := testContent
+ if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
+ }
+ }
+
+ processor := fileproc.NewFileProcessor("test_source")
+ ch := make(chan fileproc.WriteRequest, 10)
+
+ // Use ProcessWithContext with immediate cancellation
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ // Error is expected due to cancellation
+ if err := processor.ProcessWithContext(ctx, tmpDir, ch); err != nil {
+ // Log error for debugging, but don't fail test since cancellation is expected
+ t.Logf("Expected error due to cancellation: %v", err)
+ }
+ })
+
+ // Collect results - should be minimal due to cancellation
+ results := make([]fileproc.WriteRequest, 0, 1) // Pre-allocate with expected capacity
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ // With immediate cancellation, we might get 0 results
+ // This tests that cancellation is respected
+ t.Logf("Processed %d files with immediate cancellation", len(results))
+}
+
+// TestProcessorValidationEdgeCases tests edge cases in file validation.
+func TestProcessorValidationEdgeCases(t *testing.T) {
+ configDir := writeTempConfig(t, `
+max_file_size_mb: 0.001 # 1KB limit for testing
+`)
+ testutil.ResetViperConfig(t, configDir)
+
+ tmpDir := t.TempDir()
+
+ // Test case 1: Non-existent file
+ nonExistentFile := filepath.Join(tmpDir, "does-not-exist.go")
+ processor := fileproc.NewFileProcessor(tmpDir)
+ ch := make(chan fileproc.WriteRequest, 1)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ processor.Process(nonExistentFile, ch)
+ })
+
+ results := make([]fileproc.WriteRequest, 0)
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ // Should get no results due to file not existing
+ if len(results) > 0 {
+ t.Error("Expected no results for non-existent file")
+ }
+
+ // Test case 2: File that exceeds size limit
+ largeFile := filepath.Join(tmpDir, "large.go")
+ largeContent := strings.Repeat("// Large file content\n", 100) // > 1KB
+ if err := os.WriteFile(largeFile, []byte(largeContent), 0o600); err != nil {
+ t.Fatalf("Failed to create large file: %v", err)
+ }
+
+ ch2 := make(chan fileproc.WriteRequest, 1)
+ wg.Go(func() {
+ defer close(ch2)
+ processor.Process(largeFile, ch2)
+ })
+
+ results2 := make([]fileproc.WriteRequest, 0)
+ for req := range ch2 {
+ results2 = append(results2, req)
+ }
+ wg.Wait()
+
+ // Should get results because even large files are processed (just different strategy)
+ t.Logf("Large file processing results: %d", len(results2))
+}
+
+// TestProcessorContextCancellationDuringValidation tests context cancellation during file validation.
+func TestProcessorContextCancellationDuringValidation(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ tmpDir := t.TempDir()
+ testFile := filepath.Join(tmpDir, "test.go")
+ content := testContent
+ if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
+ }
+
+ processor := fileproc.NewFileProcessor(tmpDir)
+
+ // Create context that we'll cancel during processing
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ defer cancel()
+
+ // Let context expire
+ time.Sleep(1 * time.Millisecond)
+
+ ch := make(chan fileproc.WriteRequest, 1)
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ if err := processor.ProcessWithContext(ctx, testFile, ch); err != nil {
+ t.Logf("ProcessWithContext error (may be expected): %v", err)
+ }
+ })
+
+ results := make([]fileproc.WriteRequest, 0)
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ // Should get no results due to context cancellation
+ t.Logf("Results with canceled context: %d", len(results))
+}
+
+// TestProcessorInMemoryProcessingEdgeCases tests edge cases in in-memory processing.
+func TestProcessorInMemoryProcessingEdgeCases(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ tmpDir := t.TempDir()
+
+ // Test with empty file
+ emptyFile := filepath.Join(tmpDir, "empty.go")
+ if err := os.WriteFile(emptyFile, []byte(""), 0o600); err != nil {
+ t.Fatalf("Failed to create empty file: %v", err)
+ }
+
+ processor := fileproc.NewFileProcessor(tmpDir)
+ ch := make(chan fileproc.WriteRequest, 1)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ processor.Process(emptyFile, ch)
+ })
+
+ results := make([]fileproc.WriteRequest, 0)
+ for req := range ch {
+ results = append(results, req)
+ }
+ wg.Wait()
+
+ if len(results) != 1 {
+ t.Errorf("Expected 1 result for empty file, got %d", len(results))
+ }
+
+ if len(results) > 0 {
+ result := results[0]
+ if result.Path == "" {
+ t.Error("Expected path in result for empty file")
+ }
+ // Empty file should still be processed
+ }
+}
+
+// TestProcessorStreamingEdgeCases tests edge cases in streaming processing.
+func TestProcessorStreamingEdgeCases(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ tmpDir := t.TempDir()
+
+ // Create a file larger than streaming threshold but test error conditions
+ largeFile := filepath.Join(tmpDir, "large_stream.go")
+ largeContent := strings.Repeat("// Large streaming file content line\n", 50000) // > 1MB
+ if err := os.WriteFile(largeFile, []byte(largeContent), 0o600); err != nil {
+ t.Fatalf("Failed to create large file: %v", err)
+ }
+
+ processor := fileproc.NewFileProcessor(tmpDir)
+
+ // Test with context that gets canceled during streaming
+ ctx, cancel := context.WithCancel(context.Background())
+ ch := make(chan fileproc.WriteRequest, 1)
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+
+ // Start processing
+ // Error is expected due to cancellation
+ if err := processor.ProcessWithContext(ctx, largeFile, ch); err != nil {
+ // Log error for debugging, but don't fail test since cancellation is expected
+ t.Logf("Expected error due to cancellation: %v", err)
+ }
+ })
+
+ // Cancel context after a very short time
+ go func() {
+ time.Sleep(1 * time.Millisecond)
+ cancel()
+ }()
+
+ results := make([]fileproc.WriteRequest, 0)
+ for req := range ch {
+ results = append(results, req)
+
+ // If we get a streaming request, try to read from it with canceled context
+ if req.IsStream && req.Reader != nil {
+ buffer := make([]byte, 1024)
+ _, err := req.Reader.Read(buffer)
+ if err != nil && err != io.EOF {
+ t.Logf("Expected error reading from canceled stream: %v", err)
+ }
+ }
+ }
+ wg.Wait()
+
+ t.Logf("Results with streaming context cancellation: %d", len(results))
+}
+
+// Benchmarks for processor hot paths
+
+// BenchmarkProcessFileInline benchmarks inline file processing for small files.
+func BenchmarkProcessFileInline(b *testing.B) {
+ // Initialize config for file processing
+ viper.Reset()
+ config.LoadConfig()
+
+ // Create a small test file
+ tmpFile, err := os.CreateTemp(b.TempDir(), "bench_inline_*.go")
+ if err != nil {
+ b.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ content := strings.Repeat("// Inline benchmark content\n", 100) // ~2.6KB
+ if _, err := tmpFile.WriteString(content); err != nil {
+ b.Fatalf(shared.TestMsgFailedToWriteContent, err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ b.Fatalf(shared.TestMsgFailedToCloseFile, err)
+ }
+
+ b.ResetTimer()
+ for b.Loop() {
+ ch := make(chan fileproc.WriteRequest, 1)
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ fileproc.ProcessFile(tmpFile.Name(), ch, "")
+ })
+ for req := range ch {
+ _ = req // Drain channel
+ }
+ wg.Wait()
+ }
+}
+
+// BenchmarkProcessFileStreaming benchmarks streaming file processing for large files.
+func BenchmarkProcessFileStreaming(b *testing.B) {
+ // Initialize config for file processing
+ viper.Reset()
+ config.LoadConfig()
+
+ // Create a large test file that triggers streaming
+ tmpFile, err := os.CreateTemp(b.TempDir(), "bench_streaming_*.go")
+ if err != nil {
+ b.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ // Create content larger than streaming threshold (1MB)
+ lineContent := "// Streaming benchmark content line that will be repeated\n"
+ repeatCount := (1048576 / len(lineContent)) + 1000
+ content := strings.Repeat(lineContent, repeatCount)
+
+ if _, err := tmpFile.WriteString(content); err != nil {
+ b.Fatalf(shared.TestMsgFailedToWriteContent, err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ b.Fatalf(shared.TestMsgFailedToCloseFile, err)
+ }
+
+ b.ResetTimer()
+ for b.Loop() {
+ ch := make(chan fileproc.WriteRequest, 1)
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ fileproc.ProcessFile(tmpFile.Name(), ch, "")
+ })
+ for req := range ch {
+ // If streaming, read some content to exercise the reader
+ if req.IsStream && req.Reader != nil {
+ buffer := make([]byte, 4096)
+ for {
+ _, err := req.Reader.Read(buffer)
+ if err != nil {
+ break
+ }
+ }
+ }
+ }
+ wg.Wait()
+ }
+}
+
+// BenchmarkProcessorWithContext benchmarks ProcessWithContext for a single file.
+func BenchmarkProcessorWithContext(b *testing.B) {
+ tmpDir := b.TempDir()
+ testFile := filepath.Join(tmpDir, "bench_context.go")
+ content := strings.Repeat("// Benchmark file content\n", 50)
+ if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
+ b.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
+ }
+
+ processor := fileproc.NewFileProcessor(tmpDir)
+ ctx := context.Background()
+
+ b.ResetTimer()
+ for b.Loop() {
+ ch := make(chan fileproc.WriteRequest, 1)
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ _ = processor.ProcessWithContext(ctx, testFile, ch)
+ })
+ for req := range ch {
+ _ = req // Drain channel
+ }
+ wg.Wait()
+ }
+}
+
+// BenchmarkProcessorWithMonitor benchmarks processing with resource monitoring.
+func BenchmarkProcessorWithMonitor(b *testing.B) {
+ tmpDir := b.TempDir()
+ testFile := filepath.Join(tmpDir, "bench_monitor.go")
+ content := strings.Repeat("// Benchmark file content with monitor\n", 50)
+ if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
+ b.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
+ }
+
+ monitor := fileproc.NewResourceMonitor()
+ defer monitor.Close()
+
+ processor := fileproc.NewFileProcessorWithMonitor(tmpDir, monitor)
+ ctx := context.Background()
+
+ b.ResetTimer()
+ for b.Loop() {
+ ch := make(chan fileproc.WriteRequest, 1)
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ _ = processor.ProcessWithContext(ctx, testFile, ch)
+ })
+ for req := range ch {
+ _ = req // Drain channel
+ }
+ wg.Wait()
+ }
+}
+
+// BenchmarkProcessorConcurrent benchmarks concurrent file processing.
+func BenchmarkProcessorConcurrent(b *testing.B) {
+ tmpDir := b.TempDir()
+
+ // Create multiple test files
+ testFiles := make([]string, 10)
+ for i := 0; i < 10; i++ {
+ testFiles[i] = filepath.Join(tmpDir, fmt.Sprintf("bench_concurrent_%d.go", i))
+ content := strings.Repeat(fmt.Sprintf("// Concurrent file %d content\n", i), 50)
+ if err := os.WriteFile(testFiles[i], []byte(content), 0o600); err != nil {
+ b.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
+ }
+ }
+
+ processor := fileproc.NewFileProcessor(tmpDir)
+ ctx := context.Background()
+ fileCount := len(testFiles)
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ i := 0
+ for pb.Next() {
+ testFile := testFiles[i%fileCount]
+ ch := make(chan fileproc.WriteRequest, 1)
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ defer close(ch)
+ _ = processor.ProcessWithContext(ctx, testFile, ch)
+ })
+ for req := range ch {
+ _ = req // Drain channel
+ }
+ wg.Wait()
+ i++
+ }
+ })
+}
diff --git a/fileproc/registry.go b/fileproc/registry.go
index 1ea6f74..bb32c60 100644
--- a/fileproc/registry.go
+++ b/fileproc/registry.go
@@ -5,6 +5,8 @@ import (
"path/filepath"
"strings"
"sync"
+
+ "github.com/ivuorinen/gibidify/shared"
)
const minExtensionLength = 2
@@ -52,9 +54,9 @@ func initRegistry() *FileTypeRegistry {
imageExts: getImageExtensions(),
binaryExts: getBinaryExtensions(),
languageMap: getLanguageMap(),
- extCache: make(map[string]string, 1000), // Cache for extension normalization
- resultCache: make(map[string]FileTypeResult, 500), // Cache for type results
- maxCacheSize: 500,
+ extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
+ resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
+ maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
}
}
@@ -63,25 +65,28 @@ func getRegistry() *FileTypeRegistry {
registryOnce.Do(func() {
registry = initRegistry()
})
+
return registry
}
-// GetDefaultRegistry returns the default file type registry.
-func GetDefaultRegistry() *FileTypeRegistry {
+// DefaultRegistry returns the default file type registry.
+func DefaultRegistry() *FileTypeRegistry {
return getRegistry()
}
-// GetStats returns a copy of the current registry statistics.
-func (r *FileTypeRegistry) GetStats() RegistryStats {
+// Stats returns a copy of the current registry statistics.
+func (r *FileTypeRegistry) Stats() RegistryStats {
r.cacheMutex.RLock()
defer r.cacheMutex.RUnlock()
+
return r.stats
}
-// GetCacheInfo returns current cache size information.
-func (r *FileTypeRegistry) GetCacheInfo() (extCacheSize, resultCacheSize, maxCacheSize int) {
+// CacheInfo returns current cache size information.
+func (r *FileTypeRegistry) CacheInfo() (extCacheSize, resultCacheSize, maxCacheSize int) {
r.cacheMutex.RLock()
defer r.cacheMutex.RUnlock()
+
return len(r.extCache), len(r.resultCache), r.maxCacheSize
}
@@ -101,7 +106,9 @@ func normalizeExtension(filename string) string {
func isSpecialFile(filename string, extensions map[string]bool) bool {
if filepath.Ext(filename) == "" {
basename := strings.ToLower(filepath.Base(filename))
+
return extensions[basename]
}
+
return false
}
diff --git a/fileproc/resource_monitor_concurrency.go b/fileproc/resource_monitor_concurrency.go
index a66cc4d..822416c 100644
--- a/fileproc/resource_monitor_concurrency.go
+++ b/fileproc/resource_monitor_concurrency.go
@@ -1,7 +1,9 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"context"
+ "fmt"
"sync/atomic"
"time"
)
@@ -26,7 +28,7 @@ func (rm *ResourceMonitor) AcquireReadSlot(ctx context.Context) error {
// Wait and retry
select {
case <-ctx.Done():
- return ctx.Err()
+ return fmt.Errorf("context canceled while waiting for read slot: %w", ctx.Err())
case <-time.After(time.Millisecond):
// Continue loop
}
@@ -45,17 +47,22 @@ func (rm *ResourceMonitor) ReleaseReadSlot() {
// CreateFileProcessingContext creates a context with file processing timeout.
func (rm *ResourceMonitor) CreateFileProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
if !rm.enabled || rm.fileProcessingTimeout <= 0 {
+ // No-op cancel function - monitoring disabled or no timeout configured
return parent, func() {}
}
+
return context.WithTimeout(parent, rm.fileProcessingTimeout)
}
// CreateOverallProcessingContext creates a context with overall processing timeout.
-func (rm *ResourceMonitor) CreateOverallProcessingContext(
- parent context.Context,
-) (context.Context, context.CancelFunc) {
+func (rm *ResourceMonitor) CreateOverallProcessingContext(parent context.Context) (
+ context.Context,
+ context.CancelFunc,
+) {
if !rm.enabled || rm.overallTimeout <= 0 {
+ // No-op cancel function - monitoring disabled or no timeout configured
return parent, func() {}
}
+
return context.WithTimeout(parent, rm.overallTimeout)
}
diff --git a/fileproc/resource_monitor_concurrency_test.go b/fileproc/resource_monitor_concurrency_test.go
index 7a886b9..2f8ae33 100644
--- a/fileproc/resource_monitor_concurrency_test.go
+++ b/fileproc/resource_monitor_concurrency_test.go
@@ -10,7 +10,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
-func TestResourceMonitor_ConcurrentReadsLimit(t *testing.T) {
+func TestResourceMonitorConcurrentReadsLimit(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set a low concurrent reads limit for testing
@@ -58,7 +58,7 @@ func TestResourceMonitor_ConcurrentReadsLimit(t *testing.T) {
rm.ReleaseReadSlot()
}
-func TestResourceMonitor_TimeoutContexts(t *testing.T) {
+func TestResourceMonitorTimeoutContexts(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set short timeouts for testing
diff --git a/fileproc/resource_monitor_integration_test.go b/fileproc/resource_monitor_integration_test.go
index eba2bd3..d42155c 100644
--- a/fileproc/resource_monitor_integration_test.go
+++ b/fileproc/resource_monitor_integration_test.go
@@ -11,7 +11,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
-func TestResourceMonitor_Integration(t *testing.T) {
+func TestResourceMonitorIntegration(t *testing.T) {
// Create temporary test directory
tempDir := t.TempDir()
@@ -47,6 +47,7 @@ func TestResourceMonitor_Integration(t *testing.T) {
err = rm.ValidateFileProcessing(filePath, fileInfo.Size())
if err != nil {
t.Errorf("Failed to validate file %s: %v", filePath, err)
+
continue
}
@@ -54,6 +55,7 @@ func TestResourceMonitor_Integration(t *testing.T) {
err = rm.AcquireReadSlot(ctx)
if err != nil {
t.Errorf("Failed to acquire read slot for %s: %v", filePath, err)
+
continue
}
@@ -71,7 +73,7 @@ func TestResourceMonitor_Integration(t *testing.T) {
}
// Verify final metrics
- metrics := rm.GetMetrics()
+ metrics := rm.Metrics()
if metrics.FilesProcessed != int64(len(testFiles)) {
t.Errorf("Expected %d files processed, got %d", len(testFiles), metrics.FilesProcessed)
}
diff --git a/fileproc/resource_monitor_metrics.go b/fileproc/resource_monitor_metrics.go
index 8bfba34..6ad935f 100644
--- a/fileproc/resource_monitor_metrics.go
+++ b/fileproc/resource_monitor_metrics.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -5,9 +6,7 @@ import (
"sync/atomic"
"time"
- "github.com/sirupsen/logrus"
-
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// RecordFileProcessed records that a file has been successfully processed.
@@ -18,8 +17,8 @@ func (rm *ResourceMonitor) RecordFileProcessed(fileSize int64) {
}
}
-// GetMetrics returns current resource usage metrics.
-func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
+// Metrics returns current resource usage metrics.
+func (rm *ResourceMonitor) Metrics() ResourceMetrics {
if !rm.enableResourceMon {
return ResourceMetrics{}
}
@@ -54,10 +53,11 @@ func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
FilesProcessed: filesProcessed,
TotalSizeProcessed: totalSize,
ConcurrentReads: atomic.LoadInt64(&rm.concurrentReads),
+ MaxConcurrentReads: int64(rm.maxConcurrentReads),
ProcessingDuration: duration,
AverageFileSize: avgFileSize,
ProcessingRate: processingRate,
- MemoryUsageMB: gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, 0) / 1024 / 1024,
+ MemoryUsageMB: shared.BytesToMB(m.Alloc),
MaxMemoryUsageMB: int64(rm.hardMemoryLimitMB),
ViolationsDetected: violations,
DegradationActive: rm.degradationActive,
@@ -68,19 +68,16 @@ func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
// LogResourceInfo logs current resource limit configuration.
func (rm *ResourceMonitor) LogResourceInfo() {
+ logger := shared.GetLogger()
if rm.enabled {
- logrus.Infof(
- "Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
- rm.maxFiles,
- rm.maxTotalSize/1024/1024,
- int(rm.fileProcessingTimeout.Seconds()),
- int(rm.overallTimeout.Seconds()),
- )
- logrus.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
+ logger.Infof("Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
+ rm.maxFiles, rm.maxTotalSize/int64(shared.BytesPerMB), int(rm.fileProcessingTimeout.Seconds()),
+ int(rm.overallTimeout.Seconds()))
+ logger.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
rm.maxConcurrentReads, rm.rateLimitFilesPerSec, rm.hardMemoryLimitMB)
- logrus.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",
+ logger.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",
rm.enableGracefulDegr, rm.enableResourceMon)
} else {
- logrus.Info("Resource limits disabled")
+ logger.Info("Resource limits disabled")
}
}
diff --git a/fileproc/resource_monitor_metrics_test.go b/fileproc/resource_monitor_metrics_test.go
index 1b28786..83c16bb 100644
--- a/fileproc/resource_monitor_metrics_test.go
+++ b/fileproc/resource_monitor_metrics_test.go
@@ -9,7 +9,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
-func TestResourceMonitor_Metrics(t *testing.T) {
+func TestResourceMonitorMetrics(t *testing.T) {
testutil.ResetViperConfig(t, "")
viper.Set("resourceLimits.enabled", true)
@@ -23,7 +23,7 @@ func TestResourceMonitor_Metrics(t *testing.T) {
rm.RecordFileProcessed(2000)
rm.RecordFileProcessed(500)
- metrics := rm.GetMetrics()
+ metrics := rm.Metrics()
// Verify metrics
if metrics.FilesProcessed != 3 {
diff --git a/fileproc/resource_monitor_rate_limiting.go b/fileproc/resource_monitor_rate_limiting.go
index c475777..29a25da 100644
--- a/fileproc/resource_monitor_rate_limiting.go
+++ b/fileproc/resource_monitor_rate_limiting.go
@@ -1,10 +1,12 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"context"
+ "fmt"
"time"
- "github.com/sirupsen/logrus"
+ "github.com/ivuorinen/gibidify/shared"
)
// WaitForRateLimit waits for rate limiting if enabled.
@@ -15,22 +17,29 @@ func (rm *ResourceMonitor) WaitForRateLimit(ctx context.Context) error {
select {
case <-ctx.Done():
- return ctx.Err()
+ return fmt.Errorf("context canceled while waiting for rate limit: %w", ctx.Err())
case <-rm.rateLimitChan:
return nil
case <-time.After(time.Second): // Fallback timeout
- logrus.Warn("Rate limiting timeout exceeded, continuing without rate limit")
+ logger := shared.GetLogger()
+ logger.Warn("Rate limiting timeout exceeded, continuing without rate limit")
+
return nil
}
}
// rateLimiterRefill refills the rate limiting channel periodically.
func (rm *ResourceMonitor) rateLimiterRefill() {
- for range rm.rateLimiter.C {
+ for {
select {
- case rm.rateLimitChan <- struct{}{}:
- default:
- // Channel is full, skip
+ case <-rm.done:
+ return
+ case <-rm.rateLimiter.C:
+ select {
+ case rm.rateLimitChan <- struct{}{}:
+ default:
+ // Channel is full, skip
+ }
}
}
}
diff --git a/fileproc/resource_monitor_rate_limiting_test.go b/fileproc/resource_monitor_rate_limiting_test.go
index 4c8e15d..3c686ed 100644
--- a/fileproc/resource_monitor_rate_limiting_test.go
+++ b/fileproc/resource_monitor_rate_limiting_test.go
@@ -10,7 +10,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
-func TestResourceMonitor_RateLimiting(t *testing.T) {
+func TestResourceMonitorRateLimiting(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Enable rate limiting with a low rate for testing
diff --git a/fileproc/resource_monitor_state.go b/fileproc/resource_monitor_state.go
index 1fe544e..803f6ac 100644
--- a/fileproc/resource_monitor_state.go
+++ b/fileproc/resource_monitor_state.go
@@ -1,9 +1,11 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
// IsEmergencyStopActive returns whether emergency stop is active.
func (rm *ResourceMonitor) IsEmergencyStopActive() bool {
rm.mu.RLock()
defer rm.mu.RUnlock()
+
return rm.emergencyStopRequested
}
@@ -11,11 +13,27 @@ func (rm *ResourceMonitor) IsEmergencyStopActive() bool {
func (rm *ResourceMonitor) IsDegradationActive() bool {
rm.mu.RLock()
defer rm.mu.RUnlock()
+
return rm.degradationActive
}
// Close cleans up the resource monitor.
func (rm *ResourceMonitor) Close() {
+ rm.mu.Lock()
+ defer rm.mu.Unlock()
+
+ // Prevent multiple closes
+ if rm.closed {
+ return
+ }
+ rm.closed = true
+
+ // Signal goroutines to stop
+ if rm.done != nil {
+ close(rm.done)
+ }
+
+ // Stop the ticker
if rm.rateLimiter != nil {
rm.rateLimiter.Stop()
}
diff --git a/fileproc/resource_monitor_types.go b/fileproc/resource_monitor_types.go
index 5b12758..b942f79 100644
--- a/fileproc/resource_monitor_types.go
+++ b/fileproc/resource_monitor_types.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -5,6 +6,7 @@ import (
"time"
"github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
)
// ResourceMonitor monitors resource usage and enforces limits to prevent DoS attacks.
@@ -31,12 +33,14 @@ type ResourceMonitor struct {
// Rate limiting
rateLimiter *time.Ticker
rateLimitChan chan struct{}
+ done chan struct{} // Signal to stop goroutines
// Synchronization
mu sync.RWMutex
violationLogged map[string]bool
degradationActive bool
emergencyStopRequested bool
+ closed bool
}
// ResourceMetrics holds comprehensive resource usage metrics.
@@ -44,6 +48,7 @@ type ResourceMetrics struct {
FilesProcessed int64 `json:"files_processed"`
TotalSizeProcessed int64 `json:"total_size_processed"`
ConcurrentReads int64 `json:"concurrent_reads"`
+ MaxConcurrentReads int64 `json:"max_concurrent_reads"`
ProcessingDuration time.Duration `json:"processing_duration"`
AverageFileSize float64 `json:"average_file_size"`
ProcessingRate float64 `json:"processing_rate_files_per_sec"`
@@ -57,31 +62,32 @@ type ResourceMetrics struct {
// ResourceViolation represents a detected resource limit violation.
type ResourceViolation struct {
- Type string `json:"type"`
- Message string `json:"message"`
- Current interface{} `json:"current"`
- Limit interface{} `json:"limit"`
- Timestamp time.Time `json:"timestamp"`
- Context map[string]interface{} `json:"context"`
+ Type string `json:"type"`
+ Message string `json:"message"`
+ Current any `json:"current"`
+ Limit any `json:"limit"`
+ Timestamp time.Time `json:"timestamp"`
+ Context map[string]any `json:"context"`
}
// NewResourceMonitor creates a new resource monitor with configuration.
func NewResourceMonitor() *ResourceMonitor {
rm := &ResourceMonitor{
- enabled: config.GetResourceLimitsEnabled(),
- maxFiles: config.GetMaxFiles(),
- maxTotalSize: config.GetMaxTotalSize(),
- fileProcessingTimeout: time.Duration(config.GetFileProcessingTimeoutSec()) * time.Second,
- overallTimeout: time.Duration(config.GetOverallTimeoutSec()) * time.Second,
- maxConcurrentReads: config.GetMaxConcurrentReads(),
- rateLimitFilesPerSec: config.GetRateLimitFilesPerSec(),
- hardMemoryLimitMB: config.GetHardMemoryLimitMB(),
- enableGracefulDegr: config.GetEnableGracefulDegradation(),
- enableResourceMon: config.GetEnableResourceMonitoring(),
+ enabled: config.ResourceLimitsEnabled(),
+ maxFiles: config.MaxFiles(),
+ maxTotalSize: config.MaxTotalSize(),
+ fileProcessingTimeout: time.Duration(config.FileProcessingTimeoutSec()) * time.Second,
+ overallTimeout: time.Duration(config.OverallTimeoutSec()) * time.Second,
+ maxConcurrentReads: config.MaxConcurrentReads(),
+ rateLimitFilesPerSec: config.RateLimitFilesPerSec(),
+ hardMemoryLimitMB: config.HardMemoryLimitMB(),
+ enableGracefulDegr: config.EnableGracefulDegradation(),
+ enableResourceMon: config.EnableResourceMonitoring(),
startTime: time.Now(),
lastRateLimitCheck: time.Now(),
violationLogged: make(map[string]bool),
- hardMemoryLimitBytes: int64(config.GetHardMemoryLimitMB()) * 1024 * 1024,
+ hardMemoryLimitBytes: int64(config.HardMemoryLimitMB()) * int64(shared.BytesPerMB),
+ done: make(chan struct{}),
}
// Initialize rate limiter if rate limiting is enabled
diff --git a/fileproc/resource_monitor_types_test.go b/fileproc/resource_monitor_types_test.go
index 7d91c44..4fca370 100644
--- a/fileproc/resource_monitor_types_test.go
+++ b/fileproc/resource_monitor_types_test.go
@@ -7,11 +7,11 @@ import (
"github.com/spf13/viper"
- "github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
-func TestResourceMonitor_NewResourceMonitor(t *testing.T) {
+func TestResourceMonitorNewResourceMonitor(t *testing.T) {
// Reset viper for clean test state
testutil.ResetViperConfig(t, "")
@@ -25,24 +25,24 @@ func TestResourceMonitor_NewResourceMonitor(t *testing.T) {
t.Error("Expected resource monitor to be enabled by default")
}
- if rm.maxFiles != config.DefaultMaxFiles {
- t.Errorf("Expected maxFiles to be %d, got %d", config.DefaultMaxFiles, rm.maxFiles)
+ if rm.maxFiles != shared.ConfigMaxFilesDefault {
+ t.Errorf("Expected maxFiles to be %d, got %d", shared.ConfigMaxFilesDefault, rm.maxFiles)
}
- if rm.maxTotalSize != config.DefaultMaxTotalSize {
- t.Errorf("Expected maxTotalSize to be %d, got %d", config.DefaultMaxTotalSize, rm.maxTotalSize)
+ if rm.maxTotalSize != shared.ConfigMaxTotalSizeDefault {
+ t.Errorf("Expected maxTotalSize to be %d, got %d", shared.ConfigMaxTotalSizeDefault, rm.maxTotalSize)
}
- if rm.fileProcessingTimeout != time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second {
+ if rm.fileProcessingTimeout != time.Duration(shared.ConfigFileProcessingTimeoutSecDefault)*time.Second {
t.Errorf("Expected fileProcessingTimeout to be %v, got %v",
- time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second, rm.fileProcessingTimeout)
+ time.Duration(shared.ConfigFileProcessingTimeoutSecDefault)*time.Second, rm.fileProcessingTimeout)
}
// Clean up
rm.Close()
}
-func TestResourceMonitor_DisabledResourceLimits(t *testing.T) {
+func TestResourceMonitorDisabledResourceLimits(t *testing.T) {
// Reset viper for clean test state
testutil.ResetViperConfig(t, "")
@@ -72,3 +72,77 @@ func TestResourceMonitor_DisabledResourceLimits(t *testing.T) {
t.Errorf("Expected no error when rate limiting disabled, got %v", err)
}
}
+
+// TestResourceMonitorStateQueries tests state query functions.
+func TestResourceMonitorStateQueries(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ rm := NewResourceMonitor()
+ defer rm.Close()
+
+ // Test IsEmergencyStopActive - should be false initially
+ if rm.IsEmergencyStopActive() {
+ t.Error("Expected emergency stop to be inactive initially")
+ }
+
+ // Test IsDegradationActive - should be false initially
+ if rm.IsDegradationActive() {
+ t.Error("Expected degradation mode to be inactive initially")
+ }
+}
+
+// TestResourceMonitorIsEmergencyStopActive tests the IsEmergencyStopActive method.
+func TestResourceMonitorIsEmergencyStopActive(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ rm := NewResourceMonitor()
+ defer rm.Close()
+
+ // Test initial state
+ active := rm.IsEmergencyStopActive()
+ if active {
+ t.Error("Expected emergency stop to be inactive initially")
+ }
+
+ // The method should return a consistent value on multiple calls
+ for i := 0; i < 5; i++ {
+ if rm.IsEmergencyStopActive() != active {
+ t.Error("IsEmergencyStopActive should return consistent values")
+ }
+ }
+}
+
+// TestResourceMonitorIsDegradationActive tests the IsDegradationActive method.
+func TestResourceMonitorIsDegradationActive(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ rm := NewResourceMonitor()
+ defer rm.Close()
+
+ // Test initial state
+ active := rm.IsDegradationActive()
+ if active {
+ t.Error("Expected degradation mode to be inactive initially")
+ }
+
+ // The method should return a consistent value on multiple calls
+ for i := 0; i < 5; i++ {
+ if rm.IsDegradationActive() != active {
+ t.Error("IsDegradationActive should return consistent values")
+ }
+ }
+}
+
+// TestResourceMonitorClose tests the Close method.
+func TestResourceMonitorClose(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ rm := NewResourceMonitor()
+
+ // Close should not panic
+ rm.Close()
+
+ // Multiple closes should be safe
+ rm.Close()
+ rm.Close()
+}
diff --git a/fileproc/resource_monitor_validation.go b/fileproc/resource_monitor_validation.go
index 7170862..9c93393 100644
--- a/fileproc/resource_monitor_validation.go
+++ b/fileproc/resource_monitor_validation.go
@@ -1,3 +1,4 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -5,9 +6,7 @@ import (
"sync/atomic"
"time"
- "github.com/sirupsen/logrus"
-
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// ValidateFileProcessing checks if a file can be processed based on resource limits.
@@ -21,12 +20,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check if emergency stop is active
if rm.emergencyStopRequested {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitMemory,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitMemory,
"processing stopped due to emergency memory condition",
filePath,
- map[string]interface{}{
+ map[string]any{
"emergency_stop_active": true,
},
)
@@ -35,12 +34,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check file count limit
currentFiles := atomic.LoadInt64(&rm.filesProcessed)
if int(currentFiles) >= rm.maxFiles {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitFiles,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitFiles,
"maximum file count limit exceeded",
filePath,
- map[string]interface{}{
+ map[string]any{
"current_files": currentFiles,
"max_files": rm.maxFiles,
},
@@ -50,12 +49,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check total size limit
currentTotalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
if currentTotalSize+fileSize > rm.maxTotalSize {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitTotalSize,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTotalSize,
"maximum total size limit would be exceeded",
filePath,
- map[string]interface{}{
+ map[string]any{
"current_total_size": currentTotalSize,
"file_size": fileSize,
"max_total_size": rm.maxTotalSize,
@@ -65,12 +64,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check overall timeout
if time.Since(rm.startTime) > rm.overallTimeout {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitTimeout,
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitTimeout,
"overall processing timeout exceeded",
filePath,
- map[string]interface{}{
+ map[string]any{
"processing_duration": time.Since(rm.startTime),
"overall_timeout": rm.overallTimeout,
},
@@ -88,60 +87,93 @@ func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
var m runtime.MemStats
runtime.ReadMemStats(&m)
- currentMemory := gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, 0)
+ currentMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
- if currentMemory > rm.hardMemoryLimitBytes {
- rm.mu.Lock()
- defer rm.mu.Unlock()
-
- // Log violation if not already logged
- violationKey := "hard_memory_limit"
- if !rm.violationLogged[violationKey] {
- logrus.Errorf("Hard memory limit exceeded: %dMB > %dMB",
- currentMemory/1024/1024, rm.hardMemoryLimitMB)
- rm.violationLogged[violationKey] = true
- }
-
- if rm.enableGracefulDegr {
- // Force garbage collection
- runtime.GC()
-
- // Check again after GC
- runtime.ReadMemStats(&m)
- currentMemory = gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, 0)
-
- if currentMemory > rm.hardMemoryLimitBytes {
- // Still over limit, activate emergency stop
- rm.emergencyStopRequested = true
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitMemory,
- "hard memory limit exceeded, emergency stop activated",
- "",
- map[string]interface{}{
- "current_memory_mb": currentMemory / 1024 / 1024,
- "limit_mb": rm.hardMemoryLimitMB,
- "emergency_stop": true,
- },
- )
- }
- // Memory freed by GC, continue with degradation
- rm.degradationActive = true
- logrus.Info("Memory freed by garbage collection, continuing with degradation mode")
- } else {
- // No graceful degradation, hard stop
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeResourceLimitMemory,
- "hard memory limit exceeded",
- "",
- map[string]interface{}{
- "current_memory_mb": currentMemory / 1024 / 1024,
- "limit_mb": rm.hardMemoryLimitMB,
- },
- )
- }
+ if currentMemory <= rm.hardMemoryLimitBytes {
+ return nil
}
+ return rm.handleMemoryLimitExceeded(currentMemory)
+}
+
+// handleMemoryLimitExceeded handles the case when hard memory limit is exceeded.
+func (rm *ResourceMonitor) handleMemoryLimitExceeded(currentMemory int64) error {
+ rm.mu.Lock()
+ defer rm.mu.Unlock()
+
+ rm.logMemoryViolation(currentMemory)
+
+ if !rm.enableGracefulDegr {
+ return rm.createHardMemoryLimitError(currentMemory, false)
+ }
+
+ return rm.tryGracefulRecovery(currentMemory)
+}
+
+// logMemoryViolation logs memory limit violation if not already logged.
+func (rm *ResourceMonitor) logMemoryViolation(currentMemory int64) {
+ violationKey := "hard_memory_limit"
+
+ // Ensure map is initialized
+ if rm.violationLogged == nil {
+ rm.violationLogged = make(map[string]bool)
+ }
+
+ if rm.violationLogged[violationKey] {
+ return
+ }
+
+ logger := shared.GetLogger()
+ logger.Errorf("Hard memory limit exceeded: %dMB > %dMB",
+ currentMemory/int64(shared.BytesPerMB), rm.hardMemoryLimitMB)
+ rm.violationLogged[violationKey] = true
+}
+
+// tryGracefulRecovery attempts graceful recovery by forcing GC.
+func (rm *ResourceMonitor) tryGracefulRecovery(_ int64) error {
+ // Force garbage collection
+ runtime.GC()
+
+ // Check again after GC
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+ newMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
+
+ if newMemory > rm.hardMemoryLimitBytes {
+ // Still over limit, activate emergency stop
+ rm.emergencyStopRequested = true
+
+ return rm.createHardMemoryLimitError(newMemory, true)
+ }
+
+ // Memory freed by GC, continue with degradation
+ rm.degradationActive = true
+ logger := shared.GetLogger()
+ logger.Info("Memory freed by garbage collection, continuing with degradation mode")
+
return nil
}
+
+// createHardMemoryLimitError creates a structured error for memory limit exceeded.
+func (rm *ResourceMonitor) createHardMemoryLimitError(currentMemory int64, emergencyStop bool) error {
+ message := "hard memory limit exceeded"
+ if emergencyStop {
+ message = "hard memory limit exceeded, emergency stop activated"
+ }
+
+ context := map[string]any{
+ "current_memory_mb": currentMemory / int64(shared.BytesPerMB),
+ "limit_mb": rm.hardMemoryLimitMB,
+ }
+ if emergencyStop {
+ context["emergency_stop"] = true
+ }
+
+ return shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeResourceLimitMemory,
+ message,
+ "",
+ context,
+ )
+}
diff --git a/fileproc/resource_monitor_validation_test.go b/fileproc/resource_monitor_validation_test.go
index 26c675c..2f06be4 100644
--- a/fileproc/resource_monitor_validation_test.go
+++ b/fileproc/resource_monitor_validation_test.go
@@ -2,19 +2,46 @@ package fileproc
import (
"errors"
+ "strings"
"testing"
"github.com/spf13/viper"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
-func TestResourceMonitor_FileCountLimit(t *testing.T) {
+// assertStructuredError verifies that an error is a StructuredError with the expected code.
+func assertStructuredError(t *testing.T, err error, expectedCode string) {
+ t.Helper()
+ structErr := &shared.StructuredError{}
+ ok := errors.As(err, &structErr)
+ if !ok {
+ t.Errorf("Expected StructuredError, got %T", err)
+ } else if structErr.Code != expectedCode {
+ t.Errorf("Expected error code %s, got %s", expectedCode, structErr.Code)
+ }
+}
+
+// validateMemoryLimitError validates that an error is a proper memory limit StructuredError.
+func validateMemoryLimitError(t *testing.T, err error) {
+ t.Helper()
+
+ structErr := &shared.StructuredError{}
+ if errors.As(err, &structErr) {
+ if structErr.Code != shared.CodeResourceLimitMemory {
+ t.Errorf("Expected memory limit error code, got %s", structErr.Code)
+ }
+ } else {
+ t.Errorf("Expected StructuredError, got %T", err)
+ }
+}
+
+func TestResourceMonitorFileCountLimit(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set a very low file count limit for testing
- viper.Set("resourceLimits.enabled", true)
+ viper.Set(shared.TestCfgResourceLimitsEnabled, true)
viper.Set("resourceLimits.maxFiles", 2)
rm := NewResourceMonitor()
@@ -41,20 +68,14 @@ func TestResourceMonitor_FileCountLimit(t *testing.T) {
}
// Verify it's the correct error type
- var structErr *gibidiutils.StructuredError
- ok := errors.As(err, &structErr)
- if !ok {
- t.Errorf("Expected StructuredError, got %T", err)
- } else if structErr.Code != gibidiutils.CodeResourceLimitFiles {
- t.Errorf("Expected error code %s, got %s", gibidiutils.CodeResourceLimitFiles, structErr.Code)
- }
+ assertStructuredError(t, err, shared.CodeResourceLimitFiles)
}
-func TestResourceMonitor_TotalSizeLimit(t *testing.T) {
+func TestResourceMonitorTotalSizeLimit(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set a low total size limit for testing (1KB)
- viper.Set("resourceLimits.enabled", true)
+ viper.Set(shared.TestCfgResourceLimitsEnabled, true)
viper.Set("resourceLimits.maxTotalSize", 1024)
rm := NewResourceMonitor()
@@ -81,11 +102,103 @@ func TestResourceMonitor_TotalSizeLimit(t *testing.T) {
}
// Verify it's the correct error type
- var structErr *gibidiutils.StructuredError
- ok := errors.As(err, &structErr)
- if !ok {
- t.Errorf("Expected StructuredError, got %T", err)
- } else if structErr.Code != gibidiutils.CodeResourceLimitTotalSize {
- t.Errorf("Expected error code %s, got %s", gibidiutils.CodeResourceLimitTotalSize, structErr.Code)
+ assertStructuredError(t, err, shared.CodeResourceLimitTotalSize)
+}
+
+// TestResourceMonitor_MemoryLimitExceeded tests memory limit violation scenarios.
+func TestResourceMonitorMemoryLimitExceeded(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Set very low memory limit to try to force violations
+ viper.Set(shared.TestCfgResourceLimitsEnabled, true)
+ viper.Set("resourceLimits.hardMemoryLimitMB", 0.001) // 1KB - extremely low
+
+ rm := NewResourceMonitor()
+ defer rm.Close()
+
+ // Allocate large buffer to increase memory usage before check
+ largeBuffer := make([]byte, 10*1024*1024) // 10MB allocation
+ _ = largeBuffer[0] // Use the buffer to prevent optimization
+
+ // Check hard memory limit - might trigger if actual memory is high enough
+ err := rm.CheckHardMemoryLimit()
+
+ // Note: This test might not always fail since it depends on actual runtime memory
+ // But if it does fail, verify it's the correct error type
+ if err != nil {
+ validateMemoryLimitError(t, err)
+ t.Log("Successfully triggered memory limit violation")
+ } else {
+ t.Log("Memory limit check passed - actual memory usage may be within limits")
+ }
+}
+
+// TestResourceMonitor_MemoryLimitHandling tests the memory violation detection.
+func TestResourceMonitorMemoryLimitHandling(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Enable resource limits with very small hard limit
+ viper.Set(shared.TestCfgResourceLimitsEnabled, true)
+ viper.Set("resourceLimits.hardMemoryLimitMB", 0.0001) // Very tiny limit
+ viper.Set("resourceLimits.enableGracefulDegradation", true)
+
+ rm := NewResourceMonitor()
+ defer rm.Close()
+
+ // Allocate more memory to increase chances of triggering limit
+ buffers := make([][]byte, 0, 100) // Pre-allocate capacity
+ for i := 0; i < 100; i++ {
+ buffer := make([]byte, 1024*1024) // 1MB each
+ buffers = append(buffers, buffer)
+ _ = buffer[0] // Use buffer
+ _ = buffers // Use the slice to prevent unused variable warning
+
+ // Check periodically
+ if i%10 == 0 {
+ err := rm.CheckHardMemoryLimit()
+ if err != nil {
+ // Successfully triggered memory limit
+ if !strings.Contains(err.Error(), "memory limit") {
+ t.Errorf("Expected error message to mention memory limit, got: %v", err)
+ }
+ t.Log("Successfully triggered memory limit handling")
+
+ return
+ }
+ }
+ }
+
+ t.Log("Could not trigger memory limit - actual memory usage may be lower than limit")
+}
+
+// TestResourceMonitorGracefulRecovery tests graceful recovery attempts.
+func TestResourceMonitorGracefulRecovery(t *testing.T) {
+ testutil.ResetViperConfig(t, "")
+
+ // Set memory limits that will trigger recovery
+ viper.Set(shared.TestCfgResourceLimitsEnabled, true)
+
+ rm := NewResourceMonitor()
+ defer rm.Close()
+
+ // Force a deterministic 1-byte hard memory limit to trigger recovery
+ rm.hardMemoryLimitBytes = 1
+
+ // Process multiple files to accumulate memory usage
+ for i := 0; i < 3; i++ {
+ filePath := "/tmp/test" + string(rune('1'+i)) + ".txt"
+ fileSize := int64(400) // Each file is 400 bytes
+
+ // First few might pass, but eventually should trigger recovery mechanisms
+ err := rm.ValidateFileProcessing(filePath, fileSize)
+ if err != nil {
+ // Once we hit the limit, test that the error is appropriate
+ if !strings.Contains(err.Error(), "resource") && !strings.Contains(err.Error(), "limit") {
+ t.Errorf("Expected resource limit error, got: %v", err)
+ }
+
+ break
+ }
+ rm.RecordFileProcessed(fileSize)
}
}
diff --git a/fileproc/walker.go b/fileproc/walker.go
index 95ea0b8..b0f92d6 100644
--- a/fileproc/walker.go
+++ b/fileproc/walker.go
@@ -5,7 +5,7 @@ import (
"os"
"path/filepath"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// Walker defines an interface for scanning directories.
@@ -30,13 +30,16 @@ func NewProdWalker() *ProdWalker {
// Walk scans the given root directory recursively and returns a slice of file paths
// that are not ignored based on .gitignore/.ignore files, the configuration, or the default binary/image filter.
func (w *ProdWalker) Walk(root string) ([]string, error) {
- absRoot, err := gibidiutils.GetAbsolutePath(root)
+ absRoot, err := shared.AbsolutePath(root)
if err != nil {
- return nil, gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeFileSystem, gibidiutils.CodeFSPathResolution,
+ return nil, shared.WrapError(
+ err,
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSPathResolution,
"failed to resolve root path",
).WithFilePath(root)
}
+
return w.walkDir(absRoot, []ignoreRule{})
}
@@ -50,8 +53,10 @@ func (w *ProdWalker) walkDir(currentDir string, parentRules []ignoreRule) ([]str
entries, err := os.ReadDir(currentDir)
if err != nil {
- return nil, gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeFileSystem, gibidiutils.CodeFSAccess,
+ return nil, shared.WrapError(
+ err,
+ shared.ErrorTypeFileSystem,
+ shared.CodeFSAccess,
"failed to read directory",
).WithFilePath(currentDir)
}
@@ -69,8 +74,10 @@ func (w *ProdWalker) walkDir(currentDir string, parentRules []ignoreRule) ([]str
if entry.IsDir() {
subFiles, err := w.walkDir(fullPath, rules)
if err != nil {
- return nil, gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingTraversal,
+ return nil, shared.WrapError(
+ err,
+ shared.ErrorTypeProcessing,
+ shared.CodeProcessingTraversal,
"failed to traverse subdirectory",
).WithFilePath(fullPath)
}
diff --git a/fileproc/walker_test.go b/fileproc/walker_test.go
index bf8ef81..dfee038 100644
--- a/fileproc/walker_test.go
+++ b/fileproc/walker_test.go
@@ -61,8 +61,6 @@ func TestProdWalkerBinaryCheck(t *testing.T) {
// Reset FileTypeRegistry to ensure clean state
fileproc.ResetRegistryForTesting()
- // Ensure cleanup runs even if test fails
- t.Cleanup(fileproc.ResetRegistryForTesting)
// Run walker
w := fileproc.NewProdWalker()
diff --git a/fileproc/writer.go b/fileproc/writer.go
index 66192b2..3d0937b 100644
--- a/fileproc/writer.go
+++ b/fileproc/writer.go
@@ -2,103 +2,66 @@
package fileproc
import (
- "fmt"
"os"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
-// WriterConfig holds configuration for the writer.
-type WriterConfig struct {
- Format string
- Prefix string
- Suffix string
-}
+// startFormatWriter handles generic writer orchestration for any format.
+// This eliminates code duplication across format-specific writer functions.
+// Uses the FormatWriter interface defined in formats.go.
+func startFormatWriter(
+ outFile *os.File,
+ writeCh <-chan WriteRequest,
+ done chan<- struct{},
+ prefix, suffix string,
+ writerFactory func(*os.File) FormatWriter,
+) {
+ defer close(done)
-// Validate checks if the WriterConfig is valid.
-func (c WriterConfig) Validate() error {
- if c.Format == "" {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- "format cannot be empty",
- "",
- nil,
- )
+ writer := writerFactory(outFile)
+
+ // Start writing
+ if err := writer.Start(prefix, suffix); err != nil {
+ shared.LogError("Failed to start writer", err)
+
+ return
}
- switch c.Format {
- case "markdown", "json", "yaml":
- return nil
- default:
- context := map[string]any{
- "format": c.Format,
+ // Process files
+ for req := range writeCh {
+ if err := writer.WriteFile(req); err != nil {
+ shared.LogError("Failed to write file", err)
}
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- fmt.Sprintf("unsupported format: %s", c.Format),
- "",
- context,
- )
+ }
+
+ // Close writer
+ if err := writer.Close(); err != nil {
+ shared.LogError("Failed to close writer", err)
}
}
// StartWriter writes the output in the specified format with memory optimization.
-func StartWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, config WriterConfig) {
- // Validate config
- if err := config.Validate(); err != nil {
- gibidiutils.LogError("Invalid writer configuration", err)
- close(done)
- return
- }
-
- // Validate outFile is not nil
- if outFile == nil {
- err := gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileWrite,
- "output file is nil",
- "",
- nil,
- )
- gibidiutils.LogError("Failed to write output", err)
- close(done)
- return
- }
-
- // Validate outFile is accessible
- if _, err := outFile.Stat(); err != nil {
- structErr := gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOFileWrite,
- "failed to stat output file",
- )
- gibidiutils.LogError("Failed to validate output file", structErr)
- close(done)
- return
- }
-
- switch config.Format {
- case "markdown":
- startMarkdownWriter(outFile, writeCh, done, config.Prefix, config.Suffix)
- case "json":
- startJSONWriter(outFile, writeCh, done, config.Prefix, config.Suffix)
- case "yaml":
- startYAMLWriter(outFile, writeCh, done, config.Prefix, config.Suffix)
+func StartWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, format, prefix, suffix string) {
+ switch format {
+ case shared.FormatMarkdown:
+ startMarkdownWriter(outFile, writeCh, done, prefix, suffix)
+ case shared.FormatJSON:
+ startJSONWriter(outFile, writeCh, done, prefix, suffix)
+ case shared.FormatYAML:
+ startYAMLWriter(outFile, writeCh, done, prefix, suffix)
default:
- context := map[string]interface{}{
- "format": config.Format,
+ context := map[string]any{
+ "format": format,
}
- err := gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationFormat,
- fmt.Sprintf("unsupported format: %s", config.Format),
+ err := shared.NewStructuredError(
+ shared.ErrorTypeValidation,
+ shared.CodeValidationFormat,
+ "unsupported format: "+format,
"",
context,
)
- gibidiutils.LogError("Failed to encode output", err)
+ shared.LogError("Failed to encode output", err)
close(done)
}
}
diff --git a/fileproc/writer_test.go b/fileproc/writer_test.go
index 88313ce..681a352 100644
--- a/fileproc/writer_test.go
+++ b/fileproc/writer_test.go
@@ -2,17 +2,23 @@ package fileproc_test
import (
"encoding/json"
+ "errors"
+ "fmt"
+ "io"
"os"
+ "path/filepath"
"strings"
"sync"
"testing"
+ "time"
"gopkg.in/yaml.v3"
"github.com/ivuorinen/gibidify/fileproc"
+ "github.com/ivuorinen/gibidify/shared"
)
-func TestStartWriter_Formats(t *testing.T) {
+func TestStartWriterFormats(t *testing.T) {
// Define table-driven test cases
tests := []struct {
name string
@@ -26,15 +32,17 @@ func TestStartWriter_Formats(t *testing.T) {
}
for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- data := runWriterTest(t, tc.format)
- if tc.expectError {
- verifyErrorOutput(t, data)
- } else {
- verifyValidOutput(t, data, tc.format)
- verifyPrefixSuffix(t, data)
- }
- })
+ t.Run(
+ tc.name, func(t *testing.T) {
+ data := runWriterTest(t, tc.format)
+ if tc.expectError {
+ verifyErrorOutput(t, data)
+ } else {
+ verifyValidOutput(t, data, tc.format)
+ verifyPrefixSuffix(t, data)
+ }
+ },
+ )
}
}
@@ -43,7 +51,7 @@ func runWriterTest(t *testing.T, format string) []byte {
t.Helper()
outFile, err := os.CreateTemp(t.TempDir(), "gibidify_test_output")
if err != nil {
- t.Fatalf("Failed to create temp file: %v", err)
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
}
defer func() {
if closeErr := outFile.Close(); closeErr != nil {
@@ -59,25 +67,23 @@ func runWriterTest(t *testing.T, format string) []byte {
doneCh := make(chan struct{})
// Write a couple of sample requests
- writeCh <- fileproc.WriteRequest{Path: "sample.go", Content: "package main"}
+ writeCh <- fileproc.WriteRequest{Path: "sample.go", Content: shared.LiteralPackageMain}
writeCh <- fileproc.WriteRequest{Path: "example.py", Content: "def foo(): pass"}
close(writeCh)
// Start the writer
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- fileproc.StartWriter(outFile, writeCh, doneCh, fileproc.WriterConfig{
- Format: format,
- Prefix: "PREFIX",
- Suffix: "SUFFIX",
- })
- }()
+ wg.Go(func() {
+ fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
+ })
// Wait until writer signals completion
wg.Wait()
- <-doneCh // make sure all writes finished
+ select {
+ case <-doneCh: // make sure all writes finished
+ case <-time.After(3 * time.Second):
+ t.Fatal(shared.TestMsgTimeoutWriterCompletion)
+ }
// Read output
data, err := os.ReadFile(outFile.Name())
@@ -115,6 +121,11 @@ func verifyValidOutput(t *testing.T, data []byte, format string) {
if !strings.Contains(content, "```") {
t.Error("Expected markdown code fences not found")
}
+ default:
+ // Unknown format - basic validation that we have content
+ if len(content) == 0 {
+ t.Errorf("Unexpected format %s with empty content", format)
+ }
}
}
@@ -129,3 +140,490 @@ func verifyPrefixSuffix(t *testing.T, data []byte) {
t.Errorf("Missing suffix in output: %s", data)
}
}
+
+// verifyPrefixSuffixWith checks that output contains expected custom prefix and suffix.
+func verifyPrefixSuffixWith(t *testing.T, data []byte, expectedPrefix, expectedSuffix string) {
+ t.Helper()
+ content := string(data)
+ if !strings.Contains(content, expectedPrefix) {
+ t.Errorf("Missing prefix '%s' in output: %s", expectedPrefix, data)
+ }
+ if !strings.Contains(content, expectedSuffix) {
+ t.Errorf("Missing suffix '%s' in output: %s", expectedSuffix, data)
+ }
+}
+
+// TestStartWriterStreamingFormats tests streaming functionality in all writers.
+func TestStartWriterStreamingFormats(t *testing.T) {
+ tests := []struct {
+ name string
+ format string
+ content string
+ }{
+ {"JSON streaming", "json", strings.Repeat("line\n", 1000)},
+ {"YAML streaming", "yaml", strings.Repeat("data: value\n", 1000)},
+ {"Markdown streaming", "markdown", strings.Repeat("# Header\nContent\n", 1000)},
+ }
+
+ for _, tc := range tests {
+ t.Run(
+ tc.name, func(t *testing.T) {
+ data := runStreamingWriterTest(t, tc.format, tc.content)
+
+ // Verify output is not empty
+ if len(data) == 0 {
+ t.Error("Expected streaming output but got empty result")
+ }
+
+ // Format-specific validation
+ verifyValidOutput(t, data, tc.format)
+ verifyPrefixSuffixWith(t, data, "STREAM_PREFIX", "STREAM_SUFFIX")
+
+ // Verify content was written
+ content := string(data)
+ if !strings.Contains(content, shared.TestFileStreamTest) {
+ t.Error("Expected file path in streaming output")
+ }
+ },
+ )
+ }
+}
+
+// runStreamingWriterTest executes the writer with streaming content.
+func runStreamingWriterTest(t *testing.T, format, content string) []byte {
+ t.Helper()
+
+ // Create temp file with content for streaming
+ contentFile, err := os.CreateTemp(t.TempDir(), "content_*.txt")
+ if err != nil {
+ t.Fatalf("Failed to create content file: %v", err)
+ }
+ defer func() {
+ if err := os.Remove(contentFile.Name()); err != nil {
+ t.Logf("Failed to remove content file: %v", err)
+ }
+ }()
+
+ if _, err := contentFile.WriteString(content); err != nil {
+ t.Fatalf("Failed to write content file: %v", err)
+ }
+ if err := contentFile.Close(); err != nil {
+ t.Fatalf("Failed to close content file: %v", err)
+ }
+
+ // Create output file
+ outFile, err := os.CreateTemp(t.TempDir(), "gibidify_stream_test_output")
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+ defer func() {
+ if closeErr := outFile.Close(); closeErr != nil {
+ t.Errorf("close temp file: %v", closeErr)
+ }
+ if removeErr := os.Remove(outFile.Name()); removeErr != nil {
+ t.Errorf("remove temp file: %v", removeErr)
+ }
+ }()
+
+ // Prepare channels with streaming request
+ writeCh := make(chan fileproc.WriteRequest, 1)
+ doneCh := make(chan struct{})
+
+ // Create reader for streaming
+ reader, err := os.Open(contentFile.Name())
+ if err != nil {
+ t.Fatalf("Failed to open content file for reading: %v", err)
+ }
+ defer func() {
+ if err := reader.Close(); err != nil {
+ t.Logf("Failed to close reader: %v", err)
+ }
+ }()
+
+ // Write streaming request
+ writeCh <- fileproc.WriteRequest{
+ Path: shared.TestFileStreamTest,
+ Content: "", // Empty for streaming
+ IsStream: true,
+ Reader: reader,
+ }
+ close(writeCh)
+
+ // Start the writer
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ fileproc.StartWriter(outFile, writeCh, doneCh, format, "STREAM_PREFIX", "STREAM_SUFFIX")
+ })
+
+ // Wait until writer signals completion
+ wg.Wait()
+ select {
+ case <-doneCh:
+ case <-time.After(3 * time.Second):
+ t.Fatal(shared.TestMsgTimeoutWriterCompletion)
+ }
+
+ // Read output
+ data, err := os.ReadFile(outFile.Name())
+ if err != nil {
+ t.Fatalf("Error reading output file: %v", err)
+ }
+
+ return data
+}
+
+// setupReadOnlyFile creates a read-only file for error testing.
+func setupReadOnlyFile(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
+ t.Helper()
+
+ outPath := filepath.Join(t.TempDir(), "readonly_out")
+ outFile, err := os.Create(outPath)
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ // Close writable FD and reopen as read-only so writes will fail
+ _ = outFile.Close()
+ outFile, err = os.OpenFile(outPath, os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("Failed to reopen as read-only: %v", err)
+ }
+
+ writeCh := make(chan fileproc.WriteRequest, 1)
+ doneCh := make(chan struct{})
+
+ writeCh <- fileproc.WriteRequest{
+ Path: shared.TestFileGo,
+ Content: shared.LiteralPackageMain,
+ }
+ close(writeCh)
+
+ return outFile, writeCh, doneCh
+}
+
+// setupStreamingError creates a streaming request with a failing reader.
+func setupStreamingError(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
+ t.Helper()
+
+ outFile, err := os.CreateTemp(t.TempDir(), "yaml_stream_*")
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ writeCh := make(chan fileproc.WriteRequest, 1)
+ doneCh := make(chan struct{})
+
+ pr, pw := io.Pipe()
+ if err := pw.CloseWithError(errors.New("simulated stream error")); err != nil {
+ t.Fatalf("failed to set pipe error: %v", err)
+ }
+
+ writeCh <- fileproc.WriteRequest{
+ Path: "stream_fail.yaml",
+ Content: "", // Empty for streaming
+ IsStream: true,
+ Reader: pr,
+ }
+ close(writeCh)
+
+ return outFile, writeCh, doneCh
+}
+
+// setupSpecialCharacters creates requests with special characters.
+func setupSpecialCharacters(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
+ t.Helper()
+
+ outFile, err := os.CreateTemp(t.TempDir(), "markdown_special_*")
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ writeCh := make(chan fileproc.WriteRequest, 2)
+ doneCh := make(chan struct{})
+
+ writeCh <- fileproc.WriteRequest{
+ Path: "special\ncharacters.md",
+ Content: "Content with\x00null bytes and\ttabs",
+ }
+
+ writeCh <- fileproc.WriteRequest{
+ Path: "empty.md",
+ Content: "",
+ }
+ close(writeCh)
+
+ return outFile, writeCh, doneCh
+}
+
+// runErrorHandlingTest runs a single error handling test.
+func runErrorHandlingTest(
+ t *testing.T,
+ outFile *os.File,
+ writeCh chan fileproc.WriteRequest,
+ doneCh chan struct{},
+ format string,
+ expectEmpty bool,
+) {
+ t.Helper()
+
+ defer func() {
+ if err := os.Remove(outFile.Name()); err != nil {
+ t.Logf("Failed to remove temp file: %v", err)
+ }
+ }()
+ defer func() {
+ if err := outFile.Close(); err != nil {
+ t.Logf("Failed to close temp file: %v", err)
+ }
+ }()
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
+ })
+
+ wg.Wait()
+
+ // Wait for doneCh with timeout to prevent test hangs
+ select {
+ case <-doneCh:
+ case <-time.After(3 * time.Second):
+ t.Fatal(shared.TestMsgTimeoutWriterCompletion)
+ }
+
+ // Read output file and verify based on expectation
+ data, err := os.ReadFile(outFile.Name())
+ if err != nil {
+ t.Fatalf("Failed to read output file: %v", err)
+ }
+
+ if expectEmpty && len(data) != 0 {
+ t.Errorf("expected empty output on error, got %d bytes", len(data))
+ }
+ if !expectEmpty && len(data) == 0 {
+ t.Error("expected non-empty output, got empty")
+ }
+}
+
+// TestStartWriterErrorHandling tests error scenarios in writers.
+func TestStartWriterErrorHandling(t *testing.T) {
+ tests := []struct {
+ name string
+ format string
+ setupError func(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{})
+ expectEmptyOutput bool
+ }{
+ {
+ name: "JSON writer with read-only file",
+ format: "json",
+ setupError: setupReadOnlyFile,
+ expectEmptyOutput: true,
+ },
+ {
+ name: "YAML writer with streaming error",
+ format: "yaml",
+ setupError: setupStreamingError,
+ expectEmptyOutput: false, // Partial writes are acceptable before streaming errors
+ },
+ {
+ name: "Markdown writer with special characters",
+ format: "markdown",
+ setupError: setupSpecialCharacters,
+ expectEmptyOutput: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(
+ tc.name, func(t *testing.T) {
+ outFile, writeCh, doneCh := tc.setupError(t)
+ runErrorHandlingTest(t, outFile, writeCh, doneCh, tc.format, tc.expectEmptyOutput)
+ },
+ )
+ }
+}
+
+// setupCloseTest sets up files and channels for close testing.
+func setupCloseTest(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
+ t.Helper()
+
+ outFile, err := os.CreateTemp(t.TempDir(), "close_test_*")
+ if err != nil {
+ t.Fatalf(shared.TestMsgFailedToCreateFile, err)
+ }
+
+ writeCh := make(chan fileproc.WriteRequest, 5)
+ doneCh := make(chan struct{})
+
+ for i := 0; i < 5; i++ {
+ writeCh <- fileproc.WriteRequest{
+ Path: fmt.Sprintf("file%d.txt", i),
+ Content: fmt.Sprintf("Content %d", i),
+ }
+ }
+ close(writeCh)
+
+ return outFile, writeCh, doneCh
+}
+
+// runCloseTest executes writer and validates output.
+func runCloseTest(
+ t *testing.T,
+ outFile *os.File,
+ writeCh chan fileproc.WriteRequest,
+ doneCh chan struct{},
+ format string,
+) {
+ t.Helper()
+
+ defer func() {
+ if err := os.Remove(outFile.Name()); err != nil {
+ t.Logf("Failed to remove temp file: %v", err)
+ }
+ }()
+ defer func() {
+ if err := outFile.Close(); err != nil {
+ t.Logf("Failed to close temp file: %v", err)
+ }
+ }()
+
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ fileproc.StartWriter(outFile, writeCh, doneCh, format, "TEST_PREFIX", "TEST_SUFFIX")
+ })
+
+ wg.Wait()
+ select {
+ case <-doneCh:
+ case <-time.After(3 * time.Second):
+ t.Fatal(shared.TestMsgTimeoutWriterCompletion)
+ }
+
+ data, err := os.ReadFile(outFile.Name())
+ if err != nil {
+ t.Fatalf("Failed to read output file: %v", err)
+ }
+
+ if len(data) == 0 {
+ t.Error("Expected non-empty output file")
+ }
+
+ verifyPrefixSuffixWith(t, data, "TEST_PREFIX", "TEST_SUFFIX")
+}
+
+// TestStartWriterWriterCloseErrors tests error handling during writer close operations.
+func TestStartWriterWriterCloseErrors(t *testing.T) {
+ tests := []struct {
+ name string
+ format string
+ }{
+ {"JSON close handling", "json"},
+ {"YAML close handling", "yaml"},
+ {"Markdown close handling", "markdown"},
+ }
+
+ for _, tc := range tests {
+ t.Run(
+ tc.name, func(t *testing.T) {
+ outFile, writeCh, doneCh := setupCloseTest(t)
+ runCloseTest(t, outFile, writeCh, doneCh, tc.format)
+ },
+ )
+ }
+}
+
+// Benchmarks for writer performance
+
+// BenchmarkStartWriter benchmarks basic writer operations across formats.
+func BenchmarkStartWriter(b *testing.B) {
+ formats := []string{"json", "yaml", "markdown"}
+
+ for _, format := range formats {
+ b.Run(format, func(b *testing.B) {
+ for b.Loop() {
+ outFile, err := os.CreateTemp(b.TempDir(), "bench_output_*")
+ if err != nil {
+ b.Fatalf("Failed to create temp file: %v", err)
+ }
+
+ writeCh := make(chan fileproc.WriteRequest, 2)
+ doneCh := make(chan struct{})
+
+ writeCh <- fileproc.WriteRequest{Path: "sample.go", Content: shared.LiteralPackageMain}
+ writeCh <- fileproc.WriteRequest{Path: "example.py", Content: "def foo(): pass"}
+ close(writeCh)
+
+ fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
+ <-doneCh
+
+ _ = outFile.Close()
+ }
+ })
+ }
+}
+
+// benchStreamingIteration runs a single streaming benchmark iteration.
+func benchStreamingIteration(b *testing.B, format, content string) {
+ b.Helper()
+
+ contentFile := createBenchContentFile(b, content)
+ defer func() { _ = os.Remove(contentFile) }()
+
+ reader, err := os.Open(contentFile)
+ if err != nil {
+ b.Fatalf("Failed to open content file: %v", err)
+ }
+ defer func() { _ = reader.Close() }()
+
+ outFile, err := os.CreateTemp(b.TempDir(), "bench_stream_output_*")
+ if err != nil {
+ b.Fatalf("Failed to create output file: %v", err)
+ }
+ defer func() { _ = outFile.Close() }()
+
+ writeCh := make(chan fileproc.WriteRequest, 1)
+ doneCh := make(chan struct{})
+
+ writeCh <- fileproc.WriteRequest{
+ Path: shared.TestFileStreamTest,
+ Content: "",
+ IsStream: true,
+ Reader: reader,
+ }
+ close(writeCh)
+
+ fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
+ <-doneCh
+}
+
+// createBenchContentFile creates a temp file with content for benchmarks.
+func createBenchContentFile(b *testing.B, content string) string {
+ b.Helper()
+
+ contentFile, err := os.CreateTemp(b.TempDir(), "content_*")
+ if err != nil {
+ b.Fatalf("Failed to create content file: %v", err)
+ }
+ if _, err := contentFile.WriteString(content); err != nil {
+ b.Fatalf("Failed to write content: %v", err)
+ }
+ if err := contentFile.Close(); err != nil {
+ b.Fatalf("Failed to close content file: %v", err)
+ }
+
+ return contentFile.Name()
+}
+
+// BenchmarkStartWriterStreaming benchmarks streaming writer operations across formats.
+func BenchmarkStartWriterStreaming(b *testing.B) {
+ formats := []string{"json", "yaml", "markdown"}
+ content := strings.Repeat("line content\n", 1000)
+
+ for _, format := range formats {
+ b.Run(format, func(b *testing.B) {
+ for b.Loop() {
+ benchStreamingIteration(b, format, content)
+ }
+ })
+ }
+}
diff --git a/fileproc/yaml_writer.go b/fileproc/yaml_writer.go
index 0a4dc75..8441479 100644
--- a/fileproc/yaml_writer.go
+++ b/fileproc/yaml_writer.go
@@ -1,14 +1,12 @@
+// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
- "bufio"
"fmt"
- "io"
"os"
- "path/filepath"
"strings"
- "github.com/ivuorinen/gibidify/gibidiutils"
+ "github.com/ivuorinen/gibidify/shared"
)
// YAMLWriter handles YAML format output with streaming support.
@@ -21,152 +19,18 @@ func NewYAMLWriter(outFile *os.File) *YAMLWriter {
return &YAMLWriter{outFile: outFile}
}
-const (
- maxPathLength = 4096 // Maximum total path length
- maxFilenameLength = 255 // Maximum individual filename component length
-)
-
-// validatePathComponents validates individual path components for security issues.
-func validatePathComponents(trimmed, cleaned string, components []string) error {
- for i, component := range components {
- // Reject path components that are exactly ".." (path traversal)
- if component == ".." {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path traversal not allowed",
- trimmed,
- map[string]any{
- "path": trimmed,
- "cleaned": cleaned,
- "invalid_component": component,
- "component_index": i,
- },
- )
- }
-
- // Reject empty components (e.g., from "foo//bar")
- if component == "" && i > 0 && i < len(components)-1 {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path contains empty component",
- trimmed,
- map[string]any{
- "path": trimmed,
- "cleaned": cleaned,
- "component_index": i,
- },
- )
- }
-
- // Enforce maximum filename length for each component
- if len(component) > maxFilenameLength {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path component exceeds maximum length",
- trimmed,
- map[string]any{
- "component": component,
- "component_length": len(component),
- "max_length": maxFilenameLength,
- "component_index": i,
- },
- )
- }
- }
- return nil
-}
-
-// validatePath validates and sanitizes a file path for safe output.
-// It rejects absolute paths, path traversal attempts, empty paths, and overly long paths.
-func validatePath(path string) error {
- // Reject empty paths
- trimmed := strings.TrimSpace(path)
- if trimmed == "" {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationRequired,
- "file path cannot be empty",
- "",
- nil,
- )
- }
-
- // Enforce maximum path length to prevent resource abuse
- if len(trimmed) > maxPathLength {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path exceeds maximum length",
- trimmed,
- map[string]any{
- "path_length": len(trimmed),
- "max_length": maxPathLength,
- },
- )
- }
-
- // Reject absolute paths
- if filepath.IsAbs(trimmed) {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "absolute paths are not allowed",
- trimmed,
- map[string]any{"path": trimmed},
- )
- }
-
- // Validate original trimmed path components before cleaning
- origComponents := strings.Split(filepath.ToSlash(trimmed), "/")
- for _, comp := range origComponents {
- if comp == "" || comp == "." || comp == ".." {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "invalid or traversal path component in original path",
- trimmed,
- map[string]any{"path": trimmed, "component": comp},
- )
- }
- }
-
- // Clean the path to normalize it
- cleaned := filepath.Clean(trimmed)
-
- // After cleaning, ensure it's still relative and doesn't start with /
- if filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, "/") {
- return gibidiutils.NewStructuredError(
- gibidiutils.ErrorTypeValidation,
- gibidiutils.CodeValidationPath,
- "path must be relative",
- trimmed,
- map[string]any{"path": trimmed, "cleaned": cleaned},
- )
- }
-
- // Split into components and validate each one
- // Use ToSlash to normalize for cross-platform validation
- components := strings.Split(filepath.ToSlash(cleaned), "/")
- return validatePathComponents(trimmed, cleaned, components)
-}
-
// Start writes the YAML header.
func (w *YAMLWriter) Start(prefix, suffix string) error {
// Write YAML header
if _, err := fmt.Fprintf(
- w.outFile, "prefix: %s\nsuffix: %s\nfiles:\n",
- gibidiutils.EscapeForYAML(prefix), gibidiutils.EscapeForYAML(suffix),
+ w.outFile,
+ "prefix: %s\nsuffix: %s\nfiles:\n",
+ shared.EscapeForYAML(prefix),
+ shared.EscapeForYAML(suffix),
); err != nil {
- return gibidiutils.WrapError(
- err,
- gibidiutils.ErrorTypeIO,
- gibidiutils.CodeIOWrite,
- "failed to write YAML header",
- )
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write YAML header")
}
+
return nil
}
@@ -175,6 +39,7 @@ func (w *YAMLWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
}
+
return w.writeInline(req)
}
@@ -185,45 +50,39 @@ func (w *YAMLWriter) Close() error {
// writeStreaming writes a large file as YAML in streaming chunks.
func (w *YAMLWriter) writeStreaming(req WriteRequest) error {
- // Validate path before using it
- if err := validatePath(req.Path); err != nil {
- return err
- }
-
- // Check for nil reader
- if req.Reader == nil {
- return gibidiutils.WrapError(
- nil, gibidiutils.ErrorTypeValidation, gibidiutils.CodeValidationRequired,
- "nil reader in write request",
- ).WithFilePath(req.Path)
- }
-
- defer gibidiutils.SafeCloseReader(req.Reader, req.Path)
+ defer shared.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write YAML file entry start
if _, err := fmt.Fprintf(
- w.outFile, " - path: %s\n language: %s\n content: |\n",
- gibidiutils.EscapeForYAML(req.Path), language,
+ w.outFile,
+ shared.YAMLFmtFileEntry,
+ shared.EscapeForYAML(req.Path),
+ language,
); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write YAML file start",
).WithFilePath(req.Path)
}
// Stream content with YAML indentation
- return w.streamYAMLContent(req.Reader, req.Path)
+ if err := shared.StreamLines(
+ req.Reader, w.outFile, req.Path, func(line string) string {
+ return " " + line
+ },
+ ); err != nil {
+ return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "streaming YAML content")
+ }
+
+ return nil
}
// writeInline writes a small file directly as YAML.
func (w *YAMLWriter) writeInline(req WriteRequest) error {
- // Validate path before using it
- if err := validatePath(req.Path); err != nil {
- return err
- }
-
language := detectLanguage(req.Path)
fileData := FileData{
Path: req.Path,
@@ -233,11 +92,15 @@ func (w *YAMLWriter) writeInline(req WriteRequest) error {
// Write YAML entry
if _, err := fmt.Fprintf(
- w.outFile, " - path: %s\n language: %s\n content: |\n",
- gibidiutils.EscapeForYAML(fileData.Path), fileData.Language,
+ w.outFile,
+ shared.YAMLFmtFileEntry,
+ shared.EscapeForYAML(fileData.Path),
+ fileData.Language,
); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write YAML entry start",
).WithFilePath(req.Path)
}
@@ -246,8 +109,10 @@ func (w *YAMLWriter) writeInline(req WriteRequest) error {
lines := strings.Split(fileData.Content, "\n")
for _, line := range lines {
if _, err := fmt.Fprintf(w.outFile, " %s\n", line); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
+ return shared.WrapError(
+ err,
+ shared.ErrorTypeIO,
+ shared.CodeIOWrite,
"failed to write YAML content line",
).WithFilePath(req.Path)
}
@@ -256,53 +121,9 @@ func (w *YAMLWriter) writeInline(req WriteRequest) error {
return nil
}
-// streamYAMLContent streams content with YAML indentation.
-func (w *YAMLWriter) streamYAMLContent(reader io.Reader, path string) error {
- scanner := bufio.NewScanner(reader)
- // Increase buffer size to handle long lines (up to 10MB per line)
- buf := make([]byte, 0, 64*1024)
- scanner.Buffer(buf, 10*1024*1024)
-
- for scanner.Scan() {
- line := scanner.Text()
- if _, err := fmt.Fprintf(w.outFile, " %s\n", line); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
- "failed to write YAML line",
- ).WithFilePath(path)
- }
- }
-
- if err := scanner.Err(); err != nil {
- return gibidiutils.WrapError(
- err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOFileRead,
- "failed to scan YAML content",
- ).WithFilePath(path)
- }
- return nil
-}
-
// startYAMLWriter handles YAML format output with streaming support.
func startYAMLWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
- defer close(done)
-
- writer := NewYAMLWriter(outFile)
-
- // Start writing
- if err := writer.Start(prefix, suffix); err != nil {
- gibidiutils.LogError("Failed to write YAML header", err)
- return
- }
-
- // Process files
- for req := range writeCh {
- if err := writer.WriteFile(req); err != nil {
- gibidiutils.LogError("Failed to write YAML file", err)
- }
- }
-
- // Close writer
- if err := writer.Close(); err != nil {
- gibidiutils.LogError("Failed to write YAML end", err)
- }
+ startFormatWriter(outFile, writeCh, done, prefix, suffix, func(f *os.File) FormatWriter {
+ return NewYAMLWriter(f)
+ })
}
diff --git a/gibidiutils/errors_additional_test.go b/gibidiutils/errors_additional_test.go
deleted file mode 100644
index 8d42f6b..0000000
--- a/gibidiutils/errors_additional_test.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package gibidiutils
-
-import (
- "errors"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestErrorTypeString(t *testing.T) {
- tests := []struct {
- name string
- errType ErrorType
- expected string
- }{
- {
- name: "CLI error type",
- errType: ErrorTypeCLI,
- expected: "CLI",
- },
- {
- name: "FileSystem error type",
- errType: ErrorTypeFileSystem,
- expected: "FileSystem",
- },
- {
- name: "Processing error type",
- errType: ErrorTypeProcessing,
- expected: "Processing",
- },
- {
- name: "Configuration error type",
- errType: ErrorTypeConfiguration,
- expected: "Configuration",
- },
- {
- name: "IO error type",
- errType: ErrorTypeIO,
- expected: "IO",
- },
- {
- name: "Validation error type",
- errType: ErrorTypeValidation,
- expected: "Validation",
- },
- {
- name: "Unknown error type",
- errType: ErrorTypeUnknown,
- expected: "Unknown",
- },
- {
- name: "Invalid error type",
- errType: ErrorType(999),
- expected: "Unknown",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- result := tt.errType.String()
- assert.Equal(t, tt.expected, result)
- })
- }
-}
-
-func TestStructuredErrorMethods(t *testing.T) {
- t.Run("Error method", func(t *testing.T) {
- err := &StructuredError{
- Type: ErrorTypeValidation,
- Code: CodeValidationRequired,
- Message: "field is required",
- }
- expected := "Validation [REQUIRED]: field is required"
- assert.Equal(t, expected, err.Error())
- })
-
- t.Run("Error method with context", func(t *testing.T) {
- err := &StructuredError{
- Type: ErrorTypeFileSystem,
- Code: CodeFSNotFound,
- Message: testErrFileNotFound,
- Context: map[string]interface{}{
- "path": "/test/file.txt",
- },
- }
- errStr := err.Error()
- assert.Contains(t, errStr, "FileSystem")
- assert.Contains(t, errStr, "NOT_FOUND")
- assert.Contains(t, errStr, testErrFileNotFound)
- assert.Contains(t, errStr, "/test/file.txt")
- assert.Contains(t, errStr, "path")
- })
-
- t.Run("Unwrap method", func(t *testing.T) {
- innerErr := errors.New("inner error")
- err := &StructuredError{
- Type: ErrorTypeIO,
- Code: CodeIOFileWrite,
- Message: testErrWriteFailed,
- Cause: innerErr,
- }
- assert.Equal(t, innerErr, err.Unwrap())
- })
-
- t.Run("Unwrap with nil cause", func(t *testing.T) {
- err := &StructuredError{
- Type: ErrorTypeIO,
- Code: CodeIOFileWrite,
- Message: testErrWriteFailed,
- }
- assert.Nil(t, err.Unwrap())
- })
-}
-
-func TestWithContextMethods(t *testing.T) {
- t.Run("WithContext", func(t *testing.T) {
- err := &StructuredError{
- Type: ErrorTypeValidation,
- Code: CodeValidationFormat,
- Message: testErrInvalidFormat,
- }
-
- err = err.WithContext("format", "xml")
- err = err.WithContext("expected", "json")
-
- assert.NotNil(t, err.Context)
- assert.Equal(t, "xml", err.Context["format"])
- assert.Equal(t, "json", err.Context["expected"])
- })
-
- t.Run("WithFilePath", func(t *testing.T) {
- err := &StructuredError{
- Type: ErrorTypeFileSystem,
- Code: CodeFSPermission,
- Message: "permission denied",
- }
-
- err = err.WithFilePath("/etc/passwd")
-
- assert.Equal(t, "/etc/passwd", err.FilePath)
- })
-
- t.Run("WithLine", func(t *testing.T) {
- err := &StructuredError{
- Type: ErrorTypeProcessing,
- Code: CodeProcessingFileRead,
- Message: "read error",
- }
-
- err = err.WithLine(42)
-
- assert.Equal(t, 42, err.Line)
- })
-}
-
-func TestNewStructuredError(t *testing.T) {
- tests := []struct {
- name string
- errType ErrorType
- code string
- message string
- filePath string
- context map[string]interface{}
- }{
- {
- name: "basic error",
- errType: ErrorTypeValidation,
- code: CodeValidationRequired,
- message: "field is required",
- filePath: "",
- context: nil,
- },
- {
- name: "error with file path",
- errType: ErrorTypeFileSystem,
- code: CodeFSNotFound,
- message: testErrFileNotFound,
- filePath: "/test/missing.txt",
- context: nil,
- },
- {
- name: "error with context",
- errType: ErrorTypeIO,
- code: CodeIOFileWrite,
- message: testErrWriteFailed,
- context: map[string]interface{}{
- "size": 1024,
- "error": "disk full",
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := NewStructuredError(tt.errType, tt.code, tt.message, tt.filePath, tt.context)
-
- assert.NotNil(t, err)
- assert.Equal(t, tt.errType, err.Type)
- assert.Equal(t, tt.code, err.Code)
- assert.Equal(t, tt.message, err.Message)
- assert.Equal(t, tt.filePath, err.FilePath)
- assert.Equal(t, tt.context, err.Context)
- })
- }
-}
-
-func TestNewStructuredErrorf(t *testing.T) {
- err := NewStructuredErrorf(
- ErrorTypeValidation,
- CodeValidationSize,
- "file size %d exceeds limit %d",
- 2048, 1024,
- )
-
- assert.NotNil(t, err)
- assert.Equal(t, ErrorTypeValidation, err.Type)
- assert.Equal(t, CodeValidationSize, err.Code)
- assert.Equal(t, "file size 2048 exceeds limit 1024", err.Message)
-}
-
-func TestWrapError(t *testing.T) {
- innerErr := errors.New("original error")
- wrappedErr := WrapError(
- innerErr,
- ErrorTypeProcessing,
- CodeProcessingFileRead,
- "failed to process file",
- )
-
- assert.NotNil(t, wrappedErr)
- assert.Equal(t, ErrorTypeProcessing, wrappedErr.Type)
- assert.Equal(t, CodeProcessingFileRead, wrappedErr.Code)
- assert.Equal(t, "failed to process file", wrappedErr.Message)
- assert.Equal(t, innerErr, wrappedErr.Cause)
-}
-
-func TestWrapErrorf(t *testing.T) {
- innerErr := errors.New("original error")
- wrappedErr := WrapErrorf(
- innerErr,
- ErrorTypeIO,
- CodeIOFileCreate,
- "failed to create %s in %s",
- "output.txt", "/tmp",
- )
-
- assert.NotNil(t, wrappedErr)
- assert.Equal(t, ErrorTypeIO, wrappedErr.Type)
- assert.Equal(t, CodeIOFileCreate, wrappedErr.Code)
- assert.Equal(t, "failed to create output.txt in /tmp", wrappedErr.Message)
- assert.Equal(t, innerErr, wrappedErr.Cause)
-}
-
-func TestSpecificErrorConstructors(t *testing.T) {
- t.Run("NewMissingSourceError", func(t *testing.T) {
- err := NewMissingSourceError()
- assert.NotNil(t, err)
- assert.Equal(t, ErrorTypeCLI, err.Type)
- assert.Equal(t, CodeCLIMissingSource, err.Code)
- assert.Contains(t, err.Message, "source")
- })
-
- t.Run("NewFileSystemError", func(t *testing.T) {
- err := NewFileSystemError(CodeFSPermission, "access denied")
- assert.NotNil(t, err)
- assert.Equal(t, ErrorTypeFileSystem, err.Type)
- assert.Equal(t, CodeFSPermission, err.Code)
- assert.Equal(t, "access denied", err.Message)
- })
-
- t.Run("NewProcessingError", func(t *testing.T) {
- err := NewProcessingError(CodeProcessingCollection, "collection failed")
- assert.NotNil(t, err)
- assert.Equal(t, ErrorTypeProcessing, err.Type)
- assert.Equal(t, CodeProcessingCollection, err.Code)
- assert.Equal(t, "collection failed", err.Message)
- })
-
- t.Run("NewIOError", func(t *testing.T) {
- err := NewIOError(CodeIOFileWrite, testErrWriteFailed)
- assert.NotNil(t, err)
- assert.Equal(t, ErrorTypeIO, err.Type)
- assert.Equal(t, CodeIOFileWrite, err.Code)
- assert.Equal(t, testErrWriteFailed, err.Message)
- })
-
- t.Run("NewValidationError", func(t *testing.T) {
- err := NewValidationError(CodeValidationFormat, testErrInvalidFormat)
- assert.NotNil(t, err)
- assert.Equal(t, ErrorTypeValidation, err.Type)
- assert.Equal(t, CodeValidationFormat, err.Code)
- assert.Equal(t, testErrInvalidFormat, err.Message)
- })
-}
-
-// TestLogErrorf is already covered in errors_test.go
-
-func TestStructuredErrorChaining(t *testing.T) {
- // Test method chaining
- err := NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSNotFound,
- testErrFileNotFound,
- "",
- nil,
- ).WithFilePath("/test.txt").WithLine(10).WithContext("operation", "read")
-
- assert.Equal(t, "/test.txt", err.FilePath)
- assert.Equal(t, 10, err.Line)
- assert.Equal(t, "read", err.Context["operation"])
-}
-
-func TestErrorCodes(t *testing.T) {
- // Test that all error codes are defined
- codes := []string{
- CodeCLIMissingSource,
- CodeCLIInvalidArgs,
- CodeFSPathResolution,
- CodeFSPermission,
- CodeFSNotFound,
- CodeFSAccess,
- CodeProcessingFileRead,
- CodeProcessingCollection,
- CodeProcessingTraversal,
- CodeProcessingEncode,
- CodeConfigValidation,
- CodeConfigMissing,
- CodeIOFileCreate,
- CodeIOFileWrite,
- CodeIOEncoding,
- CodeIOWrite,
- CodeIOFileRead,
- CodeIOClose,
- CodeValidationRequired,
- CodeValidationFormat,
- CodeValidationSize,
- CodeValidationPath,
- CodeResourceLimitFiles,
- CodeResourceLimitTotalSize,
- CodeResourceLimitMemory,
- CodeResourceLimitTimeout,
- }
-
- // All codes should be non-empty strings
- for _, code := range codes {
- assert.NotEmpty(t, code, "Error code should not be empty")
- assert.NotEqual(t, "", code, "Error code should be defined")
- }
-}
-
-func TestErrorUnwrapChain(t *testing.T) {
- // Test unwrapping through multiple levels
- innermost := errors.New("innermost error")
- middle := WrapError(innermost, ErrorTypeIO, CodeIOFileRead, "read failed")
- outer := WrapError(middle, ErrorTypeProcessing, CodeProcessingFileRead, "processing failed")
-
- // Test unwrapping
- assert.Equal(t, middle, outer.Unwrap())
- assert.Equal(t, innermost, middle.Unwrap())
-
- // innermost is a plain error, doesn't have Unwrap() method
- // No need to test it
-
- // Test error chain messages
- assert.Contains(t, outer.Error(), "Processing")
- assert.Contains(t, middle.Error(), "IO")
-}
diff --git a/gibidiutils/errors_test.go b/gibidiutils/errors_test.go
deleted file mode 100644
index c401f2f..0000000
--- a/gibidiutils/errors_test.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Package gibidiutils provides common utility functions for gibidify.
-package gibidiutils
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strings"
- "testing"
-
- "github.com/sirupsen/logrus"
-)
-
-// captureLogOutput captures logrus output for testing
-func captureLogOutput(f func()) string {
- var buf bytes.Buffer
- logrus.SetOutput(&buf)
- defer logrus.SetOutput(logrus.StandardLogger().Out)
- f()
- return buf.String()
-}
-
-func TestLogError(t *testing.T) {
- tests := []struct {
- name string
- operation string
- err error
- args []any
- wantLog string
- wantEmpty bool
- }{
- {
- name: "nil error should not log",
- operation: "test operation",
- err: nil,
- args: nil,
- wantEmpty: true,
- },
- {
- name: "basic error logging",
- operation: "failed to read file",
- err: errors.New("permission denied"),
- args: nil,
- wantLog: "failed to read file: permission denied",
- },
- {
- name: "error with formatting args",
- operation: "failed to process file %s",
- err: errors.New("file too large"),
- args: []any{"test.txt"},
- wantLog: "failed to process file test.txt: file too large",
- },
- {
- name: "error with multiple formatting args",
- operation: "failed to copy from %s to %s",
- err: errors.New("disk full"),
- args: []any{"source.txt", "dest.txt"},
- wantLog: "failed to copy from source.txt to dest.txt: disk full",
- },
- {
- name: "wrapped error",
- operation: "database operation failed",
- err: fmt.Errorf("connection error: %w", errors.New("timeout")),
- args: nil,
- wantLog: "database operation failed: connection error: timeout",
- },
- {
- name: "empty operation string",
- operation: "",
- err: errors.New("some error"),
- args: nil,
- wantLog: ": some error",
- },
- {
- name: "operation with percentage sign",
- operation: "processing 50% complete",
- err: errors.New("interrupted"),
- args: nil,
- wantLog: "processing 50% complete: interrupted",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- output := captureLogOutput(func() {
- LogError(tt.operation, tt.err, tt.args...)
- })
-
- if tt.wantEmpty {
- if output != "" {
- t.Errorf("LogError() logged output when error was nil: %q", output)
- }
- return
- }
-
- if !strings.Contains(output, tt.wantLog) {
- t.Errorf("LogError() output = %q, want to contain %q", output, tt.wantLog)
- }
-
- // Verify it's logged at ERROR level
- if !strings.Contains(output, "level=error") {
- t.Errorf("LogError() should log at ERROR level, got: %q", output)
- }
- })
- }
-}
-
-func TestLogErrorf(t *testing.T) {
- tests := []struct {
- name string
- err error
- format string
- args []any
- wantLog string
- wantEmpty bool
- }{
- {
- name: "nil error should not log",
- err: nil,
- format: "operation %s failed",
- args: []any{"test"},
- wantEmpty: true,
- },
- {
- name: "basic formatted error",
- err: errors.New("not found"),
- format: "file %s not found",
- args: []any{"config.yaml"},
- wantLog: "file config.yaml not found: not found",
- },
- {
- name: "multiple format arguments",
- err: errors.New("invalid range"),
- format: "value %d is not between %d and %d",
- args: []any{150, 0, 100},
- wantLog: "value 150 is not between 0 and 100: invalid range",
- },
- {
- name: "no format arguments",
- err: errors.New("generic error"),
- format: "operation failed",
- args: nil,
- wantLog: "operation failed: generic error",
- },
- {
- name: "format with different types",
- err: errors.New("type mismatch"),
- format: "expected %s but got %d",
- args: []any{"string", 42},
- wantLog: "expected string but got 42: type mismatch",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- output := captureLogOutput(func() {
- LogErrorf(tt.err, tt.format, tt.args...)
- })
-
- if tt.wantEmpty {
- if output != "" {
- t.Errorf("LogErrorf() logged output when error was nil: %q", output)
- }
- return
- }
-
- if !strings.Contains(output, tt.wantLog) {
- t.Errorf("LogErrorf() output = %q, want to contain %q", output, tt.wantLog)
- }
-
- // Verify it's logged at ERROR level
- if !strings.Contains(output, "level=error") {
- t.Errorf("LogErrorf() should log at ERROR level, got: %q", output)
- }
- })
- }
-}
-
-func TestLogErrorConcurrency(_ *testing.T) {
- // Test that LogError is safe for concurrent use
- done := make(chan bool)
- for i := 0; i < 10; i++ {
- go func(n int) {
- LogError("concurrent operation", fmt.Errorf("error %d", n))
- done <- true
- }(i)
- }
-
- // Wait for all goroutines to complete
- for i := 0; i < 10; i++ {
- <-done
- }
-}
-
-func TestLogErrorfConcurrency(_ *testing.T) {
- // Test that LogErrorf is safe for concurrent use
- done := make(chan bool)
- for i := 0; i < 10; i++ {
- go func(n int) {
- LogErrorf(fmt.Errorf("error %d", n), "concurrent operation %d", n)
- done <- true
- }(i)
- }
-
- // Wait for all goroutines to complete
- for i := 0; i < 10; i++ {
- <-done
- }
-}
-
-// BenchmarkLogError benchmarks the LogError function
-func BenchmarkLogError(b *testing.B) {
- err := errors.New("benchmark error")
- // Disable output during benchmark
- logrus.SetOutput(bytes.NewBuffer(nil))
- defer logrus.SetOutput(logrus.StandardLogger().Out)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- LogError("benchmark operation", err)
- }
-}
-
-// BenchmarkLogErrorf benchmarks the LogErrorf function
-func BenchmarkLogErrorf(b *testing.B) {
- err := errors.New("benchmark error")
- // Disable output during benchmark
- logrus.SetOutput(bytes.NewBuffer(nil))
- defer logrus.SetOutput(logrus.StandardLogger().Out)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- LogErrorf(err, "benchmark operation %d", i)
- }
-}
-
-// BenchmarkLogErrorNil benchmarks LogError with nil error (no-op case)
-func BenchmarkLogErrorNil(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- LogError("benchmark operation", nil)
- }
-}
diff --git a/gibidiutils/icons.go b/gibidiutils/icons.go
deleted file mode 100644
index 75a3295..0000000
--- a/gibidiutils/icons.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package gibidiutils
-
-// Unicode icons and symbols for CLI UI and test output.
-const (
- IconSuccess = "✓" // U+2713
- IconError = "✗" // U+2717
- IconWarning = "⚠" // U+26A0
- IconBullet = "•" // U+2022
- IconInfo = "ℹ️" // U+2139 FE0F
-)
diff --git a/gibidiutils/paths.go b/gibidiutils/paths.go
deleted file mode 100644
index 93d5568..0000000
--- a/gibidiutils/paths.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Package gibidiutils provides common utility functions for gibidify.
-package gibidiutils
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-)
-
-// EscapeForMarkdown sanitizes a string for safe use in Markdown code-fence and header lines.
-// It replaces backticks with backslash-escaped backticks and removes/collapses newlines.
-func EscapeForMarkdown(s string) string {
- // Escape backticks
- safe := strings.ReplaceAll(s, "`", "\\`")
- // Remove newlines (collapse to space)
- safe = strings.ReplaceAll(safe, "\n", " ")
- safe = strings.ReplaceAll(safe, "\r", " ")
- return safe
-}
-
-// GetAbsolutePath returns the absolute path for the given path.
-// It wraps filepath.Abs with consistent error handling.
-func GetAbsolutePath(path string) (string, error) {
- abs, err := filepath.Abs(path)
- if err != nil {
- return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err)
- }
- return abs, nil
-}
-
-// GetBaseName returns the base name for the given path, handling special cases.
-func GetBaseName(absPath string) string {
- baseName := filepath.Base(absPath)
- if baseName == "." || baseName == "" {
- return "output"
- }
- return baseName
-}
-
-// checkPathTraversal checks for path traversal patterns and returns an error if found.
-func checkPathTraversal(path, context string) error {
- // Normalize separators without cleaning (to preserve ..)
- normalized := filepath.ToSlash(path)
-
- // Split into components
- components := strings.Split(normalized, "/")
-
- // Check each component for exact ".." match
- for _, component := range components {
- if component == ".." {
- return NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- fmt.Sprintf("path traversal attempt detected in %s", context),
- path,
- map[string]interface{}{
- "original_path": path,
- },
- )
- }
- }
- return nil
-}
-
-// cleanAndResolveAbsPath cleans a path and resolves it to an absolute path.
-func cleanAndResolveAbsPath(path, context string) (string, error) {
- cleaned := filepath.Clean(path)
- abs, err := filepath.Abs(cleaned)
- if err != nil {
- return "", NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSPathResolution,
- fmt.Sprintf("cannot resolve %s", context),
- path,
- map[string]interface{}{
- "error": err.Error(),
- },
- )
- }
- return abs, nil
-}
-
-// evalSymlinksOrStructuredError wraps filepath.EvalSymlinks with structured error handling.
-func evalSymlinksOrStructuredError(path, context, original string) (string, error) {
- eval, err := filepath.EvalSymlinks(path)
- if err != nil {
- return "", NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- fmt.Sprintf("cannot resolve symlinks for %s", context),
- original,
- map[string]interface{}{
- "resolved_path": path,
- "context": context,
- "error": err.Error(),
- },
- )
- }
- return eval, nil
-}
-
-// validateWorkingDirectoryBoundary checks if the given absolute path escapes the working directory.
-func validateWorkingDirectoryBoundary(abs, path string) error {
- cwd, err := os.Getwd()
- if err != nil {
- return NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSPathResolution,
- "cannot get current working directory",
- path,
- map[string]interface{}{
- "error": err.Error(),
- },
- )
- }
-
- cwdAbs, err := filepath.Abs(cwd)
- if err != nil {
- return NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSPathResolution,
- "cannot resolve current working directory",
- path,
- map[string]interface{}{
- "error": err.Error(),
- },
- )
- }
-
- absEval, err := evalSymlinksOrStructuredError(abs, "source path", path)
- if err != nil {
- return err
- }
- cwdEval, err := evalSymlinksOrStructuredError(cwdAbs, "working directory", path)
- if err != nil {
- return err
- }
-
- rel, err := filepath.Rel(cwdEval, absEval)
- if err != nil {
- return NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- "cannot determine relative path",
- path,
- map[string]interface{}{
- "resolved_path": absEval,
- "working_dir": cwdEval,
- "error": err.Error(),
- },
- )
- }
-
- if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
- return NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- "source path attempts to access directories outside current working directory",
- path,
- map[string]interface{}{
- "resolved_path": absEval,
- "working_dir": cwdEval,
- "relative_path": rel,
- },
- )
- }
-
- return nil
-}
-
-// ValidateSourcePath validates a source directory path for security.
-// It ensures the path exists, is a directory, and doesn't contain path traversal attempts.
-//
-//revive:disable-next-line:function-length
-func ValidateSourcePath(path string) error {
- if path == "" {
- return NewValidationError(CodeValidationRequired, "source path is required")
- }
-
- // Check for path traversal patterns before cleaning
- if err := checkPathTraversal(path, "source path"); err != nil {
- return err
- }
-
- // Clean and get absolute path
- abs, err := cleanAndResolveAbsPath(path, "source path")
- if err != nil {
- return err
- }
- cleaned := filepath.Clean(path)
-
- // Ensure the resolved path is within or below the current working directory for relative paths
- if !filepath.IsAbs(path) {
- if err := validateWorkingDirectoryBoundary(abs, path); err != nil {
- return err
- }
- }
-
- // Check if path exists and is a directory
- info, err := os.Stat(cleaned)
- if err != nil {
- if os.IsNotExist(err) {
- return NewFileSystemError(CodeFSNotFound, "source directory does not exist").WithFilePath(path)
- }
- return NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSAccess,
- "cannot access source directory",
- path,
- map[string]interface{}{
- "error": err.Error(),
- },
- )
- }
-
- if !info.IsDir() {
- return NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- "source path must be a directory",
- path,
- map[string]interface{}{
- "is_file": true,
- },
- )
- }
-
- return nil
-}
-
-// ValidateDestinationPath validates a destination file path for security.
-// It ensures the path doesn't contain path traversal attempts and the parent directory exists.
-func ValidateDestinationPath(path string) error {
- if path == "" {
- return NewValidationError(CodeValidationRequired, "destination path is required")
- }
-
- // Check for path traversal patterns before cleaning
- if err := checkPathTraversal(path, "destination path"); err != nil {
- return err
- }
-
- // Get absolute path to ensure it's not trying to escape current working directory
- abs, err := cleanAndResolveAbsPath(path, "destination path")
- if err != nil {
- return err
- }
-
- // Ensure the destination is not a directory
- if info, err := os.Stat(abs); err == nil && info.IsDir() {
- return NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- "destination cannot be a directory",
- path,
- map[string]interface{}{
- "is_directory": true,
- },
- )
- }
-
- // Check if parent directory exists and is writable
- parentDir := filepath.Dir(abs)
- if parentInfo, err := os.Stat(parentDir); err != nil {
- if os.IsNotExist(err) {
- return NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSNotFound,
- "destination parent directory does not exist",
- path,
- map[string]interface{}{
- "parent_dir": parentDir,
- },
- )
- }
- return NewStructuredError(
- ErrorTypeFileSystem,
- CodeFSAccess,
- "cannot access destination parent directory",
- path,
- map[string]interface{}{
- "parent_dir": parentDir,
- "error": err.Error(),
- },
- )
- } else if !parentInfo.IsDir() {
- return NewStructuredError(
- ErrorTypeValidation,
- CodeValidationPath,
- "destination parent is not a directory",
- path,
- map[string]interface{}{
- "parent_dir": parentDir,
- },
- )
- }
-
- return nil
-}
-
-// ValidateConfigPath validates a configuration file path for security.
-// It ensures the path doesn't contain path traversal attempts.
-func ValidateConfigPath(path string) error {
- if path == "" {
- return nil // Empty path is allowed for config
- }
-
- // Check for path traversal patterns before cleaning
- return checkPathTraversal(path, "config path")
-}
diff --git a/gibidiutils/paths_additional_test.go b/gibidiutils/paths_additional_test.go
deleted file mode 100644
index 9799310..0000000
--- a/gibidiutils/paths_additional_test.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package gibidiutils
-
-import (
- "errors"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestGetBaseName(t *testing.T) {
- tests := []struct {
- name string
- absPath string
- expected string
- }{
- {
- name: "normal path",
- absPath: "/home/user/project",
- expected: "project",
- },
- {
- name: "path with trailing slash",
- absPath: "/home/user/project/",
- expected: "project",
- },
- {
- name: "root path",
- absPath: "/",
- expected: "/",
- },
- {
- name: "current directory",
- absPath: ".",
- expected: "output",
- },
- {
- name: testEmptyPath,
- absPath: "",
- expected: "output",
- },
- {
- name: "file path",
- absPath: "/home/user/file.txt",
- expected: "file.txt",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- result := GetBaseName(tt.absPath)
- assert.Equal(t, tt.expected, result)
- })
- }
-}
-
-func TestValidateSourcePath(t *testing.T) {
- // Create a temp directory for testing
- tempDir := t.TempDir()
- tempFile := filepath.Join(tempDir, "test.txt")
- require.NoError(t, os.WriteFile(tempFile, []byte("test"), 0o600))
-
- tests := []struct {
- name string
- path string
- expectedError string
- }{
- {
- name: testEmptyPath,
- path: "",
- expectedError: "source path is required",
- },
- {
- name: testPathTraversalAttempt,
- path: "../../../etc/passwd",
- expectedError: testPathTraversalDetected,
- },
- {
- name: "path with double dots",
- path: "/home/../etc/passwd",
- expectedError: testPathTraversalDetected,
- },
- {
- name: "non-existent path",
- path: "/definitely/does/not/exist",
- expectedError: "does not exist",
- },
- {
- name: "file instead of directory",
- path: tempFile,
- expectedError: "must be a directory",
- },
- {
- name: "valid directory",
- path: tempDir,
- expectedError: "",
- },
- {
- name: "valid relative path",
- path: ".",
- expectedError: "",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := ValidateSourcePath(tt.path)
-
- if tt.expectedError != "" {
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tt.expectedError)
-
- // Check if it's a StructuredError
- var structErr *StructuredError
- if errors.As(err, &structErr) {
- assert.NotEmpty(t, structErr.Code)
- assert.NotEqual(t, ErrorTypeUnknown, structErr.Type)
- }
- } else {
- assert.NoError(t, err)
- }
- })
- }
-}
-
-func TestValidateDestinationPath(t *testing.T) {
- tempDir := t.TempDir()
-
- tests := []struct {
- name string
- path string
- expectedError string
- }{
- {
- name: testEmptyPath,
- path: "",
- expectedError: "destination path is required",
- },
- {
- name: testPathTraversalAttempt,
- path: "../../etc/passwd",
- expectedError: testPathTraversalDetected,
- },
- {
- name: "absolute path traversal",
- path: "/home/../../../etc/passwd",
- expectedError: testPathTraversalDetected,
- },
- {
- name: "valid new file",
- path: filepath.Join(tempDir, "newfile.txt"),
- expectedError: "",
- },
- {
- name: "valid relative path",
- path: "output.txt",
- expectedError: "",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := ValidateDestinationPath(tt.path)
-
- if tt.expectedError != "" {
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tt.expectedError)
- } else {
- assert.NoError(t, err)
- }
- })
- }
-}
-
-func TestValidateConfigPath(t *testing.T) {
- tempDir := t.TempDir()
- validConfig := filepath.Join(tempDir, "config.yaml")
- require.NoError(t, os.WriteFile(validConfig, []byte("key: value"), 0o600))
-
- tests := []struct {
- name string
- path string
- expectedError string
- }{
- {
- name: testEmptyPath,
- path: "",
- expectedError: "", // Empty config path is allowed
- },
- {
- name: testPathTraversalAttempt,
- path: "../../../etc/config.yaml",
- expectedError: testPathTraversalDetected,
- },
- // ValidateConfigPath doesn't check if file exists or is regular file
- // It only checks for path traversal
- {
- name: "valid config file",
- path: validConfig,
- expectedError: "",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := ValidateConfigPath(tt.path)
-
- if tt.expectedError != "" {
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tt.expectedError)
- } else {
- assert.NoError(t, err)
- }
- })
- }
-}
-
-// TestGetAbsolutePath is already covered in paths_test.go
-
-func TestValidationErrorTypes(t *testing.T) {
- t.Run("source path validation errors", func(t *testing.T) {
- // Test empty source
- err := ValidateSourcePath("")
- assert.Error(t, err)
- var structErrEmptyPath *StructuredError
- if errors.As(err, &structErrEmptyPath) {
- assert.Equal(t, ErrorTypeValidation, structErrEmptyPath.Type)
- assert.Equal(t, CodeValidationRequired, structErrEmptyPath.Code)
- }
-
- // Test path traversal
- err = ValidateSourcePath("../../../etc")
- assert.Error(t, err)
- var structErrTraversal *StructuredError
- if errors.As(err, &structErrTraversal) {
- assert.Equal(t, ErrorTypeValidation, structErrTraversal.Type)
- assert.Equal(t, CodeValidationPath, structErrTraversal.Code)
- }
- })
-
- t.Run("destination path validation errors", func(t *testing.T) {
- // Test empty destination
- err := ValidateDestinationPath("")
- assert.Error(t, err)
- var structErrEmptyDest *StructuredError
- if errors.As(err, &structErrEmptyDest) {
- assert.Equal(t, ErrorTypeValidation, structErrEmptyDest.Type)
- assert.Equal(t, CodeValidationRequired, structErrEmptyDest.Code)
- }
- })
-
- t.Run("config path validation errors", func(t *testing.T) {
- // Test path traversal in config
- err := ValidateConfigPath("../../etc/config.yaml")
- assert.Error(t, err)
- var structErrTraversalInConfig *StructuredError
- if errors.As(err, &structErrTraversalInConfig) {
- assert.Equal(t, ErrorTypeValidation, structErrTraversalInConfig.Type)
- assert.Equal(t, CodeValidationPath, structErrTraversalInConfig.Code)
- }
- })
-}
-
-func TestPathSecurityChecks(t *testing.T) {
- // Test various path traversal attempts
- traversalPaths := []string{
- "../etc/passwd",
- "../../root/.ssh/id_rsa",
- "/home/../../../etc/shadow",
- "./../../sensitive/data",
- "foo/../../../bar",
- }
-
- for _, path := range traversalPaths {
- t.Run("source_"+path, func(t *testing.T) {
- err := ValidateSourcePath(path)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), testPathTraversal)
- })
-
- t.Run("dest_"+path, func(t *testing.T) {
- err := ValidateDestinationPath(path)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), testPathTraversal)
- })
-
- t.Run("config_"+path, func(t *testing.T) {
- err := ValidateConfigPath(path)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), testPathTraversal)
- })
- }
-}
-
-func TestSpecialPaths(t *testing.T) {
- t.Run("GetBaseName with special paths", func(t *testing.T) {
- specialPaths := map[string]string{
- "/": "/",
- "": "output",
- ".": "output",
- "..": "..",
- "/.": "output", // filepath.Base("/.") returns "." which matches the output condition
- "/..": "..",
- "//": "/",
- "///": "/",
- }
-
- for path, expected := range specialPaths {
- result := GetBaseName(path)
- assert.Equal(t, expected, result, "Path: %s", path)
- }
- })
-}
-
-func TestPathNormalization(t *testing.T) {
- tempDir := t.TempDir()
-
- t.Run("source path normalization", func(t *testing.T) {
- // Create nested directory
- nestedDir := filepath.Join(tempDir, "a", "b", "c")
- require.NoError(t, os.MkdirAll(nestedDir, 0o750))
-
- // Test path with redundant separators
- redundantPath := tempDir + string(
- os.PathSeparator,
- ) + string(
- os.PathSeparator,
- ) + "a" + string(
- os.PathSeparator,
- ) + "b" + string(
- os.PathSeparator,
- ) + "c"
- err := ValidateSourcePath(redundantPath)
- assert.NoError(t, err)
- })
-}
-
-func TestPathValidationConcurrency(t *testing.T) {
- tempDir := t.TempDir()
-
- // Test concurrent path validation
- paths := []string{
- tempDir,
- ".",
- "/tmp",
- }
-
- errChan := make(chan error, len(paths)*2)
-
- for _, path := range paths {
- go func(p string) {
- errChan <- ValidateSourcePath(p)
- }(path)
-
- go func(p string) {
- errChan <- ValidateDestinationPath(p + "/output.txt")
- }(path)
- }
-
- // Collect results
- for i := 0; i < len(paths)*2; i++ {
- <-errChan
- }
-
- // No assertions needed - test passes if no panic/race
-}
diff --git a/gibidiutils/paths_test.go b/gibidiutils/paths_test.go
deleted file mode 100644
index 9bc7843..0000000
--- a/gibidiutils/paths_test.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Package gibidiutils provides common utility functions for gibidify.
-package gibidiutils
-
-import (
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
-)
-
-func TestGetAbsolutePath(t *testing.T) {
- // Get current working directory for tests
- cwd, err := os.Getwd()
- if err != nil {
- t.Fatalf("Failed to get current directory: %v", err)
- }
-
- tests := []struct {
- name string
- path string
- wantPrefix string
- wantErr bool
- wantErrMsg string
- skipWindows bool
- }{
- {
- name: "absolute path unchanged",
- path: cwd,
- wantPrefix: cwd,
- wantErr: false,
- },
- {
- name: "relative path current directory",
- path: ".",
- wantPrefix: cwd,
- wantErr: false,
- },
- {
- name: "relative path parent directory",
- path: "..",
- wantPrefix: filepath.Dir(cwd),
- wantErr: false,
- },
- {
- name: "relative path with file",
- path: "test.txt",
- wantPrefix: filepath.Join(cwd, "test.txt"),
- wantErr: false,
- },
- {
- name: "relative path with subdirectory",
- path: "subdir/file.go",
- wantPrefix: filepath.Join(cwd, "subdir", "file.go"),
- wantErr: false,
- },
- {
- name: "empty path",
- path: "",
- wantPrefix: cwd,
- wantErr: false,
- },
- {
- name: "path with tilde",
- path: "~/test",
- wantPrefix: filepath.Join(cwd, "~", "test"),
- wantErr: false,
- skipWindows: false,
- },
- {
- name: "path with multiple separators",
- path: "path//to///file",
- wantPrefix: filepath.Join(cwd, "path", "to", "file"),
- wantErr: false,
- },
- {
- name: "path with trailing separator",
- path: "path/",
- wantPrefix: filepath.Join(cwd, "path"),
- wantErr: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if tt.skipWindows && runtime.GOOS == "windows" {
- t.Skip("Skipping test on Windows")
- }
-
- got, err := GetAbsolutePath(tt.path)
-
- if tt.wantErr {
- if err == nil {
- t.Errorf("GetAbsolutePath() error = nil, wantErr %v", tt.wantErr)
- return
- }
- if tt.wantErrMsg != "" && !strings.Contains(err.Error(), tt.wantErrMsg) {
- t.Errorf("GetAbsolutePath() error = %v, want error containing %v", err, tt.wantErrMsg)
- }
- return
- }
-
- if err != nil {
- t.Errorf("GetAbsolutePath() unexpected error = %v", err)
- return
- }
-
- // Clean the expected path for comparison
- wantClean := filepath.Clean(tt.wantPrefix)
- gotClean := filepath.Clean(got)
-
- if gotClean != wantClean {
- t.Errorf("GetAbsolutePath() = %v, want %v", gotClean, wantClean)
- }
-
- // Verify the result is actually absolute
- if !filepath.IsAbs(got) {
- t.Errorf("GetAbsolutePath() returned non-absolute path: %v", got)
- }
- })
- }
-}
-
-func TestGetAbsolutePathSpecialCases(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("Skipping Unix-specific tests on Windows")
- }
-
- tests := []struct {
- name string
- setup func() (string, func())
- path string
- wantErr bool
- }{
- {
- name: "symlink to directory",
- setup: func() (string, func()) {
- tmpDir := t.TempDir()
- target := filepath.Join(tmpDir, "target")
- link := filepath.Join(tmpDir, "link")
-
- if err := os.Mkdir(target, 0o750); err != nil {
- t.Fatalf("Failed to create target directory: %v", err)
- }
- if err := os.Symlink(target, link); err != nil {
- t.Fatalf("Failed to create symlink: %v", err)
- }
-
- return link, func() {}
- },
- path: "",
- wantErr: false,
- },
- {
- name: "broken symlink",
- setup: func() (string, func()) {
- tmpDir := t.TempDir()
- link := filepath.Join(tmpDir, "broken_link")
-
- if err := os.Symlink("/nonexistent/path", link); err != nil {
- t.Fatalf("Failed to create broken symlink: %v", err)
- }
-
- return link, func() {}
- },
- path: "",
- wantErr: false, // filepath.Abs still works with broken symlinks
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- path, cleanup := tt.setup()
- defer cleanup()
-
- if tt.path == "" {
- tt.path = path
- }
-
- got, err := GetAbsolutePath(tt.path)
- if (err != nil) != tt.wantErr {
- t.Errorf("GetAbsolutePath() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if err == nil && !filepath.IsAbs(got) {
- t.Errorf("GetAbsolutePath() returned non-absolute path: %v", got)
- }
- })
- }
-}
-
-// TestGetAbsolutePathConcurrency verifies that GetAbsolutePath is safe for concurrent use.
-// The test intentionally does not use assertions - it will panic if there's a race condition.
-// Run with -race flag to detect concurrent access issues.
-func TestGetAbsolutePathConcurrency(_ *testing.T) {
- // Test that GetAbsolutePath is safe for concurrent use
- paths := []string{".", "..", "test.go", "subdir/file.txt", "/tmp/test"}
- done := make(chan bool)
-
- for _, p := range paths {
- go func(path string) {
- _, _ = GetAbsolutePath(path)
- done <- true
- }(p)
- }
-
- // Wait for all goroutines to complete
- for range paths {
- <-done
- }
-}
-
-func TestGetAbsolutePathErrorFormatting(t *testing.T) {
- // This test verifies error message formatting
- // We need to trigger an actual error from filepath.Abs
- // On Unix systems, we can't easily trigger filepath.Abs errors
- // so we'll just verify the error wrapping works correctly
-
- // Create a test that would fail if filepath.Abs returns an error
- path := "test/path"
- got, err := GetAbsolutePath(path)
- if err != nil {
- // If we somehow get an error, verify it's properly formatted
- if !strings.Contains(err.Error(), "failed to get absolute path for") {
- t.Errorf("Error message format incorrect: %v", err)
- }
- if !strings.Contains(err.Error(), path) {
- t.Errorf("Error message should contain original path: %v", err)
- }
- } else if !filepath.IsAbs(got) {
- // Normal case - just verify we got a valid absolute path
- t.Errorf("Expected absolute path, got: %v", got)
- }
-}
-
-// BenchmarkGetAbsolutePath benchmarks the GetAbsolutePath function
-func BenchmarkGetAbsolutePath(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, _ = GetAbsolutePath("test/path/file.go")
- }
-}
-
-// BenchmarkGetAbsolutePathAbs benchmarks with already absolute path
-func BenchmarkGetAbsolutePathAbs(b *testing.B) {
- absPath := "/home/user/test/file.go"
- if runtime.GOOS == "windows" {
- absPath = "C:\\Users\\test\\file.go"
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, _ = GetAbsolutePath(absPath)
- }
-}
-
-// BenchmarkGetAbsolutePathCurrent benchmarks with current directory
-func BenchmarkGetAbsolutePathCurrent(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, _ = GetAbsolutePath(".")
- }
-}
diff --git a/gibidiutils/test_constants.go b/gibidiutils/test_constants.go
deleted file mode 100644
index 3b16302..0000000
--- a/gibidiutils/test_constants.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package gibidiutils
-
-// Test constants to avoid duplication in test files.
-// These constants are used across multiple test files in the gibidiutils package.
-const (
- // Error messages
-
- testErrFileNotFound = "file not found"
- testErrWriteFailed = "write failed"
- testErrInvalidFormat = "invalid format"
-
- // Path validation messages
-
- testEmptyPath = "empty path"
- testPathTraversal = "path traversal"
- testPathTraversalAttempt = "path traversal attempt"
- testPathTraversalDetected = "path traversal attempt detected"
-)
diff --git a/gibidiutils/writers_test.go b/gibidiutils/writers_test.go
deleted file mode 100644
index 36724ee..0000000
--- a/gibidiutils/writers_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Package gibidiutils provides common utility functions for gibidify.
-package gibidiutils
-
-import (
- "math"
- "testing"
-)
-
-func TestSafeUint64ToInt64WithDefault(t *testing.T) {
- tests := []struct {
- name string
- value uint64
- defaultValue int64
- want int64
- }{
- {
- name: "normal value within range",
- value: 1000,
- defaultValue: 0,
- want: 1000,
- },
- {
- name: "zero value",
- value: 0,
- defaultValue: 0,
- want: 0,
- },
- {
- name: "max int64 exactly",
- value: math.MaxInt64,
- defaultValue: 0,
- want: math.MaxInt64,
- },
- {
- name: "overflow with zero default clamps to max",
- value: math.MaxInt64 + 1,
- defaultValue: 0,
- want: math.MaxInt64,
- },
- {
- name: "large overflow with zero default clamps to max",
- value: math.MaxUint64,
- defaultValue: 0,
- want: math.MaxInt64,
- },
- {
- name: "overflow with custom default returns custom",
- value: math.MaxInt64 + 1,
- defaultValue: -1,
- want: -1,
- },
- {
- name: "overflow with custom positive default",
- value: math.MaxUint64,
- defaultValue: 12345,
- want: 12345,
- },
- {
- name: "large value within range",
- value: uint64(math.MaxInt64 - 1000),
- defaultValue: 0,
- want: math.MaxInt64 - 1000,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := SafeUint64ToInt64WithDefault(tt.value, tt.defaultValue)
- if got != tt.want {
- t.Errorf("SafeUint64ToInt64WithDefault(%d, %d) = %d, want %d",
- tt.value, tt.defaultValue, got, tt.want)
- }
- })
- }
-}
-
-func TestSafeUint64ToInt64WithDefaultGuardrailsBehavior(t *testing.T) {
- // Test that overflow with default=0 returns MaxInt64, not 0
- // This is critical for back-pressure and resource monitors
- result := SafeUint64ToInt64WithDefault(math.MaxUint64, 0)
- if result == 0 {
- t.Error("Overflow with default=0 returned 0, which would disable guardrails")
- }
- if result != math.MaxInt64 {
- t.Errorf("Overflow with default=0 should clamp to MaxInt64, got %d", result)
- }
-}
-
-// BenchmarkSafeUint64ToInt64WithDefault benchmarks the conversion function
-func BenchmarkSafeUint64ToInt64WithDefault(b *testing.B) {
- b.Run("normal_value", func(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _ = SafeUint64ToInt64WithDefault(1000, 0)
- }
- })
-
- b.Run("overflow_zero_default", func(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _ = SafeUint64ToInt64WithDefault(math.MaxUint64, 0)
- }
- })
-
- b.Run("overflow_custom_default", func(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _ = SafeUint64ToInt64WithDefault(math.MaxUint64, -1)
- }
- })
-}
diff --git a/go.mod b/go.mod
index 7955d8d..c6c8c28 100644
--- a/go.mod
+++ b/go.mod
@@ -8,19 +8,17 @@ require (
github.com/schollz/progressbar/v3 v3.18.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.21.0
- github.com/stretchr/testify v1.11.1
+ golang.org/x/text v0.32.0
gopkg.in/yaml.v3 v3.0.1
)
require (
- github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/spf13/afero v1.15.0 // indirect
@@ -28,8 +26,7 @@ require (
github.com/spf13/pflag v1.0.10 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/sys v0.38.0 // indirect
- golang.org/x/term v0.37.0 // indirect
- golang.org/x/text v0.31.0 // indirect
+ golang.org/x/sys v0.39.0 // indirect
+ golang.org/x/term v0.38.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
diff --git a/go.sum b/go.sum
index 63d062e..a53b765 100644
--- a/go.sum
+++ b/go.sum
@@ -60,12 +60,12 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
-golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/interfaces.go b/interfaces.go
new file mode 100644
index 0000000..3936936
--- /dev/null
+++ b/interfaces.go
@@ -0,0 +1,217 @@
+// Package main provides core interfaces for the gibidify application.
+package main
+
+import (
+ "context"
+ "io"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// Processor defines the interface for file processors.
+// This interface allows for easier testing and mocking of the main processing logic.
+type Processor interface {
+ // Process starts the file processing workflow with the given context.
+ // It returns an error if processing fails at any stage.
+ Process(ctx context.Context) error
+}
+
+// FileProcessorInterface defines the interface for individual file processing.
+// This abstracts the file processing logic for better testability.
+type FileProcessorInterface interface {
+ // ProcessFile processes a single file and sends the result to the output channel.
+ ProcessFile(ctx context.Context, filePath string, outCh chan<- WriteRequest)
+
+ // ProcessWithContext processes a file and returns the content directly.
+ ProcessWithContext(ctx context.Context, filePath string) (string, error)
+}
+
+// ResourceMonitorInterface defines the interface for resource monitoring.
+// This allows for mocking and testing of resource management functionality.
+type ResourceMonitorInterface interface {
+ // Start begins resource monitoring.
+ Start() error
+
+ // Stop stops resource monitoring and cleanup.
+ Stop() error
+
+ // CheckResourceLimits validates current resource usage against limits.
+ CheckResourceLimits() error
+
+ // Metrics returns current resource usage metrics.
+ Metrics() ResourceMetrics
+}
+
+// MetricsCollectorInterface defines the interface for metrics collection.
+// This enables easier testing and different metrics backend implementations.
+type MetricsCollectorInterface interface {
+ // RecordFileProcessed records the processing of a single file.
+ RecordFileProcessed(result FileProcessingResult)
+
+ // IncrementConcurrency increments the current concurrency counter.
+ IncrementConcurrency()
+
+ // DecrementConcurrency decrements the current concurrency counter.
+ DecrementConcurrency()
+
+ // CurrentMetrics returns the current processing metrics.
+ CurrentMetrics() ProcessingMetrics
+
+ // GenerateReport generates a comprehensive processing report.
+ GenerateReport() ProfileReport
+
+ // Reset resets all metrics to initial state.
+ Reset()
+}
+
+// UIManagerInterface defines the interface for user interface management.
+// This abstracts UI operations for better testing and different UI implementations.
+type UIManagerInterface interface {
+ // PrintInfo prints an informational message.
+ PrintInfo(message string)
+
+ // PrintWarning prints a warning message.
+ PrintWarning(message string)
+
+ // PrintError prints an error message.
+ PrintError(message string)
+
+ // PrintSuccess prints a success message.
+ PrintSuccess(message string)
+
+ // SetColorOutput enables or disables colored output.
+ SetColorOutput(enabled bool)
+
+ // SetProgressOutput enables or disables progress indicators.
+ SetProgressOutput(enabled bool)
+}
+
+// WriterInterface defines the interface for output writers.
+// This allows for different output formats and destinations.
+type WriterInterface interface {
+ // Write writes the processed content to the destination.
+ Write(req WriteRequest) error
+
+ // Close finalizes the output and closes any resources.
+ Close() error
+
+ // GetFormat returns the output format supported by this writer.
+ GetFormat() string
+}
+
+// BackpressureManagerInterface defines the interface for backpressure management.
+// This abstracts memory and flow control for better testing.
+type BackpressureManagerInterface interface {
+ // CheckBackpressure returns true if backpressure should be applied.
+ CheckBackpressure() bool
+
+ // UpdateMemoryUsage updates the current memory usage tracking.
+ UpdateMemoryUsage(bytes int64)
+
+ // GetMemoryUsage returns current memory usage statistics.
+ GetMemoryUsage() int64
+
+ // Reset resets backpressure state to initial values.
+ Reset()
+}
+
+// TemplateEngineInterface defines the interface for template processing.
+// This allows for different templating systems and easier testing.
+type TemplateEngineInterface interface {
+ // RenderHeader renders the document header using the configured template.
+ RenderHeader(ctx TemplateContext) (string, error)
+
+ // RenderFooter renders the document footer using the configured template.
+ RenderFooter(ctx TemplateContext) (string, error)
+
+ // RenderFileContent renders individual file content with formatting.
+ RenderFileContent(ctx FileContext) (string, error)
+
+ // RenderMetadata renders metadata section if enabled.
+ RenderMetadata(ctx TemplateContext) (string, error)
+}
+
+// ConfigLoaderInterface defines the interface for configuration management.
+// This enables different configuration sources and easier testing.
+type ConfigLoaderInterface interface {
+ // LoadConfig loads configuration from the appropriate source.
+ LoadConfig() error
+
+ // GetString returns a string configuration value.
+ GetString(key string) string
+
+ // GetInt returns an integer configuration value.
+ GetInt(key string) int
+
+ // GetBool returns a boolean configuration value.
+ GetBool(key string) bool
+
+ // GetStringSlice returns a string slice configuration value.
+ GetStringSlice(key string) []string
+}
+
+// LoggerInterface defines the interface for logging operations.
+// This abstracts logging for better testing and different log backends.
+type LoggerInterface = shared.Logger
+
+// These types are referenced by the interfaces but need to be defined
+// elsewhere in the codebase. They are included here for documentation.
+
+type WriteRequest struct {
+ Path string
+ Content string
+ IsStream bool
+ Reader io.Reader
+ Size int64
+}
+
+type ResourceMetrics struct {
+ FilesProcessed int64
+ TotalSizeProcessed int64
+ ConcurrentReads int64
+ MaxConcurrentReads int64
+}
+
+type FileProcessingResult struct {
+ FilePath string
+ FileSize int64
+ Format string
+ Success bool
+ Error error
+ Skipped bool
+ SkipReason string
+}
+
+type ProcessingMetrics struct {
+ TotalFiles int64
+ ProcessedFiles int64
+ ErrorFiles int64
+ SkippedFiles int64
+ TotalSize int64
+ ProcessedSize int64
+}
+
+type ProfileReport struct {
+ Summary ProcessingMetrics
+ // Additional report fields would be defined in the metrics package
+}
+
+type TemplateContext struct {
+ Files []FileContext
+ // Additional context fields would be defined in the templates package
+}
+
+type FileContext struct {
+ Path string
+ Content string
+ // Additional file context fields would be defined in the templates package
+}
+
+type LogLevel int
+
+const (
+ LogLevelDebug LogLevel = iota
+ LogLevelInfo
+ LogLevelWarn
+ LogLevelError
+)
diff --git a/main.go b/main.go
index 62217b9..110029e 100644
--- a/main.go
+++ b/main.go
@@ -4,12 +4,12 @@ package main
import (
"context"
+ "fmt"
"os"
- "github.com/sirupsen/logrus"
-
"github.com/ivuorinen/gibidify/cli"
"github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
)
func main() {
@@ -24,8 +24,9 @@ func main() {
errorFormatter.FormatError(err)
os.Exit(1)
}
- // System errors still go to logrus for debugging
- logrus.Errorf("System error: %v", err)
+ // System errors still go to logger for debugging
+ logger := shared.GetLogger()
+ logger.Errorf("System error: %v", err)
ui.PrintError("An unexpected error occurred. Please check the logs.")
os.Exit(2)
}
@@ -36,13 +37,22 @@ func run(ctx context.Context) error {
// Parse CLI flags
flags, err := cli.ParseFlags()
if err != nil {
- return err
+ return fmt.Errorf("parsing flags: %w", err)
}
+ // Initialize logger with provided log level
+ logger := shared.GetLogger()
+ logger.SetLevel(shared.ParseLogLevel(flags.LogLevel))
+
// Load configuration
config.LoadConfig()
// Create and run processor
processor := cli.NewProcessor(flags)
- return processor.Process(ctx)
+
+ if err := processor.Process(ctx); err != nil {
+ return fmt.Errorf("processing: %w", err)
+ }
+
+ return nil
}
diff --git a/main_coverage_test.go b/main_coverage_test.go
new file mode 100644
index 0000000..ea0b495
--- /dev/null
+++ b/main_coverage_test.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/cli"
+)
+
+// TestMainFunctionComponents tests the components used by main() function.
+// Since main() calls os.Exit, we can't test it directly, but we can test
+// the components it uses to increase coverage metrics.
+func TestMainFunctionComponents(t *testing.T) {
+ // Test UI manager creation (used in main())
+ ui := cli.NewUIManager()
+ if ui == nil {
+ t.Error("Expected NewUIManager to return non-nil UIManager")
+ }
+
+ // Test error formatter creation (used in main())
+ errorFormatter := cli.NewErrorFormatter(ui)
+ if errorFormatter == nil {
+ t.Error("Expected NewErrorFormatter to return non-nil ErrorFormatter")
+ }
+}
+
+// TestUserErrorClassification tests the error classification used in main().
+func TestUserErrorClassification(t *testing.T) {
+ // Test the cli.IsUserError function that main() uses for error classification
+
+ // Create a user error (MissingSourceError is a user error)
+ userErr := &cli.MissingSourceError{}
+ if !cli.IsUserError(userErr) {
+ t.Error("Expected cli.IsUserError to return true for MissingSourceError")
+ }
+
+ // Test with a system error (generic error)
+ systemErr := errors.New("test system error")
+ if cli.IsUserError(systemErr) {
+ t.Error("Expected cli.IsUserError to return false for generic error")
+ }
+
+ // Test with nil error
+ if cli.IsUserError(nil) {
+ t.Error("Expected cli.IsUserError to return false for nil error")
+ }
+}
+
+// TestMainPackageExports verifies main package exports are accessible.
+func TestMainPackageExports(t *testing.T) {
+ // The main package exports the run() function for testing
+ // Let's verify it's accessible and has the expected signature
+
+ // This is mainly for documentation and coverage tracking
+ // The actual testing of run() is done in other test files
+
+ t.Log("main package exports verified:")
+ t.Log("- run(context.Context) error function is accessible for testing")
+ t.Log("- main() function follows standard Go main conventions")
+ t.Log("- Package structure supports both execution and testing")
+}
diff --git a/main_run_test.go b/main_run_test.go
new file mode 100644
index 0000000..d95b85b
--- /dev/null
+++ b/main_run_test.go
@@ -0,0 +1,264 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "os"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/cli"
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
+)
+
+// TestRunErrorPaths tests various error paths in the run() function.
+func TestRunErrorPaths(t *testing.T) {
+ tests := []struct {
+ name string
+ setup func(t *testing.T)
+ expectError bool
+ errorSubstr string
+ }{
+ {
+ name: "Invalid flags - missing source",
+ setup: func(_ *testing.T) {
+ // Reset flags and set invalid args
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+ // Set args with missing source
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagDestination, shared.TestOutputMD, shared.TestCLIFlagNoUI,
+ }
+ },
+ expectError: true,
+ errorSubstr: "parsing flags",
+ },
+ {
+ name: "Invalid flags - invalid format",
+ setup: func(t *testing.T) {
+ // Reset flags and set invalid args
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+ srcDir := t.TempDir()
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
+ testutil.CloseFile(t, outFile)
+ // Set args with invalid format
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
+ shared.TestCLIFlagFormat, "invalid", shared.TestCLIFlagNoUI,
+ }
+ },
+ expectError: true,
+ errorSubstr: shared.TestOpParsingFlags,
+ },
+ {
+ name: "Invalid source directory",
+ setup: func(t *testing.T) {
+ // Reset flags and set args with non-existent source
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
+ testutil.CloseFile(t, outFile)
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagSource, "/nonexistent/directory",
+ shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+ },
+ expectError: true,
+ errorSubstr: shared.TestOpParsingFlags, // Flag validation catches this, not processing
+ },
+ {
+ name: "Valid run with minimal setup",
+ setup: func(t *testing.T) {
+ // Reset flags
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+ // Create valid setup
+ srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
+ {
+ Path: "",
+ Files: []testutil.FileSpec{
+ {Name: shared.TestFileTXT, Content: shared.TestContent},
+ },
+ },
+ })
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
+ testutil.CloseFile(t, outFile)
+
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagSource, srcDir,
+ shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+ },
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ // Setup test case
+ tt.setup(t)
+
+ // Run the function
+ ctx := context.Background()
+ err := run(ctx)
+
+ // Check expectations
+ if tt.expectError {
+ testutil.AssertExpectedError(t, err, "run() with error case")
+ if tt.errorSubstr != "" {
+ testutil.AssertErrorContains(t, err, tt.errorSubstr, "run() error content")
+ }
+ } else {
+ testutil.AssertNoError(t, err, "run() success case")
+ }
+ })
+ }
+}
+
+// TestRunFlagParsing tests the flag parsing path in run() function.
+func TestRunFlagParsing(t *testing.T) {
+ // Suppress logs for cleaner test output
+ restoreLogs := testutil.SuppressLogs(t)
+ defer restoreLogs()
+
+ // Save original args
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+
+ // Test with empty args (should use defaults)
+ t.Run("default args", func(t *testing.T) {
+ // Reset flags
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+ // Create minimal valid setup
+ srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
+ {
+ Path: "",
+ Files: []testutil.FileSpec{
+ {Name: shared.TestFileTXT, Content: shared.TestContent},
+ },
+ },
+ })
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, "test_output.json")
+ testutil.CloseFile(t, outFile)
+
+ // Set minimal required args
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagSource, srcDir,
+ shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+
+ // Run and verify it works with defaults
+ ctx := context.Background()
+ err := run(ctx)
+ testutil.AssertNoError(t, err, "run() with default flags")
+ })
+}
+
+// TestRunWithCanceledContext tests run() with pre-canceled context.
+func TestRunWithCanceledContext(t *testing.T) {
+ // Suppress logs for cleaner test output
+ restoreLogs := testutil.SuppressLogs(t)
+ defer restoreLogs()
+
+ // Save original args
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+
+ // Reset flags
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+ // Create valid setup
+ srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
+ {
+ Path: "",
+ Files: []testutil.FileSpec{
+ {Name: shared.TestFileGo, Content: shared.LiteralPackageMain + "\nfunc main() {}"},
+ },
+ },
+ })
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
+ testutil.CloseFile(t, outFile)
+
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagSource, srcDir,
+ shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+
+ // Create canceled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ // Run with canceled context
+ err := run(ctx)
+
+ // Should get processing error due to canceled context
+ testutil.AssertExpectedError(t, err, "run() with canceled context")
+ testutil.AssertErrorContains(t, err, "processing", "run() canceled context error")
+}
+
+// TestRunLogLevel tests the log level setting in run().
+func TestRunLogLevel(t *testing.T) {
+ // Suppress logs for cleaner test output
+ restoreLogs := testutil.SuppressLogs(t)
+ defer restoreLogs()
+
+ // Save original args
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+
+ tests := []struct {
+ name string
+ logLevel string
+ }{
+ {"debug level", "debug"},
+ {"info level", "info"},
+ {"warn level", "warn"},
+ {"error level", "error"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Reset flags
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+ // Create valid setup
+ srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
+ {
+ Path: "",
+ Files: []testutil.FileSpec{
+ {Name: shared.TestFileTXT, Content: shared.TestContent},
+ },
+ },
+ })
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
+ testutil.CloseFile(t, outFile)
+
+ // Set args with log level
+ os.Args = []string{
+ "gibidify", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
+ "-log-level", tt.logLevel, shared.TestCLIFlagNoUI,
+ }
+
+ // Run
+ ctx := context.Background()
+ err := run(ctx)
+
+ // Should succeed
+ testutil.AssertNoError(t, err, "run() with log level "+tt.logLevel)
+ })
+ }
+}
diff --git a/main_simple_test.go b/main_simple_test.go
new file mode 100644
index 0000000..3fa974f
--- /dev/null
+++ b/main_simple_test.go
@@ -0,0 +1,372 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/cli"
+ "github.com/ivuorinen/gibidify/shared"
+ "github.com/ivuorinen/gibidify/testutil"
+)
+
+// withIsolatedFlags sets up isolated flag state for testing and returns a cleanup function.
+// This helper saves the original os.Args and flag.CommandLine, resets CLI flags,
+// and creates a fresh FlagSet to avoid conflicts between tests.
+func withIsolatedFlags(t *testing.T) func() {
+ t.Helper()
+
+ oldArgs := os.Args
+ oldFlag := flag.CommandLine
+
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet("test", flag.ContinueOnError)
+
+ return func() {
+ os.Args = oldArgs
+ flag.CommandLine = oldFlag
+ }
+}
+
+// TestRun_FlagParsingErrors tests error handling in flag parsing.
+func TestRunFlagParsingErrors(t *testing.T) {
+ // Test with isolated flag state to avoid conflicts with other tests
+ t.Run("invalid_flag", func(t *testing.T) {
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ os.Args = []string{"test", shared.TestCLIFlagNoUI, "value"}
+
+ err := run(context.Background())
+ if err == nil {
+ t.Fatal("Expected error from invalid flag")
+ }
+
+ if !strings.Contains(err.Error(), shared.TestOpParsingFlags) {
+ t.Errorf("Expected 'parsing flags' error, got: %v", err)
+ }
+ })
+
+ t.Run("invalid_format", func(t *testing.T) {
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ // Create temporary files for the test
+ srcDir := t.TempDir()
+ testutil.CreateTestFile(t, srcDir, shared.TestFileTXT, []byte("test"))
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
+ testutil.CloseFile(t, outFile)
+ defer func() {
+ if err := os.Remove(outPath); err != nil {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ os.Args = []string{
+ "test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
+ shared.TestCLIFlagFormat, "invalid", shared.TestCLIFlagNoUI,
+ }
+
+ err := run(context.Background())
+ if err == nil {
+ t.Fatal("Expected error from invalid format")
+ }
+
+ if !strings.Contains(err.Error(), shared.TestOpParsingFlags) {
+ t.Errorf("Expected 'parsing flags' error, got: %v", err)
+ }
+ })
+}
+
+// TestRun_ProcessingErrors tests processing-related error paths.
+func TestRunProcessingErrors(t *testing.T) {
+ t.Run("nonexistent_source", func(t *testing.T) {
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
+ testutil.CloseFile(t, outFile)
+ defer func() {
+ if err := os.Remove(outPath); err != nil {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ // Use a path that doesn't exist (subpath under temp dir that was never created)
+ nonExistentDir := filepath.Join(t.TempDir(), "nonexistent", "path")
+ os.Args = []string{
+ "test", shared.TestCLIFlagSource, nonExistentDir,
+ shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+
+ err := run(context.Background())
+ if err == nil {
+ t.Fatal("Expected error from nonexistent source")
+ }
+
+ // Could be either parsing flags (validation) or processing error
+ if !strings.Contains(err.Error(), shared.TestOpParsingFlags) && !strings.Contains(err.Error(), "processing") {
+ t.Errorf("Expected error from parsing or processing, got: %v", err)
+ }
+ })
+
+ t.Run("missing_source", func(t *testing.T) {
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
+ testutil.CloseFile(t, outFile)
+ defer func() {
+ if err := os.Remove(outPath); err != nil {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ os.Args = []string{"test", shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI}
+
+ err := run(context.Background())
+ if err == nil {
+ t.Fatal("Expected error from missing source")
+ }
+
+ // Should be a user error
+ if !cli.IsUserError(err) {
+ t.Errorf("Expected user error, got: %v", err)
+ }
+ })
+}
+
+// TestRun_MarkdownExecution tests successful markdown execution.
+func TestRunMarkdownExecution(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ // Create test environment
+ srcDir := t.TempDir()
+ testutil.CreateTestFiles(t, srcDir, []testutil.FileSpec{
+ {Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain + "\nfunc main() {}"},
+ {Name: shared.TestFileHelperGo, Content: shared.LiteralPackageMain + "\nfunc help() {}"},
+ })
+
+ // Use non-existent output path to verify run() actually creates it
+ outPath := filepath.Join(t.TempDir(), "output.md")
+ defer func() {
+ if err := os.Remove(outPath); err != nil && !os.IsNotExist(err) {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ os.Args = []string{
+ "test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
+ shared.TestCLIFlagFormat, "markdown", shared.TestCLIFlagNoUI,
+ }
+
+ err := run(context.Background())
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ // Verify output file was created with content
+ info, err := os.Stat(outPath)
+ if os.IsNotExist(err) {
+ t.Fatal("Output file was not created")
+ }
+ if err != nil {
+ t.Fatalf("Failed to stat output file: %v", err)
+ }
+ if info.Size() == 0 {
+ t.Error("Output file is empty, expected content")
+ }
+}
+
+// TestRun_JSONExecution tests successful JSON execution.
+func TestRunJSONExecution(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ // Create test environment with unique directories
+ srcDir := t.TempDir()
+ testutil.CreateTestFile(t, srcDir, shared.TestFileMainGo, []byte(shared.LiteralPackageMain))
+
+ // Use non-existent output path to verify run() actually creates it
+ outPath := filepath.Join(t.TempDir(), "output.json")
+ defer func() {
+ if err := os.Remove(outPath); err != nil && !os.IsNotExist(err) {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ // Set CLI args with fresh paths
+ os.Args = []string{
+ "test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
+ shared.TestCLIFlagFormat, "json", shared.TestCLIFlagNoUI,
+ }
+
+ err := run(context.Background())
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ // Verify output file was created with content
+ info, err := os.Stat(outPath)
+ if os.IsNotExist(err) {
+ t.Fatal("Output file was not created")
+ }
+ if err != nil {
+ t.Fatalf("Failed to stat output file: %v", err)
+ }
+ if info.Size() == 0 {
+ t.Error("Output file is empty, expected content")
+ }
+}
+
+// TestRun_ErrorWrapping tests that errors are properly wrapped.
+func TestRunErrorWrapping(t *testing.T) {
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ os.Args = []string{"test", "-invalid-flag"}
+
+ err := run(context.Background())
+ if err == nil {
+ t.Fatal("Expected error")
+ }
+
+ // Should wrap with proper context
+ if !strings.Contains(err.Error(), "parsing flags:") {
+ t.Errorf("Error not properly wrapped, got: %v", err)
+ }
+}
+
+// TestRun_HappyPathWithDefaultConfig tests successful execution with default configuration.
+// This validates that run() completes successfully when given valid inputs,
+// implicitly exercising the config loading path without directly verifying it.
+func TestRunHappyPathWithDefaultConfig(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ // Create valid test setup
+ srcDir := t.TempDir()
+ testutil.CreateTestFile(t, srcDir, shared.TestFileGo, []byte(shared.LiteralPackageMain))
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
+ testutil.CloseFile(t, outFile)
+ defer func() {
+ if err := os.Remove(outPath); err != nil {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ os.Args = []string{
+ "test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+
+ err := run(context.Background())
+ if err != nil {
+ t.Fatalf("run() failed: %v", err)
+ }
+}
+
+// TestErrorClassification tests user vs system error classification.
+func TestErrorClassification(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ isUserErr bool
+ }{
+ {
+ name: "nil_error",
+ err: nil,
+ isUserErr: false,
+ },
+ {
+ name: "cli_missing_source",
+ err: cli.NewCLIMissingSourceError(),
+ isUserErr: true,
+ },
+ {
+ name: "flag_error",
+ err: errors.New("flag: invalid argument"),
+ isUserErr: true,
+ },
+ {
+ name: "permission_denied",
+ err: errors.New("permission denied"),
+ isUserErr: true,
+ },
+ {
+ name: "file_not_found",
+ err: errors.New("file not found"),
+ isUserErr: true,
+ },
+ {
+ name: "generic_system_error",
+ err: errors.New("internal system failure"),
+ isUserErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ isUser := cli.IsUserError(tt.err)
+ if isUser != tt.isUserErr {
+ t.Errorf("IsUserError(%v) = %v, want %v", tt.err, isUser, tt.isUserErr)
+ }
+ })
+ }
+}
+
+// TestRun_ContextCancellation tests context cancellation handling.
+func TestRunContextCancellation(t *testing.T) {
+ // Suppress all output for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ cleanup := withIsolatedFlags(t)
+ defer cleanup()
+
+ // Create test environment
+ srcDir := t.TempDir()
+ testutil.CreateTestFile(t, srcDir, shared.TestFileGo, []byte(shared.LiteralPackageMain))
+
+ outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
+ testutil.CloseFile(t, outFile)
+ defer func() {
+ if err := os.Remove(outPath); err != nil {
+ t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
+ }
+ }()
+
+ os.Args = []string{
+ "test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
+ }
+
+ // Create pre-canceled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ err := run(ctx)
+ // Assert that canceled context causes an error
+ if err == nil {
+ t.Error("Expected error with canceled context, got nil")
+ } else if !errors.Is(err, context.Canceled) && !strings.Contains(err.Error(), "context canceled") {
+ t.Errorf("Expected context.Canceled error, got: %v", err)
+ }
+}
diff --git a/main_test.go b/main_test.go
index cb51a5c..c673a70 100644
--- a/main_test.go
+++ b/main_test.go
@@ -2,12 +2,15 @@ package main
import (
"context"
+ "flag"
"fmt"
"os"
+ "path/filepath"
+ "strings"
"testing"
"time"
- "github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/cli"
"github.com/ivuorinen/gibidify/testutil"
)
@@ -15,22 +18,25 @@ const (
testFileCount = 1000
)
-// TestMain configures test-time flags for packages.
-func TestMain(m *testing.M) {
- // Inform packages that we're running under tests so they can adjust noisy logging.
- // The config package will suppress the specific info-level message about missing config
- // while still allowing tests to enable debug/info level logging when needed.
- config.SetRunningInTest(true)
- os.Exit(m.Run())
+// resetFlagState resets the global flag state to allow multiple test runs.
+func resetFlagState() {
+ // Reset both the flag.CommandLine and cli global state for clean testing
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
}
// TestIntegrationFullCLI simulates a full run of the CLI application using adaptive concurrency.
func TestIntegrationFullCLI(t *testing.T) {
+ // Suppress logs for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ resetFlagState()
srcDir := setupTestFiles(t)
outFilePath := setupOutputFile(t)
setupCLIArgs(srcDir, outFilePath)
- // Run the application with the test context.
+ // Run the application with a background context.
ctx := t.Context()
if runErr := run(ctx); runErr != nil {
t.Fatalf("Run failed: %v", runErr)
@@ -70,7 +76,7 @@ func setupCLIArgs(srcDir, outFilePath string) {
// verifyOutput checks that the output file contains expected content.
func verifyOutput(t *testing.T, outFilePath string) {
t.Helper()
- data, err := os.ReadFile(outFilePath) // #nosec G304 - test file path is controlled
+ data, err := os.ReadFile(outFilePath)
if err != nil {
t.Fatalf("Failed to read output file: %v", err)
}
@@ -80,6 +86,11 @@ func verifyOutput(t *testing.T, outFilePath string) {
// TestIntegrationCancellation verifies that the application correctly cancels processing when the context times out.
func TestIntegrationCancellation(t *testing.T) {
+ // Suppress logs for cleaner test output
+ restore := testutil.SuppressAllOutput(t)
+ defer restore()
+
+ resetFlagState()
// Create a temporary source directory with many files to simulate a long-running process.
srcDir := t.TempDir()
@@ -101,10 +112,10 @@ func TestIntegrationCancellation(t *testing.T) {
// Set up CLI arguments.
testutil.SetupCLIArgs(srcDir, outFilePath, "PREFIX", "SUFFIX", 2)
- // Create a context with a very short timeout to force cancellation.
+ // Create a context with a short timeout to force cancellation.
ctx, cancel := context.WithTimeout(
t.Context(),
- 1*time.Millisecond,
+ 5*time.Millisecond,
)
defer cancel()
@@ -114,3 +125,86 @@ func TestIntegrationCancellation(t *testing.T) {
t.Error("Expected Run to fail due to cancellation, but it succeeded")
}
}
+
+// BenchmarkRun benchmarks the run() function performance.
+func BenchmarkRun(b *testing.B) {
+ // Save original args and flags
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+
+ ctx := context.Background()
+
+ for b.Loop() {
+ // Create fresh directories for each iteration
+ srcDir := b.TempDir()
+ outDir := b.TempDir()
+
+ // Create benchmark files
+ files := map[string]string{
+ "bench1.go": "package main\n// Benchmark file 1",
+ "bench2.txt": "Benchmark content file 2",
+ "bench3.md": "# Benchmark markdown file",
+ }
+
+ for name, content := range files {
+ filePath := filepath.Join(srcDir, name)
+ if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil {
+ b.Fatalf("Failed to create benchmark file %s: %v", name, err)
+ }
+ }
+
+ outFilePath := filepath.Join(outDir, "output.md")
+
+ // Reset flags for each iteration
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet("bench", flag.ContinueOnError)
+
+ os.Args = []string{"gibidify", "-source", srcDir, "-destination", outFilePath, "-no-ui"}
+
+ if err := run(ctx); err != nil {
+ b.Fatalf("run() failed in benchmark: %v", err)
+ }
+ }
+}
+
+// BenchmarkRunLargeFiles benchmarks the run() function with larger files.
+func BenchmarkRunLargeFiles(b *testing.B) {
+ // Save original args
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+
+ largeContent := strings.Repeat("This is a large file for benchmarking purposes.\n", 1000)
+ ctx := context.Background()
+
+ for b.Loop() {
+ // Create fresh directories for each iteration
+ srcDir := b.TempDir()
+ outDir := b.TempDir()
+
+ // Create large benchmark files
+ files := map[string]string{
+ "large1.go": "package main\n" + largeContent,
+ "large2.txt": largeContent,
+ "large3.md": "# Large File\n" + largeContent,
+ }
+
+ for name, content := range files {
+ filePath := filepath.Join(srcDir, name)
+ if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil {
+ b.Fatalf("Failed to create large benchmark file %s: %v", name, err)
+ }
+ }
+
+ outFilePath := filepath.Join(outDir, "output.md")
+
+ // Reset flags for each iteration
+ cli.ResetFlags()
+ flag.CommandLine = flag.NewFlagSet("bench", flag.ContinueOnError)
+
+ os.Args = []string{"gibidify", "-source", srcDir, "-destination", outFilePath, "-no-ui"}
+
+ if err := run(ctx); err != nil {
+ b.Fatalf("run() failed in large files benchmark: %v", err)
+ }
+ }
+}
diff --git a/metrics/collector.go b/metrics/collector.go
new file mode 100644
index 0000000..92b713a
--- /dev/null
+++ b/metrics/collector.go
@@ -0,0 +1,355 @@
+// Package metrics provides performance monitoring and reporting capabilities.
+package metrics
+
+import (
+ "math"
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// NewCollector creates a new metrics collector.
+func NewCollector() *Collector {
+ now := time.Now()
+
+ return &Collector{
+ startTime: now,
+ lastUpdate: now,
+ formatCounts: make(map[string]int64),
+ errorCounts: make(map[string]int64),
+ phaseTimings: make(map[string]time.Duration),
+ smallestFile: math.MaxInt64, // Initialize to max value to properly track minimum
+ }
+}
+
+// RecordFileProcessed records the successful processing of a file.
+func (c *Collector) RecordFileProcessed(result FileProcessingResult) {
+ atomic.AddInt64(&c.totalFiles, 1)
+
+ c.updateFileStatusCounters(result)
+ atomic.AddInt64(&c.totalSize, result.FileSize)
+ c.updateFormatAndErrorCounts(result)
+}
+
+// updateFileStatusCounters updates counters based on file processing result.
+func (c *Collector) updateFileStatusCounters(result FileProcessingResult) {
+ switch {
+ case result.Success:
+ atomic.AddInt64(&c.processedFiles, 1)
+ atomic.AddInt64(&c.processedSize, result.FileSize)
+ c.updateFileSizeExtremes(result.FileSize)
+ case result.Skipped:
+ atomic.AddInt64(&c.skippedFiles, 1)
+ default:
+ atomic.AddInt64(&c.errorFiles, 1)
+ }
+}
+
+// updateFileSizeExtremes updates the largest and smallest file size atomically.
+func (c *Collector) updateFileSizeExtremes(fileSize int64) {
+ // Update the largest file atomically
+ for {
+ current := atomic.LoadInt64(&c.largestFile)
+ if fileSize <= current {
+ break
+ }
+ if atomic.CompareAndSwapInt64(&c.largestFile, current, fileSize) {
+ break
+ }
+ }
+
+ // Update the smallest file atomically
+ for {
+ current := atomic.LoadInt64(&c.smallestFile)
+ if fileSize >= current {
+ break
+ }
+ if atomic.CompareAndSwapInt64(&c.smallestFile, current, fileSize) {
+ break
+ }
+ }
+}
+
+// updateFormatAndErrorCounts updates format and error counts with mutex protection.
+func (c *Collector) updateFormatAndErrorCounts(result FileProcessingResult) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if result.Format != "" {
+ c.formatCounts[result.Format]++
+ }
+ if result.Error != nil {
+ errorType := c.simplifyErrorType(result.Error)
+ c.errorCounts[errorType]++
+ }
+ c.lastUpdate = time.Now()
+}
+
+// simplifyErrorType simplifies error messages for better aggregation.
+func (c *Collector) simplifyErrorType(err error) string {
+ errorType := err.Error()
+ // Simplify error types for better aggregation
+ if len(errorType) > 50 {
+ errorType = errorType[:50] + "..."
+ }
+
+ return errorType
+}
+
+// RecordPhaseTime records the time spent in a processing phase.
+func (c *Collector) RecordPhaseTime(phase string, duration time.Duration) {
+ c.mu.Lock()
+ c.phaseTimings[phase] += duration
+ c.mu.Unlock()
+}
+
+// IncrementConcurrency increments the current concurrency counter.
+func (c *Collector) IncrementConcurrency() {
+ newVal := atomic.AddInt32(&c.concurrency, 1)
+
+ // Update peak concurrency if current is higher
+ for {
+ peak := atomic.LoadInt32(&c.peakConcurrency)
+ if newVal <= peak || atomic.CompareAndSwapInt32(&c.peakConcurrency, peak, newVal) {
+ break
+ }
+ }
+}
+
+// DecrementConcurrency decrements the current concurrency counter.
+// Prevents negative values if calls are imbalanced.
+func (c *Collector) DecrementConcurrency() {
+ for {
+ cur := atomic.LoadInt32(&c.concurrency)
+ if cur == 0 {
+ return
+ }
+ if atomic.CompareAndSwapInt32(&c.concurrency, cur, cur-1) {
+ return
+ }
+ }
+}
+
+// CurrentMetrics returns the current metrics snapshot.
+func (c *Collector) CurrentMetrics() ProcessingMetrics {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+
+ now := time.Now()
+ processingTime := now.Sub(c.startTime)
+
+ totalFiles := atomic.LoadInt64(&c.totalFiles)
+ processedFiles := atomic.LoadInt64(&c.processedFiles)
+ processedSize := atomic.LoadInt64(&c.processedSize)
+
+ var avgFileSize float64
+ if processedFiles > 0 {
+ avgFileSize = float64(processedSize) / float64(processedFiles)
+ }
+
+ var filesPerSec, bytesPerSec float64
+ if processingTime.Seconds() > 0 {
+ filesPerSec = float64(processedFiles) / processingTime.Seconds()
+ bytesPerSec = float64(processedSize) / processingTime.Seconds()
+ }
+
+ smallestFile := atomic.LoadInt64(&c.smallestFile)
+ if smallestFile == math.MaxInt64 {
+ smallestFile = 0 // No files processed yet
+ }
+
+ // Copy maps to avoid race conditions
+ formatCounts := make(map[string]int64)
+ for k, v := range c.formatCounts {
+ formatCounts[k] = v
+ }
+
+ errorCounts := make(map[string]int64)
+ for k, v := range c.errorCounts {
+ errorCounts[k] = v
+ }
+
+ phaseTimings := make(map[string]time.Duration)
+ for k, v := range c.phaseTimings {
+ phaseTimings[k] = v
+ }
+
+ return ProcessingMetrics{
+ TotalFiles: totalFiles,
+ ProcessedFiles: processedFiles,
+ SkippedFiles: atomic.LoadInt64(&c.skippedFiles),
+ ErrorFiles: atomic.LoadInt64(&c.errorFiles),
+ LastUpdated: c.lastUpdate,
+ TotalSize: atomic.LoadInt64(&c.totalSize),
+ ProcessedSize: processedSize,
+ AverageFileSize: avgFileSize,
+ LargestFile: atomic.LoadInt64(&c.largestFile),
+ SmallestFile: smallestFile,
+ StartTime: c.startTime,
+ ProcessingTime: processingTime,
+ FilesPerSecond: filesPerSec,
+ BytesPerSecond: bytesPerSec,
+ PeakMemoryMB: shared.BytesToMB(m.Sys),
+ CurrentMemoryMB: shared.BytesToMB(m.Alloc),
+ GoroutineCount: runtime.NumGoroutine(),
+ FormatCounts: formatCounts,
+ ErrorCounts: errorCounts,
+ MaxConcurrency: int(atomic.LoadInt32(&c.peakConcurrency)),
+ CurrentConcurrency: atomic.LoadInt32(&c.concurrency),
+ PhaseTimings: phaseTimings,
+ }
+}
+
+// Finish marks the end of processing and records final metrics.
+func (c *Collector) Finish() {
+ // Get current metrics first (which will acquire its own lock)
+ currentMetrics := c.CurrentMetrics()
+
+ // Then update final metrics with lock
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.metrics = currentMetrics
+ c.metrics.EndTime = time.Now()
+ c.metrics.ProcessingTime = c.metrics.EndTime.Sub(c.startTime)
+}
+
+// FinalMetrics returns the final metrics after processing is complete.
+func (c *Collector) FinalMetrics() ProcessingMetrics {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.metrics
+}
+
+// GenerateReport generates a comprehensive profiling report.
+func (c *Collector) GenerateReport() ProfileReport {
+ metrics := c.CurrentMetrics()
+
+ // Generate format breakdown
+ formatBreakdown := make(map[string]FormatMetrics)
+ for format, count := range metrics.FormatCounts {
+ // For now, we don't have detailed per-format timing data
+ // This could be enhanced in the future
+ formatBreakdown[format] = FormatMetrics{
+ Count: count,
+ TotalSize: 0, // Would need to track this separately
+ AverageSize: 0,
+ TotalProcessingTime: 0,
+ AverageProcessingTime: 0,
+ }
+ }
+
+ // Generate phase breakdown
+ phaseBreakdown := make(map[string]PhaseMetrics)
+ totalPhaseTime := time.Duration(0)
+ for _, duration := range metrics.PhaseTimings {
+ totalPhaseTime += duration
+ }
+
+ for phase, duration := range metrics.PhaseTimings {
+ percentage := float64(0)
+ if totalPhaseTime > 0 {
+ percentage = float64(duration) / float64(totalPhaseTime) * 100
+ }
+
+ phaseBreakdown[phase] = PhaseMetrics{
+ TotalTime: duration,
+ Count: 1, // For now, we track total time per phase
+ AverageTime: duration,
+ Percentage: percentage,
+ }
+ }
+
+ // Calculate performance index (files per second normalized)
+ performanceIndex := metrics.FilesPerSecond
+ if performanceIndex > shared.MetricsPerformanceIndexCap {
+ performanceIndex = shared.MetricsPerformanceIndexCap // Cap for reasonable indexing
+ }
+
+ // Generate recommendations
+ recommendations := c.generateRecommendations(metrics)
+
+ return ProfileReport{
+ Summary: metrics,
+ TopLargestFiles: []FileInfo{}, // Would need separate tracking
+ TopSlowestFiles: []FileInfo{}, // Would need separate tracking
+ FormatBreakdown: formatBreakdown,
+ ErrorBreakdown: metrics.ErrorCounts,
+ PhaseBreakdown: phaseBreakdown,
+ PerformanceIndex: performanceIndex,
+ Recommendations: recommendations,
+ }
+}
+
+// generateRecommendations generates performance recommendations based on metrics.
+func (c *Collector) generateRecommendations(metrics ProcessingMetrics) []string {
+ var recommendations []string
+
+ // Memory usage recommendations
+ if metrics.CurrentMemoryMB > 500 {
+ recommendations = append(recommendations, "Consider reducing memory usage - current usage is high (>500MB)")
+ }
+
+ // Processing rate recommendations
+ if metrics.FilesPerSecond < 10 && metrics.ProcessedFiles > 100 {
+ recommendations = append(recommendations,
+ "Processing rate is low (<10 files/sec) - consider optimizing file I/O")
+ }
+
+ // Error rate recommendations
+ if metrics.TotalFiles > 0 {
+ errorRate := float64(metrics.ErrorFiles) / float64(metrics.TotalFiles) * 100
+ if errorRate > 5 {
+ recommendations = append(recommendations, "High error rate (>5%) detected - review error logs")
+ }
+ }
+
+ // Concurrency recommendations
+ halfMaxConcurrency := shared.SafeIntToInt32WithDefault(metrics.MaxConcurrency/2, 1)
+ if halfMaxConcurrency > 0 && metrics.CurrentConcurrency < halfMaxConcurrency {
+ recommendations = append(recommendations,
+ "Low concurrency utilization - consider increasing concurrent processing")
+ }
+
+ // Large file recommendations
+ const largeSizeThreshold = 50 * shared.BytesPerMB // 50MB
+ if metrics.LargestFile > largeSizeThreshold {
+ recommendations = append(
+ recommendations,
+ "Very large files detected (>50MB) - consider streaming processing for large files",
+ )
+ }
+
+ return recommendations
+}
+
+// Reset resets all metrics to initial state.
+func (c *Collector) Reset() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ now := time.Now()
+ c.startTime = now
+ c.lastUpdate = now
+
+ atomic.StoreInt64(&c.totalFiles, 0)
+ atomic.StoreInt64(&c.processedFiles, 0)
+ atomic.StoreInt64(&c.skippedFiles, 0)
+ atomic.StoreInt64(&c.errorFiles, 0)
+ atomic.StoreInt64(&c.totalSize, 0)
+ atomic.StoreInt64(&c.processedSize, 0)
+ atomic.StoreInt64(&c.largestFile, 0)
+ atomic.StoreInt64(&c.smallestFile, math.MaxInt64)
+ atomic.StoreInt32(&c.concurrency, 0)
+
+ c.formatCounts = make(map[string]int64)
+ c.errorCounts = make(map[string]int64)
+ c.metrics = ProcessingMetrics{} // Clear final snapshot
+ c.phaseTimings = make(map[string]time.Duration)
+}
diff --git a/metrics/collector_test.go b/metrics/collector_test.go
new file mode 100644
index 0000000..2dcb716
--- /dev/null
+++ b/metrics/collector_test.go
@@ -0,0 +1,484 @@
+package metrics
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+func TestNewCollector(t *testing.T) {
+ collector := NewCollector()
+
+ if collector == nil {
+ t.Fatal("NewCollector returned nil")
+ }
+
+ if collector.formatCounts == nil {
+ t.Error("formatCounts map not initialized")
+ }
+
+ if collector.errorCounts == nil {
+ t.Error("errorCounts map not initialized")
+ }
+
+ if collector.phaseTimings == nil {
+ t.Error("phaseTimings map not initialized")
+ }
+
+ maxInt := shared.MetricsMaxInt64
+ if collector.smallestFile != maxInt {
+ t.Errorf("smallestFile not initialized correctly, got %d, want %d", collector.smallestFile, maxInt)
+ }
+}
+
+func TestRecordFileProcessedSuccess(t *testing.T) {
+ collector := NewCollector()
+
+ result := FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: 1024,
+ Format: "go",
+ ProcessingTime: 10 * time.Millisecond,
+ Success: true,
+ }
+
+ collector.RecordFileProcessed(result)
+
+ metrics := collector.CurrentMetrics()
+
+ if metrics.TotalFiles != 1 {
+ t.Errorf(shared.TestFmtExpectedTotalFiles, metrics.TotalFiles)
+ }
+
+ if metrics.ProcessedFiles != 1 {
+ t.Errorf("Expected ProcessedFiles=1, got %d", metrics.ProcessedFiles)
+ }
+
+ if metrics.ProcessedSize != 1024 {
+ t.Errorf("Expected ProcessedSize=1024, got %d", metrics.ProcessedSize)
+ }
+
+ if metrics.FormatCounts["go"] != 1 {
+ t.Errorf("Expected go format count=1, got %d", metrics.FormatCounts["go"])
+ }
+
+ if metrics.LargestFile != 1024 {
+ t.Errorf("Expected LargestFile=1024, got %d", metrics.LargestFile)
+ }
+
+ if metrics.SmallestFile != 1024 {
+ t.Errorf("Expected SmallestFile=1024, got %d", metrics.SmallestFile)
+ }
+}
+
+func TestRecordFileProcessedError(t *testing.T) {
+ collector := NewCollector()
+
+ result := FileProcessingResult{
+ FilePath: "/test/error.txt",
+ FileSize: 512,
+ Format: "txt",
+ ProcessingTime: 5 * time.Millisecond,
+ Success: false,
+ Error: errors.New(shared.TestErrTestErrorMsg),
+ }
+
+ collector.RecordFileProcessed(result)
+
+ metrics := collector.CurrentMetrics()
+
+ if metrics.TotalFiles != 1 {
+ t.Errorf(shared.TestFmtExpectedTotalFiles, metrics.TotalFiles)
+ }
+
+ if metrics.ErrorFiles != 1 {
+ t.Errorf("Expected ErrorFiles=1, got %d", metrics.ErrorFiles)
+ }
+
+ if metrics.ProcessedFiles != 0 {
+ t.Errorf("Expected ProcessedFiles=0, got %d", metrics.ProcessedFiles)
+ }
+
+ if metrics.ErrorCounts[shared.TestErrTestErrorMsg] != 1 {
+ t.Errorf("Expected error count=1, got %d", metrics.ErrorCounts[shared.TestErrTestErrorMsg])
+ }
+}
+
+func TestRecordFileProcessedSkipped(t *testing.T) {
+ collector := NewCollector()
+
+ result := FileProcessingResult{
+ FilePath: "/test/skipped.bin",
+ FileSize: 256,
+ Success: false,
+ Skipped: true,
+ SkipReason: "binary file",
+ }
+
+ collector.RecordFileProcessed(result)
+
+ metrics := collector.CurrentMetrics()
+
+ if metrics.TotalFiles != 1 {
+ t.Errorf(shared.TestFmtExpectedTotalFiles, metrics.TotalFiles)
+ }
+
+ if metrics.SkippedFiles != 1 {
+ t.Errorf("Expected SkippedFiles=1, got %d", metrics.SkippedFiles)
+ }
+
+ if metrics.ProcessedFiles != 0 {
+ t.Errorf("Expected ProcessedFiles=0, got %d", metrics.ProcessedFiles)
+ }
+}
+
+func TestRecordPhaseTime(t *testing.T) {
+ collector := NewCollector()
+
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 100*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseProcessing, 200*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 50*time.Millisecond) // Add to existing
+
+ metrics := collector.CurrentMetrics()
+
+ if metrics.PhaseTimings[shared.MetricsPhaseCollection] != 150*time.Millisecond {
+ t.Errorf("Expected collection phase time=150ms, got %v", metrics.PhaseTimings[shared.MetricsPhaseCollection])
+ }
+
+ if metrics.PhaseTimings[shared.MetricsPhaseProcessing] != 200*time.Millisecond {
+ t.Errorf("Expected processing phase time=200ms, got %v", metrics.PhaseTimings[shared.MetricsPhaseProcessing])
+ }
+}
+
+func TestConcurrencyTracking(t *testing.T) {
+ collector := NewCollector()
+
+ // Initial concurrency should be 0
+ metrics := collector.CurrentMetrics()
+ if metrics.CurrentConcurrency != 0 {
+ t.Errorf("Expected initial concurrency=0, got %d", metrics.CurrentConcurrency)
+ }
+
+ // Increment concurrency
+ collector.IncrementConcurrency()
+ collector.IncrementConcurrency()
+
+ metrics = collector.CurrentMetrics()
+ if metrics.CurrentConcurrency != 2 {
+ t.Errorf("Expected concurrency=2, got %d", metrics.CurrentConcurrency)
+ }
+
+ // Decrement concurrency
+ collector.DecrementConcurrency()
+
+ metrics = collector.CurrentMetrics()
+ if metrics.CurrentConcurrency != 1 {
+ t.Errorf("Expected concurrency=1, got %d", metrics.CurrentConcurrency)
+ }
+}
+
+func TestFileSizeTracking(t *testing.T) {
+ collector := NewCollector()
+
+ files := []FileProcessingResult{
+ {FilePath: "small.txt", FileSize: 100, Success: true, Format: "txt"},
+ {FilePath: "large.txt", FileSize: 5000, Success: true, Format: "txt"},
+ {FilePath: "medium.txt", FileSize: 1000, Success: true, Format: "txt"},
+ }
+
+ for _, file := range files {
+ collector.RecordFileProcessed(file)
+ }
+
+ metrics := collector.CurrentMetrics()
+
+ if metrics.LargestFile != 5000 {
+ t.Errorf("Expected LargestFile=5000, got %d", metrics.LargestFile)
+ }
+
+ if metrics.SmallestFile != 100 {
+ t.Errorf("Expected SmallestFile=100, got %d", metrics.SmallestFile)
+ }
+
+ expectedAvg := float64(6100) / 3 // (100 + 5000 + 1000) / 3
+ if math.Abs(metrics.AverageFileSize-expectedAvg) > 0.1 {
+ t.Errorf("Expected AverageFileSize=%.1f, got %.1f", expectedAvg, metrics.AverageFileSize)
+ }
+}
+
+func TestConcurrentAccess(t *testing.T) {
+ collector := NewCollector()
+
+ // Test concurrent file processing
+ var wg sync.WaitGroup
+ numGoroutines := 10
+ filesPerGoroutine := 100
+
+ wg.Add(numGoroutines)
+ for i := 0; i < numGoroutines; i++ {
+ go func(id int) {
+ defer wg.Done()
+ for j := 0; j < filesPerGoroutine; j++ {
+ result := FileProcessingResult{
+ FilePath: fmt.Sprintf("/test/file_%d_%d.go", id, j),
+ FileSize: int64(j + 1),
+ Success: true,
+ Format: "go",
+ }
+ collector.RecordFileProcessed(result)
+ }
+ }(i)
+ }
+
+ wg.Wait()
+
+ metrics := collector.CurrentMetrics()
+ expectedTotal := int64(numGoroutines * filesPerGoroutine)
+
+ if metrics.TotalFiles != expectedTotal {
+ t.Errorf("Expected TotalFiles=%d, got %d", expectedTotal, metrics.TotalFiles)
+ }
+
+ if metrics.ProcessedFiles != expectedTotal {
+ t.Errorf("Expected ProcessedFiles=%d, got %d", expectedTotal, metrics.ProcessedFiles)
+ }
+
+ if metrics.FormatCounts["go"] != expectedTotal {
+ t.Errorf("Expected go format count=%d, got %d", expectedTotal, metrics.FormatCounts["go"])
+ }
+}
+
+func TestFinishAndGetFinalMetrics(t *testing.T) {
+ collector := NewCollector()
+
+ // Process some files
+ result := FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: 1024,
+ Success: true,
+ Format: "go",
+ }
+ collector.RecordFileProcessed(result)
+
+ collector.Finish()
+
+ finalMetrics := collector.FinalMetrics()
+
+ if finalMetrics.EndTime.IsZero() {
+ t.Error("EndTime should be set after Finish()")
+ }
+
+ if finalMetrics.ProcessingTime < 0 {
+ t.Error("ProcessingTime should be >= 0 after Finish()")
+ }
+
+ if finalMetrics.ProcessedFiles != 1 {
+ t.Errorf("Expected ProcessedFiles=1, got %d", finalMetrics.ProcessedFiles)
+ }
+}
+
+func TestGenerateReport(t *testing.T) {
+ collector := NewCollector()
+
+ // Add some test data
+ files := []FileProcessingResult{
+ {FilePath: "file1.go", FileSize: 1000, Success: true, Format: "go"},
+ {FilePath: "file2.js", FileSize: 2000, Success: true, Format: "js"},
+ {FilePath: "file3.go", FileSize: 500, Success: false, Error: errors.New("syntax error")},
+ }
+
+ for _, file := range files {
+ collector.RecordFileProcessed(file)
+ }
+
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 100*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseProcessing, 200*time.Millisecond)
+
+ // Call Finish to mirror production usage where GenerateReport is called after processing completes
+ collector.Finish()
+
+ report := collector.GenerateReport()
+
+ if report.Summary.TotalFiles != 3 {
+ t.Errorf("Expected Summary.TotalFiles=3, got %d", report.Summary.TotalFiles)
+ }
+
+ if report.FormatBreakdown["go"].Count != 1 {
+ t.Errorf("Expected go format count=1, got %d", report.FormatBreakdown["go"].Count)
+ }
+
+ if report.FormatBreakdown["js"].Count != 1 {
+ t.Errorf("Expected js format count=1, got %d", report.FormatBreakdown["js"].Count)
+ }
+
+ if len(report.ErrorBreakdown) != 1 {
+ t.Errorf("Expected 1 error type, got %d", len(report.ErrorBreakdown))
+ }
+
+ if len(report.PhaseBreakdown) != 2 {
+ t.Errorf("Expected 2 phases, got %d", len(report.PhaseBreakdown))
+ }
+
+ if len(report.Recommendations) == 0 {
+ t.Error("Expected some recommendations")
+ }
+}
+
+func TestReset(t *testing.T) {
+ collector := NewCollector()
+
+ // Add some data
+ result := FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: 1024,
+ Success: true,
+ Format: "go",
+ }
+ collector.RecordFileProcessed(result)
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 100*time.Millisecond)
+
+ // Verify data exists
+ metrics := collector.CurrentMetrics()
+ if metrics.TotalFiles == 0 {
+ t.Error("Expected data before reset")
+ }
+
+ // Reset
+ collector.Reset()
+
+ // Verify reset
+ metrics = collector.CurrentMetrics()
+ if metrics.TotalFiles != 0 {
+ t.Errorf("Expected TotalFiles=0 after reset, got %d", metrics.TotalFiles)
+ }
+
+ if metrics.ProcessedFiles != 0 {
+ t.Errorf("Expected ProcessedFiles=0 after reset, got %d", metrics.ProcessedFiles)
+ }
+
+ if len(metrics.FormatCounts) != 0 {
+ t.Errorf("Expected empty FormatCounts after reset, got %d entries", len(metrics.FormatCounts))
+ }
+
+ if len(metrics.PhaseTimings) != 0 {
+ t.Errorf("Expected empty PhaseTimings after reset, got %d entries", len(metrics.PhaseTimings))
+ }
+}
+
+// Benchmarks for collector hot paths
+
+func BenchmarkCollectorRecordFileProcessed(b *testing.B) {
+ collector := NewCollector()
+ result := FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: 1024,
+ Format: "go",
+ ProcessingTime: 10 * time.Millisecond,
+ Success: true,
+ }
+
+ for b.Loop() {
+ collector.RecordFileProcessed(result)
+ }
+}
+
+func BenchmarkCollectorRecordFileProcessedConcurrent(b *testing.B) {
+ collector := NewCollector()
+ result := FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: 1024,
+ Format: "go",
+ ProcessingTime: 10 * time.Millisecond,
+ Success: true,
+ }
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ collector.RecordFileProcessed(result)
+ }
+ })
+}
+
+func BenchmarkCollectorCurrentMetrics(b *testing.B) {
+ collector := NewCollector()
+
+ // Add some baseline data
+ for i := 0; i < 100; i++ {
+ result := FileProcessingResult{
+ FilePath: fmt.Sprintf("/test/file%d.go", i),
+ FileSize: int64(i * 100),
+ Format: "go",
+ Success: true,
+ }
+ collector.RecordFileProcessed(result)
+ }
+
+ b.ResetTimer()
+ for b.Loop() {
+ _ = collector.CurrentMetrics()
+ }
+}
+
+func BenchmarkCollectorGenerateReport(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ files int
+ }{
+ {"10files", 10},
+ {"100files", 100},
+ {"1000files", 1000},
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ collector := NewCollector()
+
+ // Add test data
+ formats := []string{"go", "js", "py", "ts", "rs", "java", "cpp", "rb"}
+ for i := 0; i < bm.files; i++ {
+ var result FileProcessingResult
+ if i%10 == 0 {
+ result = FileProcessingResult{
+ FilePath: fmt.Sprintf("/test/error%d.go", i),
+ FileSize: 500,
+ Success: false,
+ Error: errors.New(shared.TestErrTestErrorMsg),
+ }
+ } else {
+ result = FileProcessingResult{
+ FilePath: fmt.Sprintf("/test/file%d.go", i),
+ FileSize: int64(i * 100),
+ Format: formats[i%len(formats)],
+ Success: true,
+ }
+ }
+ collector.RecordFileProcessed(result)
+ }
+
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 50*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseProcessing, 150*time.Millisecond)
+ collector.Finish()
+
+ b.ResetTimer()
+ for b.Loop() {
+ _ = collector.GenerateReport()
+ }
+ })
+ }
+}
+
+func BenchmarkCollectorConcurrencyTracking(b *testing.B) {
+ collector := NewCollector()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ collector.IncrementConcurrency()
+ collector.DecrementConcurrency()
+ }
+ })
+}
diff --git a/metrics/reporter.go b/metrics/reporter.go
new file mode 100644
index 0000000..5401251
--- /dev/null
+++ b/metrics/reporter.go
@@ -0,0 +1,418 @@
+// Package metrics provides performance monitoring and reporting capabilities.
+package metrics
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// reportBuilder wraps strings.Builder with error accumulation for robust error handling.
+type reportBuilder struct {
+ b *strings.Builder
+ err error
+}
+
+// newReportBuilder creates a new report builder.
+func newReportBuilder() *reportBuilder {
+ return &reportBuilder{b: &strings.Builder{}}
+}
+
+// writeString writes a string, accumulating any errors.
+func (rb *reportBuilder) writeString(s string) {
+ if rb.err != nil {
+ return
+ }
+ _, rb.err = rb.b.WriteString(s)
+}
+
+// fprintf formats and writes, accumulating any errors.
+func (rb *reportBuilder) fprintf(format string, args ...any) {
+ if rb.err != nil {
+ return
+ }
+ _, rb.err = fmt.Fprintf(rb.b, format, args...)
+}
+
+// String returns the accumulated string, or empty string if there was an error.
+func (rb *reportBuilder) String() string {
+ if rb.err != nil {
+ return ""
+ }
+ return rb.b.String()
+}
+
+// Reporter handles metrics reporting and formatting.
+type Reporter struct {
+ collector *Collector
+ verbose bool
+ colors bool
+}
+
+// NewReporter creates a new metrics reporter.
+func NewReporter(collector *Collector, verbose, colors bool) *Reporter {
+ return &Reporter{
+ collector: collector,
+ verbose: verbose,
+ colors: colors,
+ }
+}
+
+// ReportProgress provides a real-time progress report suitable for CLI output.
+func (r *Reporter) ReportProgress() string {
+ if r == nil || r.collector == nil {
+ return "no metrics available"
+ }
+
+ metrics := r.collector.CurrentMetrics()
+
+ if r.verbose {
+ return r.formatVerboseProgress(metrics)
+ }
+
+ return r.formatBasicProgress(metrics)
+}
+
+// ReportFinal provides a comprehensive final report.
+func (r *Reporter) ReportFinal() string {
+ if r == nil || r.collector == nil {
+ return ""
+ }
+
+ report := r.collector.GenerateReport()
+
+ if r.verbose {
+ return r.formatVerboseReport(report)
+ }
+
+ return r.formatBasicReport(report.Summary)
+}
+
+// formatBasicProgress formats basic progress information.
+func (r *Reporter) formatBasicProgress(metrics ProcessingMetrics) string {
+ b := newReportBuilder()
+
+ // Basic stats
+ b.writeString(fmt.Sprintf("Processed: %d files", metrics.ProcessedFiles))
+
+ if metrics.SkippedFiles > 0 {
+ b.writeString(fmt.Sprintf(", Skipped: %d", metrics.SkippedFiles))
+ }
+
+ if metrics.ErrorFiles > 0 {
+ if r.colors {
+ b.writeString(fmt.Sprintf(", \033[31mErrors: %d\033[0m", metrics.ErrorFiles))
+ } else {
+ b.writeString(fmt.Sprintf(", Errors: %d", metrics.ErrorFiles))
+ }
+ }
+
+ // Processing rate
+ if metrics.FilesPerSecond > 0 {
+ b.writeString(fmt.Sprintf(" (%.1f files/sec)", metrics.FilesPerSecond))
+ }
+
+ return b.String()
+}
+
+// formatVerboseProgress formats detailed progress information.
+func (r *Reporter) formatVerboseProgress(metrics ProcessingMetrics) string {
+ b := newReportBuilder()
+
+ // Header
+ b.writeString("=== Processing Statistics ===\n")
+
+ // File counts
+ b.writeString(
+ fmt.Sprintf(
+ "Files - Total: %d, Processed: %d, Skipped: %d, Errors: %d\n",
+ metrics.TotalFiles, metrics.ProcessedFiles, metrics.SkippedFiles, metrics.ErrorFiles,
+ ),
+ )
+
+ // Size information
+ b.writeString(
+ fmt.Sprintf(
+ "Size - Processed: %s, Average: %s\n",
+ r.formatBytes(metrics.ProcessedSize),
+ r.formatBytes(int64(metrics.AverageFileSize)),
+ ),
+ )
+
+ if metrics.LargestFile > 0 {
+ b.writeString(
+ fmt.Sprintf(
+ "File Size Range: %s - %s\n",
+ r.formatBytes(metrics.SmallestFile),
+ r.formatBytes(metrics.LargestFile),
+ ),
+ )
+ }
+
+ // Performance
+ b.writeString(
+ fmt.Sprintf(
+ "Performance - Files/sec: %.1f, MB/sec: %.1f\n",
+ metrics.FilesPerSecond,
+ metrics.BytesPerSecond/float64(shared.BytesPerMB),
+ ),
+ )
+
+ // Memory usage
+ b.writeString(
+ fmt.Sprintf(
+ "Memory - Current: %dMB, Peak: %dMB, Goroutines: %d\n",
+ metrics.CurrentMemoryMB, metrics.PeakMemoryMB, metrics.GoroutineCount,
+ ),
+ )
+
+ // Concurrency
+ b.writeString(
+ fmt.Sprintf(
+ "Concurrency - Current: %d, Max: %d\n",
+ metrics.CurrentConcurrency, metrics.MaxConcurrency,
+ ),
+ )
+
+ // Format breakdown (if available)
+ if len(metrics.FormatCounts) > 0 {
+ b.writeString("Format Breakdown:\n")
+ formats := r.sortedMapKeys(metrics.FormatCounts)
+ for _, format := range formats {
+ count := metrics.FormatCounts[format]
+ b.writeString(fmt.Sprintf(shared.MetricsFmtFileCount, format, count))
+ }
+ }
+
+ // Processing time
+ b.writeString(fmt.Sprintf(shared.MetricsFmtProcessingTime, metrics.ProcessingTime.Truncate(time.Millisecond)))
+
+ return b.String()
+}
+
+// formatBasicReport formats a basic final report.
+func (r *Reporter) formatBasicReport(metrics ProcessingMetrics) string {
+ b := newReportBuilder()
+
+ b.writeString("=== Processing Complete ===\n")
+ b.writeString(
+ fmt.Sprintf(
+ "Total Files: %d (Processed: %d, Skipped: %d, Errors: %d)\n",
+ metrics.TotalFiles, metrics.ProcessedFiles, metrics.SkippedFiles, metrics.ErrorFiles,
+ ),
+ )
+
+ b.writeString(
+ fmt.Sprintf(
+ "Total Size: %s, Average Rate: %.1f files/sec\n",
+ r.formatBytes(metrics.ProcessedSize), metrics.FilesPerSecond,
+ ),
+ )
+
+ b.writeString(fmt.Sprintf(shared.MetricsFmtProcessingTime, metrics.ProcessingTime.Truncate(time.Millisecond)))
+
+ return b.String()
+}
+
+// formatVerboseReport formats a comprehensive final report.
+func (r *Reporter) formatVerboseReport(report ProfileReport) string {
+ b := newReportBuilder()
+
+ b.writeString("=== Comprehensive Processing Report ===\n\n")
+
+ r.writeSummarySection(b, report)
+ r.writeFormatBreakdown(b, report)
+ r.writePhaseBreakdown(b, report)
+ r.writeErrorBreakdown(b, report)
+ r.writeResourceUsage(b, report)
+ r.writeFileSizeStats(b, report)
+ r.writeRecommendations(b, report)
+
+ return b.String()
+}
+
+// writeSummarySection writes the summary section of the verbose report.
+//
+//goland:noinspection ALL
+func (r *Reporter) writeSummarySection(b *reportBuilder, report ProfileReport) {
+ metrics := report.Summary
+
+ b.writeString("SUMMARY:\n")
+ b.fprintf(
+ " Files: %d total (%d processed, %d skipped, %d errors)\n",
+ metrics.TotalFiles, metrics.ProcessedFiles, metrics.SkippedFiles, metrics.ErrorFiles,
+ )
+ b.fprintf(
+ " Size: %s processed (avg: %s per file)\n",
+ r.formatBytes(metrics.ProcessedSize), r.formatBytes(int64(metrics.AverageFileSize)),
+ )
+ b.fprintf(
+ " Time: %v (%.1f files/sec, %.1f MB/sec)\n",
+ metrics.ProcessingTime.Truncate(time.Millisecond),
+ metrics.FilesPerSecond, metrics.BytesPerSecond/float64(shared.BytesPerMB),
+ )
+ b.fprintf(" Performance Index: %.1f\n", report.PerformanceIndex)
+}
+
+// writeFormatBreakdown writes the format breakdown section.
+func (r *Reporter) writeFormatBreakdown(b *reportBuilder, report ProfileReport) {
+ if len(report.FormatBreakdown) == 0 {
+ return
+ }
+
+ b.writeString("\nFORMAT BREAKDOWN:\n")
+ formats := make([]string, 0, len(report.FormatBreakdown))
+ for format := range report.FormatBreakdown {
+ formats = append(formats, format)
+ }
+ sort.Strings(formats)
+
+ for _, format := range formats {
+ formatMetrics := report.FormatBreakdown[format]
+ b.fprintf(shared.MetricsFmtFileCount, format, formatMetrics.Count)
+ }
+}
+
+// writePhaseBreakdown writes the phase timing breakdown section.
+func (r *Reporter) writePhaseBreakdown(b *reportBuilder, report ProfileReport) {
+ if len(report.PhaseBreakdown) == 0 {
+ return
+ }
+
+ b.writeString("\nPHASE BREAKDOWN:\n")
+ phases := []string{
+ shared.MetricsPhaseCollection,
+ shared.MetricsPhaseProcessing,
+ shared.MetricsPhaseWriting,
+ shared.MetricsPhaseFinalize,
+ }
+ for _, phase := range phases {
+ if phaseMetrics, exists := report.PhaseBreakdown[phase]; exists {
+ b.fprintf(
+ " %s: %v (%.1f%%)\n",
+ cases.Title(language.English).String(phase),
+ phaseMetrics.TotalTime.Truncate(time.Millisecond),
+ phaseMetrics.Percentage,
+ )
+ }
+ }
+}
+
+// writeErrorBreakdown writes the error breakdown section.
+func (r *Reporter) writeErrorBreakdown(b *reportBuilder, report ProfileReport) {
+ if len(report.ErrorBreakdown) == 0 {
+ return
+ }
+
+ b.writeString("\nERROR BREAKDOWN:\n")
+ errors := r.sortedMapKeys(report.ErrorBreakdown)
+ for _, errorType := range errors {
+ count := report.ErrorBreakdown[errorType]
+ b.fprintf(" %s: %d occurrences\n", errorType, count)
+ }
+}
+
+// writeResourceUsage writes the resource usage section.
+func (r *Reporter) writeResourceUsage(b *reportBuilder, report ProfileReport) {
+ metrics := report.Summary
+ b.writeString("\nRESOURCE USAGE:\n")
+ b.fprintf(
+ " Memory: %dMB current, %dMB peak\n",
+ metrics.CurrentMemoryMB, metrics.PeakMemoryMB,
+ )
+ b.fprintf(
+ " Concurrency: %d current, %d max, %d goroutines\n",
+ metrics.CurrentConcurrency, metrics.MaxConcurrency, metrics.GoroutineCount,
+ )
+}
+
+// writeFileSizeStats writes the file size statistics section.
+func (r *Reporter) writeFileSizeStats(b *reportBuilder, report ProfileReport) {
+ metrics := report.Summary
+ if metrics.ProcessedFiles == 0 {
+ return
+ }
+
+ b.writeString("\nFILE SIZE STATISTICS:\n")
+ b.fprintf(
+ " Range: %s - %s\n",
+ r.formatBytes(metrics.SmallestFile), r.formatBytes(metrics.LargestFile),
+ )
+ b.fprintf(" Average: %s\n", r.formatBytes(int64(metrics.AverageFileSize)))
+}
+
+// writeRecommendations writes the recommendations section.
+func (r *Reporter) writeRecommendations(b *reportBuilder, report ProfileReport) {
+ if len(report.Recommendations) == 0 {
+ return
+ }
+
+ b.writeString("\nRECOMMENDATIONS:\n")
+ for i, rec := range report.Recommendations {
+ b.fprintf(" %d. %s\n", i+1, rec)
+ }
+}
+
+// formatBytes formats byte counts in human-readable format.
+func (r *Reporter) formatBytes(bytes int64) string {
+ if bytes == 0 {
+ return "0B"
+ }
+
+ if bytes < shared.BytesPerKB {
+ return fmt.Sprintf(shared.MetricsFmtBytesShort, bytes)
+ }
+
+ exp := 0
+ for n := bytes / shared.BytesPerKB; n >= shared.BytesPerKB; n /= shared.BytesPerKB {
+ exp++
+ }
+
+ divisor := int64(1)
+ for i := 0; i < exp+1; i++ {
+ divisor *= shared.BytesPerKB
+ }
+
+ return fmt.Sprintf(shared.MetricsFmtBytesHuman, float64(bytes)/float64(divisor), "KMGTPE"[exp])
+}
+
+// sortedMapKeys returns sorted keys from a map for consistent output.
+func (r *Reporter) sortedMapKeys(m map[string]int64) []string {
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ return keys
+}
+
+// QuickStats returns a quick one-line status suitable for progress bars.
+func (r *Reporter) QuickStats() string {
+ if r == nil || r.collector == nil {
+ return "0/0 files"
+ }
+
+ metrics := r.collector.CurrentMetrics()
+
+ status := fmt.Sprintf("%d/%d files", metrics.ProcessedFiles, metrics.TotalFiles)
+ if metrics.FilesPerSecond > 0 {
+ status += fmt.Sprintf(" (%.1f/s)", metrics.FilesPerSecond)
+ }
+
+ if metrics.ErrorFiles > 0 {
+ if r.colors {
+ status += fmt.Sprintf(" \033[31m%d errors\033[0m", metrics.ErrorFiles)
+ } else {
+ status += fmt.Sprintf(" %d errors", metrics.ErrorFiles)
+ }
+ }
+
+ return status
+}
diff --git a/metrics/reporter_test.go b/metrics/reporter_test.go
new file mode 100644
index 0000000..858ccf0
--- /dev/null
+++ b/metrics/reporter_test.go
@@ -0,0 +1,518 @@
+package metrics
+
+import (
+ "errors"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+func TestNewReporter(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, true, true)
+
+ if reporter == nil {
+ t.Fatal("NewReporter returned nil")
+ }
+
+ if reporter.collector != collector {
+ t.Error("Reporter collector not set correctly")
+ }
+
+ if !reporter.verbose {
+ t.Error("Verbose flag not set correctly")
+ }
+
+ if !reporter.colors {
+ t.Error("Colors flag not set correctly")
+ }
+}
+
+func TestReportProgressBasic(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ // Add some test data
+ result := FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: 1024,
+ Success: true,
+ Format: "go",
+ }
+ collector.RecordFileProcessed(result)
+
+ // Wait to ensure FilesPerSecond calculation
+ time.Sleep(10 * time.Millisecond)
+
+ progress := reporter.ReportProgress()
+
+ if !strings.Contains(progress, "Processed: 1 files") {
+ t.Errorf("Expected progress to contain processed files count, got: %s", progress)
+ }
+
+ if !strings.Contains(progress, "files/sec") {
+ t.Errorf("Expected progress to contain files/sec, got: %s", progress)
+ }
+}
+
+func TestReportProgressWithErrors(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ // Add successful file
+ successResult := FileProcessingResult{
+ FilePath: "/test/success.go",
+ FileSize: 1024,
+ Success: true,
+ Format: "go",
+ }
+ collector.RecordFileProcessed(successResult)
+
+ // Add error file
+ errorResult := FileProcessingResult{
+ FilePath: shared.TestPathTestErrorGo,
+ FileSize: 512,
+ Success: false,
+ Error: errors.New(shared.TestErrSyntaxError),
+ }
+ collector.RecordFileProcessed(errorResult)
+
+ progress := reporter.ReportProgress()
+
+ if !strings.Contains(progress, "Processed: 1 files") {
+ t.Errorf("Expected progress to contain processed files count, got: %s", progress)
+ }
+
+ if !strings.Contains(progress, "Errors: 1") {
+ t.Errorf("Expected progress to contain error count, got: %s", progress)
+ }
+}
+
+func TestReportProgressWithSkipped(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ // Add successful file
+ successResult := FileProcessingResult{
+ FilePath: "/test/success.go",
+ FileSize: 1024,
+ Success: true,
+ Format: "go",
+ }
+ collector.RecordFileProcessed(successResult)
+
+ // Add skipped file
+ skippedResult := FileProcessingResult{
+ FilePath: "/test/binary.exe",
+ FileSize: 2048,
+ Success: false,
+ Skipped: true,
+ SkipReason: "binary file",
+ }
+ collector.RecordFileProcessed(skippedResult)
+
+ progress := reporter.ReportProgress()
+
+ if !strings.Contains(progress, "Skipped: 1") {
+ t.Errorf("Expected progress to contain skipped count, got: %s", progress)
+ }
+}
+
+func TestReportProgressVerbose(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, true, false)
+
+ // Add test data
+ files := []FileProcessingResult{
+ {FilePath: shared.TestPathTestFile1Go, FileSize: 1000, Success: true, Format: "go"},
+ {FilePath: shared.TestPathTestFile2JS, FileSize: 2000, Success: true, Format: "js"},
+ {FilePath: "/test/file3.py", FileSize: 1500, Success: true, Format: "py"},
+ }
+
+ for _, file := range files {
+ collector.RecordFileProcessed(file)
+ }
+
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 50*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseProcessing, 100*time.Millisecond)
+
+ progress := reporter.ReportProgress()
+
+ // Check for verbose content
+ if !strings.Contains(progress, "=== Processing Statistics ===") {
+ t.Error("Expected verbose header not found")
+ }
+
+ if !strings.Contains(progress, "Format Breakdown:") {
+ t.Error("Expected format breakdown not found")
+ }
+
+ if !strings.Contains(progress, "go: 1 files") {
+ t.Error("Expected go format count not found")
+ }
+
+ if !strings.Contains(progress, "Memory - Current:") {
+ t.Error("Expected memory information not found")
+ }
+
+ if !strings.Contains(progress, "Concurrency - Current:") {
+ t.Error("Expected concurrency information not found")
+ }
+}
+
+func TestReportFinalBasic(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ // Add test data
+ files := []FileProcessingResult{
+ {FilePath: shared.TestPathTestFile1Go, FileSize: 1000, Success: true, Format: "go"},
+ {FilePath: shared.TestPathTestFile2JS, FileSize: 2000, Success: true, Format: "js"},
+ {
+ FilePath: shared.TestPathTestErrorPy,
+ FileSize: 500,
+ Success: false,
+ Error: errors.New(shared.TestErrSyntaxError),
+ },
+ }
+
+ for _, file := range files {
+ collector.RecordFileProcessed(file)
+ }
+
+ collector.Finish()
+ final := reporter.ReportFinal()
+
+ if !strings.Contains(final, "=== Processing Complete ===") {
+ t.Error("Expected completion header not found")
+ }
+
+ if !strings.Contains(final, "Total Files: 3") {
+ t.Error("Expected total files count not found")
+ }
+
+ if !strings.Contains(final, "Processed: 2") {
+ t.Error("Expected processed files count not found")
+ }
+
+ if !strings.Contains(final, "Errors: 1") {
+ t.Error("Expected error count not found")
+ }
+}
+
+func TestReportFinalVerbose(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, true, false)
+
+ // Add comprehensive test data
+ files := []FileProcessingResult{
+ {FilePath: shared.TestPathTestFile1Go, FileSize: 1000, Success: true, Format: "go"},
+ {FilePath: "/test/file2.go", FileSize: 2000, Success: true, Format: "go"},
+ {FilePath: "/test/file3.js", FileSize: 1500, Success: true, Format: "js"},
+ {
+ FilePath: shared.TestPathTestErrorPy,
+ FileSize: 500,
+ Success: false,
+ Error: errors.New(shared.TestErrSyntaxError),
+ },
+ {FilePath: "/test/skip.bin", FileSize: 3000, Success: false, Skipped: true, SkipReason: "binary"},
+ }
+
+ for _, file := range files {
+ collector.RecordFileProcessed(file)
+ }
+
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 50*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseProcessing, 150*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseWriting, 25*time.Millisecond)
+
+ collector.Finish()
+ final := reporter.ReportFinal()
+
+ // Check comprehensive report sections
+ if !strings.Contains(final, "=== Comprehensive Processing Report ===") {
+ t.Error("Expected comprehensive header not found")
+ }
+
+ if !strings.Contains(final, "SUMMARY:") {
+ t.Error("Expected summary section not found")
+ }
+
+ if !strings.Contains(final, "FORMAT BREAKDOWN:") {
+ t.Error("Expected format breakdown section not found")
+ }
+
+ if !strings.Contains(final, "PHASE BREAKDOWN:") {
+ t.Error("Expected phase breakdown section not found")
+ }
+
+ if !strings.Contains(final, "ERROR BREAKDOWN:") {
+ t.Error("Expected error breakdown section not found")
+ }
+
+ if !strings.Contains(final, "RESOURCE USAGE:") {
+ t.Error("Expected resource usage section not found")
+ }
+
+ if !strings.Contains(final, "FILE SIZE STATISTICS:") {
+ t.Error("Expected file size statistics section not found")
+ }
+
+ if !strings.Contains(final, "RECOMMENDATIONS:") {
+ t.Error("Expected recommendations section not found")
+ }
+
+ // Check specific values
+ if !strings.Contains(final, "go: 2 files") {
+ t.Error("Expected go format count not found")
+ }
+
+ if !strings.Contains(final, "js: 1 files") {
+ t.Error("Expected js format count not found")
+ }
+
+ if !strings.Contains(final, "syntax error: 1 occurrences") {
+ t.Error("Expected error count not found")
+ }
+}
+
+func TestFormatBytes(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ testCases := []struct {
+ bytes int64
+ expected string
+ }{
+ {0, "0B"},
+ {512, "512B"},
+ {1024, "1.0KB"},
+ {1536, "1.5KB"},
+ {1024 * 1024, "1.0MB"},
+ {1024 * 1024 * 1024, "1.0GB"},
+ {5 * 1024 * 1024, "5.0MB"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.expected, func(t *testing.T) {
+ result := reporter.formatBytes(tc.bytes)
+ if result != tc.expected {
+ t.Errorf("formatBytes(%d) = %s, want %s", tc.bytes, result, tc.expected)
+ }
+ })
+ }
+}
+
+func TestGetQuickStats(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ // Add test data
+ files := []FileProcessingResult{
+ {FilePath: shared.TestPathTestFile1Go, FileSize: 1000, Success: true, Format: "go"},
+ {FilePath: shared.TestPathTestFile2JS, FileSize: 2000, Success: true, Format: "js"},
+ {
+ FilePath: shared.TestPathTestErrorPy,
+ FileSize: 500,
+ Success: false,
+ Error: errors.New(shared.TestErrTestErrorMsg),
+ },
+ }
+
+ for _, file := range files {
+ collector.RecordFileProcessed(file)
+ }
+
+ // Wait to ensure rate calculation
+ time.Sleep(10 * time.Millisecond)
+
+ stats := reporter.QuickStats()
+
+ if !strings.Contains(stats, "2/3 files") {
+ t.Errorf("Expected processed/total files, got: %s", stats)
+ }
+
+ if !strings.Contains(stats, "/s)") {
+ t.Errorf("Expected rate information, got: %s", stats)
+ }
+
+ if !strings.Contains(stats, "1 errors") {
+ t.Errorf("Expected error count, got: %s", stats)
+ }
+}
+
+func TestGetQuickStatsWithColors(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, true)
+
+ // Add error file
+ errorResult := FileProcessingResult{
+ FilePath: shared.TestPathTestErrorGo,
+ FileSize: 512,
+ Success: false,
+ Error: errors.New(shared.TestErrTestErrorMsg),
+ }
+ collector.RecordFileProcessed(errorResult)
+
+ stats := reporter.QuickStats()
+
+ // Should contain ANSI color codes for errors
+ if !strings.Contains(stats, "\033[31m") {
+ t.Errorf("Expected color codes for errors, got: %s", stats)
+ }
+
+ if !strings.Contains(stats, "\033[0m") {
+ t.Errorf("Expected color reset code, got: %s", stats)
+ }
+}
+
+func TestReporterEmptyData(t *testing.T) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ // Test with no data
+ progress := reporter.ReportProgress()
+ if !strings.Contains(progress, "Processed: 0 files") {
+ t.Errorf("Expected empty progress report, got: %s", progress)
+ }
+
+ final := reporter.ReportFinal()
+ if !strings.Contains(final, "Total Files: 0") {
+ t.Errorf("Expected empty final report, got: %s", final)
+ }
+
+ stats := reporter.QuickStats()
+ if !strings.Contains(stats, "0/0 files") {
+ t.Errorf("Expected empty stats, got: %s", stats)
+ }
+}
+
+// setupBenchmarkReporter creates a collector with test data for benchmarking.
+func setupBenchmarkReporter(fileCount int, verbose, colors bool) *Reporter {
+ collector := NewCollector()
+
+ // Add a mix of successful, failed, and skipped files
+ for i := 0; i < fileCount; i++ {
+ var result FileProcessingResult
+ switch i % 10 {
+ case 0:
+ result = FileProcessingResult{
+ FilePath: shared.TestPathTestErrorGo,
+ FileSize: 500,
+ Success: false,
+ Error: errors.New(shared.TestErrTestErrorMsg),
+ }
+ case 1:
+ result = FileProcessingResult{
+ FilePath: "/test/binary.exe",
+ FileSize: 2048,
+ Success: false,
+ Skipped: true,
+ SkipReason: "binary file",
+ }
+ default:
+ formats := []string{"go", "js", "py", "ts", "rs", "java", "cpp", "rb"}
+ result = FileProcessingResult{
+ FilePath: shared.TestPathTestFileGo,
+ FileSize: int64(1000 + i*100),
+ Success: true,
+ Format: formats[i%len(formats)],
+ }
+ }
+ collector.RecordFileProcessed(result)
+ }
+
+ collector.RecordPhaseTime(shared.MetricsPhaseCollection, 50*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseProcessing, 150*time.Millisecond)
+ collector.RecordPhaseTime(shared.MetricsPhaseWriting, 25*time.Millisecond)
+
+ return NewReporter(collector, verbose, colors)
+}
+
+func BenchmarkReporterQuickStats(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ files int
+ }{
+ {"10files", 10},
+ {"100files", 100},
+ {"1000files", 1000},
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ reporter := setupBenchmarkReporter(bm.files, false, false)
+ b.ResetTimer()
+
+ for b.Loop() {
+ _ = reporter.QuickStats()
+ }
+ })
+ }
+}
+
+func BenchmarkReporterReportProgress(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ files int
+ verbose bool
+ }{
+ {"basic_10files", 10, false},
+ {"basic_100files", 100, false},
+ {"verbose_10files", 10, true},
+ {"verbose_100files", 100, true},
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ reporter := setupBenchmarkReporter(bm.files, bm.verbose, false)
+ b.ResetTimer()
+
+ for b.Loop() {
+ _ = reporter.ReportProgress()
+ }
+ })
+ }
+}
+
+func BenchmarkReporterReportFinal(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ files int
+ verbose bool
+ }{
+ {"basic_10files", 10, false},
+ {"basic_100files", 100, false},
+ {"basic_1000files", 1000, false},
+ {"verbose_10files", 10, true},
+ {"verbose_100files", 100, true},
+ {"verbose_1000files", 1000, true},
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ reporter := setupBenchmarkReporter(bm.files, bm.verbose, false)
+ reporter.collector.Finish()
+ b.ResetTimer()
+
+ for b.Loop() {
+ _ = reporter.ReportFinal()
+ }
+ })
+ }
+}
+
+func BenchmarkFormatBytes(b *testing.B) {
+ collector := NewCollector()
+ reporter := NewReporter(collector, false, false)
+
+ sizes := []int64{0, 512, 1024, 1024 * 1024, 1024 * 1024 * 1024}
+
+ for b.Loop() {
+ for _, size := range sizes {
+ _ = reporter.formatBytes(size)
+ }
+ }
+}
diff --git a/metrics/types.go b/metrics/types.go
new file mode 100644
index 0000000..4361679
--- /dev/null
+++ b/metrics/types.go
@@ -0,0 +1,134 @@
+// Package metrics provides comprehensive processing statistics and profiling capabilities.
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// ProcessingMetrics provides comprehensive processing statistics.
+type ProcessingMetrics struct {
+ // File processing metrics
+ TotalFiles int64 `json:"total_files"`
+ ProcessedFiles int64 `json:"processed_files"`
+ SkippedFiles int64 `json:"skipped_files"`
+ ErrorFiles int64 `json:"error_files"`
+ LastUpdated time.Time `json:"last_updated"`
+
+ // Size metrics
+ TotalSize int64 `json:"total_size_bytes"`
+ ProcessedSize int64 `json:"processed_size_bytes"`
+ AverageFileSize float64 `json:"average_file_size_bytes"`
+ LargestFile int64 `json:"largest_file_bytes"`
+ SmallestFile int64 `json:"smallest_file_bytes"`
+
+ // Performance metrics
+ StartTime time.Time `json:"start_time"`
+ EndTime time.Time `json:"end_time,omitempty"`
+ ProcessingTime time.Duration `json:"processing_duration"`
+ FilesPerSecond float64 `json:"files_per_second"`
+ BytesPerSecond float64 `json:"bytes_per_second"`
+
+ // Memory and resource metrics
+ PeakMemoryMB int64 `json:"peak_memory_mb"`
+ CurrentMemoryMB int64 `json:"current_memory_mb"`
+ GoroutineCount int `json:"goroutine_count"`
+
+ // Format specific metrics
+ FormatCounts map[string]int64 `json:"format_counts"`
+ ErrorCounts map[string]int64 `json:"error_counts"`
+
+ // Concurrency metrics
+ MaxConcurrency int `json:"max_concurrency"`
+ CurrentConcurrency int32 `json:"current_concurrency"`
+
+ // Phase timings
+ PhaseTimings map[string]time.Duration `json:"phase_timings"`
+}
+
+// Collector collects and manages processing metrics.
+type Collector struct {
+ metrics ProcessingMetrics
+ mu sync.RWMutex
+ startTime time.Time
+ lastUpdate time.Time
+
+ // Atomic counters for high-concurrency access
+ totalFiles int64
+ processedFiles int64
+ skippedFiles int64
+ errorFiles int64
+ totalSize int64
+ processedSize int64
+ largestFile int64
+ smallestFile int64 // Using max int64 as initial value to track minimum
+
+ // Concurrency tracking
+ concurrency int32
+ peakConcurrency int32
+
+ // Format and error tracking with mutex protection
+ formatCounts map[string]int64
+ errorCounts map[string]int64
+
+ // Phase timing tracking
+ phaseTimings map[string]time.Duration
+}
+
+// FileProcessingResult represents the result of processing a single file.
+type FileProcessingResult struct {
+ FilePath string `json:"file_path"`
+ FileSize int64 `json:"file_size"`
+ Format string `json:"format"`
+ ProcessingTime time.Duration `json:"processing_time"`
+ Success bool `json:"success"`
+ Error error `json:"error,omitempty"`
+ Skipped bool `json:"skipped"`
+ SkipReason string `json:"skip_reason,omitempty"`
+}
+
+// ProfileReport represents a comprehensive profiling report.
+type ProfileReport struct {
+ Summary ProcessingMetrics `json:"summary"`
+ TopLargestFiles []FileInfo `json:"top_largest_files"`
+ TopSlowestFiles []FileInfo `json:"top_slowest_files"`
+ FormatBreakdown map[string]FormatMetrics `json:"format_breakdown"`
+ ErrorBreakdown map[string]int64 `json:"error_breakdown"`
+ HourlyStats []HourlyProcessingStats `json:"hourly_stats,omitempty"`
+ PhaseBreakdown map[string]PhaseMetrics `json:"phase_breakdown"`
+ PerformanceIndex float64 `json:"performance_index"`
+ Recommendations []string `json:"recommendations"`
+}
+
+// FileInfo represents information about a processed file.
+type FileInfo struct {
+ Path string `json:"path"`
+ Size int64 `json:"size"`
+ ProcessingTime time.Duration `json:"processing_time"`
+ Format string `json:"format"`
+}
+
+// FormatMetrics represents metrics for a specific file format.
+type FormatMetrics struct {
+ Count int64 `json:"count"`
+ TotalSize int64 `json:"total_size"`
+ AverageSize float64 `json:"average_size"`
+ TotalProcessingTime time.Duration `json:"total_processing_time"`
+ AverageProcessingTime time.Duration `json:"average_processing_time"`
+}
+
+// HourlyProcessingStats represents processing statistics for an hour.
+type HourlyProcessingStats struct {
+ Hour time.Time `json:"hour"`
+ FilesProcessed int64 `json:"files_processed"`
+ BytesProcessed int64 `json:"bytes_processed"`
+ AverageRate float64 `json:"average_rate"`
+}
+
+// PhaseMetrics represents timing metrics for processing phases.
+type PhaseMetrics struct {
+ TotalTime time.Duration `json:"total_time"`
+ Count int64 `json:"count"`
+ AverageTime time.Duration `json:"average_time"`
+ Percentage float64 `json:"percentage_of_total"`
+}
diff --git a/revive.toml b/revive.toml
index e5cf9de..8209042 100644
--- a/revive.toml
+++ b/revive.toml
@@ -1,58 +1,194 @@
-# revive configuration for gibidify
-# See https://revive.run/ for more information
+# Revive configuration for gibidify project
+# https://revive.run/
+# Migrated from golangci-lint v2.4.0 configuration
+# NOTE: For comprehensive security scanning, also run: gosec ./...
-# Global settings
-ignoreGeneratedHeader = false
-severity = "warning"
+# Global configuration
+ignoreGeneratedHeader = true
+severity = "error"
confidence = 0.8
errorCode = 1
-warningCode = 0
+warningCode = 2
+
+# Enable all rules initially then selectively disable/configure
+enableAllRules = true
+
+# ============================================================================
+# ESSENTIAL ERROR DETECTION (from errcheck, govet, ineffassign, staticcheck, unused)
+# ============================================================================
+
+# Error handling rules (from errcheck)
+[rule.unhandled-error]
-# Enable all rules by default, then selectively disable or configure
-[rule.blank-imports]
-[rule.context-as-argument]
-[rule.context-keys-type]
-[rule.dot-imports]
[rule.error-return]
[rule.error-strings]
[rule.error-naming]
-[rule.exported]
-[rule.if-return]
-[rule.increment-decrement]
-[rule.var-naming]
-[rule.var-declaration]
-[rule.package-comments]
+[rule.errorf]
+
+# Code correctness rules (from govet)
+[rule.atomic]
+[rule.bool-literal-in-expr]
+[rule.context-as-argument]
+[rule.context-keys-type]
+[rule.dot-imports]
[rule.range]
[rule.receiver-naming]
[rule.time-naming]
[rule.unexported-return]
-[rule.indent-error-flow]
-[rule.errorf]
-[rule.empty-block]
-[rule.superfluous-else]
-[rule.unused-parameter]
[rule.unreachable-code]
[rule.redefines-builtin-id]
+[rule.struct-tag]
+[rule.modifies-value-receiver]
+[rule.constant-logical-expr]
+[rule.unconditional-recursion]
+[rule.identical-branches]
+[rule.defer]
+ arguments = [["call-chain", "loop", "method-call", "recover", "immediate-recover", "return"]]
-# Configure specific rules
-[rule.line-length-limit]
- arguments = [120]
- Exclude = ["**/*_test.go"]
+# Unused code detection (from unused, ineffassign)
+[rule.unused-parameter]
+[rule.unused-receiver]
+ disabled = true # Too strict for interface methods
+[rule.blank-imports]
-[rule.function-length]
- arguments = [50, 100]
- Exclude = ["**/*_test.go"]
+# ============================================================================
+# BUG PREVENTION (from bodyclose, dupl, gochecknoinits, goconst, gocritic, gosec, misspell, unparam, usestdlibvars)
+# ============================================================================
-[rule.max-public-structs]
- arguments = [10]
+# Security rules (from gosec and additional security checks)
+[rule.file-header]
+[rule.datarace]
+[rule.unchecked-type-assertion]
+
+# Code duplication (from dupl - threshold: 150)
+[rule.duplicated-imports]
+
+# Constants (from goconst - min-len: 3, min-occurrences: 3)
+[rule.add-constant]
+ disabled = true # Complex configuration, use goconst separately if needed
+
+# Init functions (from gochecknoinits)
+# Note: main.go and config package are excluded in original
+
+# Comprehensive analysis (from gocritic)
+[rule.if-return]
+[rule.early-return]
+[rule.indent-error-flow]
+[rule.superfluous-else]
+[rule.confusing-naming]
+[rule.get-return]
+[rule.modifies-parameter]
+[rule.confusing-results]
+[rule.deep-exit]
+[rule.flag-parameter]
+ disabled = true # Too strict for CLI applications
+[rule.unnecessary-stmt]
+[rule.empty-block]
+[rule.empty-lines]
+
+# Standard library usage (from usestdlibvars)
+[rule.use-any]
+
+# ============================================================================
+# PERFORMANCE (from prealloc, perfsprint)
+# ============================================================================
+
+[rule.optimize-operands-order]
+[rule.string-format]
+[rule.string-of-int]
+[rule.range-val-in-closure]
+[rule.range-val-address]
+[rule.waitgroup-by-value]
+
+# ============================================================================
+# CODE COMPLEXITY (from gocognit: 15, gocyclo: 15, nestif: 5)
+# ============================================================================
[rule.cognitive-complexity]
- arguments = [15]
- Exclude = ["**/*_test.go"]
+ arguments = [20] # Increased for test files which can be more complex
[rule.cyclomatic]
- arguments = [15]
- Exclude = ["**/*_test.go"]
+ arguments = [15]
+
+[rule.max-control-nesting]
+ arguments = [5]
+
+[rule.function-length]
+ arguments = [100, 0] # statements, lines
+
+[rule.function-result-limit]
+ arguments = [10]
[rule.argument-limit]
- arguments = [5]
+ arguments = [8]
+
+# ============================================================================
+# TESTING (from thelper, errorlint)
+# ============================================================================
+
+# Testing rules are covered by error handling and naming convention rules above
+
+# ============================================================================
+# NAMING CONVENTIONS (from predeclared, varnamelen)
+# ============================================================================
+
+[rule.var-naming]
+ arguments = [["ID", "URL", "API", "HTTP", "JSON", "XML", "UI", "URI", "SQL", "SSH", "EOF", "LHS", "RHS", "TTL", "OK", "UUID", "VM"]]
+
+[rule.exported]
+
+[rule.package-comments]
+ severity = "error"
+
+# Variable name length (from varnamelen)
+# Original config: min-length: 3, with specific ignored names
+# This is partially covered by var-naming rule
+
+# ============================================================================
+# IMPORT MANAGEMENT (from depguard, importas)
+# ============================================================================
+
+[rule.imports-blacklist]
+ arguments = [
+ "io/ioutil", # Use os and io packages instead
+ "github.com/pkg/errors", # Use standard errors package
+ "github.com/golang/protobuf" # Use google.golang.org/protobuf
+ ]
+
+[rule.import-alias-naming]
+ arguments = ["^[a-z][a-z0-9]*$"]
+
+[rule.import-shadowing]
+
+# ============================================================================
+# FORMAT (from nlreturn, lll: 120, godot, tagalign, whitespace)
+# ============================================================================
+
+[rule.line-length-limit]
+ arguments = [120]
+
+[rule.comment-spacings]
+ arguments = ["godot"]
+
+# Note: nlreturn, tagalign, whitespace don't have direct revive equivalents
+
+# ============================================================================
+# ADDITIONAL REVIVE-SPECIFIC RULES
+# ============================================================================
+
+[rule.increment-decrement]
+[rule.var-declaration]
+[rule.useless-break]
+[rule.call-to-gc]
+ disabled = true
+[rule.max-public-structs]
+ disabled = true # Too restrictive for this project
+
+# ============================================================================
+# EXCLUSIONS (from original golangci-lint config)
+# ============================================================================
+# Note: Exclusions in revive are handled differently
+# - main.go: gochecknoinits excluded
+# - _test.go: varnamelen excluded for variable names
+# - internal/config/: gochecknoinits excluded
+# These will need to be handled via file-specific ignores or code comments
diff --git a/scripts/gosec.sh b/scripts/gosec.sh
new file mode 100755
index 0000000..d71a2d7
--- /dev/null
+++ b/scripts/gosec.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+# Gosec security scanner script for individual Go files
+# Runs gosec on each Go directory and reports issues per file
+
+set -euo pipefail
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# If NO_COLOR is set, disable colors
+if [[ -n "${NO_COLOR:-}" ]]; then
+ RED=''
+ GREEN=''
+ YELLOW=''
+ BLUE=''
+ NC=''
+fi
+
+# Function to print status
+print_status() {
+ local msg="$1"
+ echo -e "${BLUE}[INFO]${NC} $msg"
+ return 0
+}
+
+print_warning() {
+ local msg="$1"
+ echo -e "${YELLOW}[WARN]${NC} $msg" >&2
+ return 0
+}
+
+print_error() {
+ local msg="$1"
+ echo -e "${RED}[ERROR]${NC} $msg" >&2
+ return 0
+}
+
+print_success() {
+ local msg="$1"
+ echo -e "${GREEN}[SUCCESS]${NC} $msg"
+ return 0
+}
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+
+cd "$PROJECT_ROOT" || {
+ print_error "Failed to change directory to $PROJECT_ROOT"
+ exit 1
+}
+
+# Check if gosec is available
+if ! command -v gosec &>/dev/null; then
+ print_error "gosec not found. Please install it first:"
+ print_error "go install github.com/securego/gosec/v2/cmd/gosec@latest"
+ exit 1
+fi
+
+# Check if jq is available
+if ! command -v jq &>/dev/null; then
+ print_error "jq not found. Please install it first:"
+ print_error "brew install jq # on macOS"
+ print_error "apt-get install jq # on Ubuntu/Debian"
+ exit 1
+fi
+
+# Get all Go files and unique directories
+GO_FILES=$(find . -name "*.go" -not -path "./.*" | sort)
+TOTAL_FILES=$(echo "$GO_FILES" | wc -l | tr -d ' ')
+
+DIRECTORIES=$(echo "$GO_FILES" | xargs -n1 dirname | sort -u)
+TOTAL_DIRS=$(echo "$DIRECTORIES" | wc -l | tr -d ' ')
+
+print_status "Found $TOTAL_FILES Go files in $TOTAL_DIRS directories"
+print_status "Running gosec security scan..."
+
+ISSUES_FOUND=0
+FILES_WITH_ISSUES=0
+CURRENT_DIR=0
+
+# Create a temporary directory for reports
+TEMP_DIR=$(mktemp -d)
+trap 'rm -rf "$TEMP_DIR"' EXIT
+
+# Process each directory
+while IFS= read -r dir; do
+ CURRENT_DIR=$((CURRENT_DIR + 1))
+ echo -ne "\r${BLUE}[PROGRESS]${NC} Scanning $CURRENT_DIR/$TOTAL_DIRS: $dir "
+
+ # Run gosec on the directory
+ REPORT_FILE="$TEMP_DIR/$(echo "$dir" | tr '/' '_' | tr '.' '_').json"
+ if gosec -fmt=json "$dir" >"$REPORT_FILE" 2>/dev/null; then
+ # Check for issues in all files in this directory
+ ISSUES=$(jq -r '.Issues // [] | length' "$REPORT_FILE" 2>/dev/null || echo "0")
+
+ if [[ "$ISSUES" -gt 0 ]]; then
+ echo # New line after progress
+ print_warning "Found $ISSUES security issue(s) in directory $dir:"
+
+ # Group issues by file and display them
+ jq -r '.Issues[] | "\(.file)|\(.rule_id)|\(.details)|\(.line)"' "$REPORT_FILE" 2>/dev/null | while IFS='|' read -r file rule details line; do
+ if [[ -n "$file" ]]; then
+ # Only count each file once
+ if ! grep -q "$file" "$TEMP_DIR/processed_files.txt" 2>/dev/null; then
+ echo "$file" >>"$TEMP_DIR/processed_files.txt"
+ FILES_WITH_ISSUES=$((FILES_WITH_ISSUES + 1))
+ fi
+ echo " $file:$line → $rule: $details"
+ fi
+ done
+
+ ISSUES_FOUND=$((ISSUES_FOUND + ISSUES))
+ echo
+ fi
+ else
+ echo # New line after progress
+ print_error "Failed to scan directory $dir"
+ fi
+done <<<"$DIRECTORIES"
+
+echo # Final new line after progress
+
+# Count actual files with issues
+if [[ -f "$TEMP_DIR/processed_files.txt" ]]; then
+ FILES_WITH_ISSUES=$(wc -l <"$TEMP_DIR/processed_files.txt" | tr -d ' ')
+fi
+
+# Summary
+print_status "Gosec scan completed!"
+print_status "Directories scanned: $TOTAL_DIRS"
+print_status "Files scanned: $TOTAL_FILES"
+
+if [[ $ISSUES_FOUND -eq 0 ]]; then
+ print_success "No security issues found! 🎉"
+ exit 0
+else
+ print_warning "Found $ISSUES_FOUND security issue(s) in $FILES_WITH_ISSUES file(s)"
+ print_status "Review the issues above and fix them before proceeding"
+ exit 1
+fi
diff --git a/scripts/help.txt b/scripts/help.txt
index afb4db0..be07499 100644
--- a/scripts/help.txt
+++ b/scripts/help.txt
@@ -1,31 +1,24 @@
Available targets:
- install-tools - Install required linting and development tools
- lint - Run all linters (Go, EditorConfig, Makefile, shell, YAML)
- lint-fix - Run linters with auto-fix enabled
- lint-verbose - Run linters with verbose output
- test - Run tests
- test-coverage - Run tests with coverage output
- coverage - Run tests with coverage and generate HTML report
- build - Build the application
- clean - Clean build artifacts
- all - Run lint, test, and build
+ install-tools - Install required linting and development tools
+ lint - Run all linters (Go, Makefile, shell, YAML)
+ lint-fix - Run linters with auto-fix enabled
+ test - Run tests
+ coverage - Run tests with coverage
+ build - Build the application
+ clean - Clean build artifacts
+ all - Run lint, test, and build
Security targets:
- security - Run comprehensive security scan
- security-full - Run full security analysis with all tools
- vuln-check - Check for dependency vulnerabilities
-
-Dependency management:
- deps-check - Check for available dependency updates
- deps-update - Update all dependencies to latest versions
- deps-tidy - Clean up and verify dependencies
+ security - Run comprehensive security scan
+ security-full - Run full security analysis with all tools
+ vuln-check - Check for dependency vulnerabilities
Benchmark targets:
- build-benchmark - Build the benchmark binary
- benchmark - Run all benchmarks
- benchmark-collection - Run file collection benchmarks
- benchmark-processing - Run file processing benchmarks
- benchmark-concurrency - Run concurrency benchmarks
- benchmark-format - Run format benchmarks
+ build-benchmark - Build the benchmark binary
+ benchmark - Run all benchmarks
+ benchmark-collection - Run file collection benchmarks
+ benchmark-processing - Run file processing benchmarks
+ benchmark-concurrency - Run concurrency benchmarks
+ benchmark-format - Run format benchmarks
Run 'make ' to execute a specific target.
diff --git a/scripts/install-tools.sh b/scripts/install-tools.sh
index c8e409c..98a2862 100755
--- a/scripts/install-tools.sh
+++ b/scripts/install-tools.sh
@@ -1,30 +1,153 @@
-#!/bin/sh
-set -eu
+#!/usr/bin/env bash
+set -euo pipefail
-echo "Installing golangci-lint..."
-go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-echo "Installing gofumpt..."
-go install mvdan.cc/gofumpt@latest
-echo "Installing golines..."
-go install github.com/segmentio/golines@latest
-echo "Installing goimports..."
-go install golang.org/x/tools/cmd/goimports@latest
-echo "Installing staticcheck..."
-go install honnef.co/go/tools/cmd/staticcheck@latest
-echo "Installing gosec..."
-go install github.com/securego/gosec/v2/cmd/gosec@latest
-echo "Installing gocyclo..."
-go install github.com/fzipp/gocyclo/cmd/gocyclo@latest
-echo "Installing revive..."
-go install github.com/mgechev/revive@latest
-echo "Installing checkmake..."
-go install github.com/checkmake/checkmake/cmd/checkmake@latest
-echo "Installing shellcheck..."
-go install github.com/koalaman/shellcheck/cmd/shellcheck@latest
-echo "Installing shfmt..."
-go install mvdan.cc/sh/v3/cmd/shfmt@latest
-echo "Installing yamllint (Go-based)..."
-go install github.com/excilsploft/yamllint@latest
-echo "Installing editorconfig-checker..."
-go install github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker@latest
-echo "All tools installed successfully!"
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# If NO_COLOR is set, disable colors
+if [[ -n "${NO_COLOR:-}" ]]; then
+ RED=''
+ GREEN=''
+ YELLOW=''
+ BLUE=''
+ NC=''
+fi
+
+# Function to print status
+print_status() {
+ local msg="$1"
+ echo -e "${BLUE}[INFO]${NC} $msg"
+ return 0
+}
+
+print_warning() {
+ local msg="$1"
+ echo -e "${YELLOW}[WARN]${NC} $msg" >&2
+ return 0
+}
+
+print_error() {
+ local msg="$1"
+ echo -e "${RED}[ERROR]${NC} $msg" >&2
+ return 0
+}
+
+print_success() {
+ local msg="$1"
+ echo -e "${GREEN}[SUCCESS]${NC} $msg"
+ return 0
+}
+
+# Check if required tools are installed
+check_dependencies() {
+ print_status "Checking dependencies..."
+
+ local missing_tools=()
+
+ if ! command -v go &>/dev/null; then
+ missing_tools+=("go")
+ fi
+
+ # Check that tools are installed:
+
+ if [[ ${#missing_tools[@]} -ne 0 ]]; then
+ print_error "Missing required tools: ${missing_tools[*]}"
+ print_error "Please install the missing tools and try again."
+ exit 1
+ fi
+
+ # Security tools
+
+ if ! command -v gosec &>/dev/null; then
+ print_warning "gosec not found, installing..."
+ go install github.com/securego/gosec/v2/cmd/gosec@v2.22.8
+ fi
+
+ if ! command -v govulncheck &>/dev/null; then
+ print_warning "govulncheck not found, installing..."
+ go install golang.org/x/vuln/cmd/govulncheck@v1.1.4
+ fi
+
+ # Linting tools
+
+ if ! command -v revive &>/dev/null; then
+ print_warning "revive not found, installing..."
+ go install github.com/mgechev/revive@v1.11.0
+ fi
+
+ if ! command -v gocyclo &>/dev/null; then
+ print_warning "gocyclo not found, installing..."
+ go install github.com/fzipp/gocyclo/cmd/gocyclo@v0.6.0
+ fi
+
+ if ! command -v checkmake &>/dev/null; then
+ print_warning "checkmake not found, installing..."
+ go install github.com/checkmake/checkmake/cmd/checkmake@v0.2.2
+ fi
+
+ if ! command -v eclint &>/dev/null; then
+ print_warning "eclint not found, installing..."
+ go install gitlab.com/greut/eclint/cmd/eclint@v0.5.1
+ fi
+
+ if ! command -v staticcheck &>/dev/null; then
+ print_warning "staticcheck not found, installing..."
+ go install honnef.co/go/tools/cmd/staticcheck@v0.6.1
+ fi
+
+ if ! command -v yamllint &>/dev/null; then
+ print_warning "yamllint not found, installing..."
+ go install mvdan.cc/yaml/cmd/yaml-lint@v2.4.0
+ fi
+
+ # Formatting tools
+
+ if ! command -v gofumpt &>/dev/null; then
+ print_warning "gofumpt not found, installing..."
+ go install mvdan.cc/gofumpt@v0.8.0
+ fi
+
+ if ! command -v goimports &>/dev/null; then
+ print_warning "goimports not found, installing..."
+ go install golang.org/x/tools/cmd/goimports@v0.36.0
+ fi
+
+ if ! command -v shfmt &>/dev/null; then
+ print_warning "shfmt not found, installing..."
+ go install mvdan.cc/sh/v3/cmd/shfmt@v3.12.0
+ fi
+
+ if ! command -v yamlfmt &>/dev/null; then
+ print_warning "yamlfmt not found, installing..."
+ go install github.com/google/yamlfmt/cmd/yamlfmt@v0.4.0
+ fi
+
+ print_success "All dependencies are available"
+ return 0
+}
+
+# ---
+
+# If this file is sourced, export the functions
+if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
+ export -f check_dependencies print_error print_warning print_success print_status
+fi
+
+# if this file is executed, execute the function
+if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
+ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+
+ cd "$PROJECT_ROOT" || {
+ echo "Failed to change directory to $PROJECT_ROOT"
+ exit 1
+ }
+
+ echo "Installing dev tools for gibidify..."
+
+ check_dependencies
+fi
diff --git a/scripts/lint-fix.sh b/scripts/lint-fix.sh
index 668b1f0..d6153f7 100755
--- a/scripts/lint-fix.sh
+++ b/scripts/lint-fix.sh
@@ -1,10 +1,27 @@
-#!/bin/sh
-set -eu
+#!/usr/bin/env bash
+
+# Enable strict error handling
+set -euo pipefail
+IFS=$'\n\t'
+shopt -s globstar nullglob
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+
+cd "$PROJECT_ROOT" || {
+ echo "Failed to change directory to $PROJECT_ROOT"
+ exit 1
+}
+
+# shellcheck source=scripts/install-tools.sh
+source "$SCRIPT_DIR/install-tools.sh"
+# shellcheck source=scripts/security.sh
+source "$SCRIPT_DIR/security.sh"
+
+check_dependencies
echo "Running gofumpt..."
gofumpt -l -w .
-echo "Running golines..."
-golines -w -m 120 --base-formatter="gofumpt" --shorten-comments .
echo "Running goimports..."
goimports -w -local github.com/ivuorinen/gibidify .
echo "Running go fmt..."
@@ -12,14 +29,23 @@ go fmt ./...
echo "Running go mod tidy..."
go mod tidy
echo "Running shfmt formatting..."
-shfmt -w -i 0 -ci .
-echo "Running golangci-lint with --fix..."
-golangci-lint run --fix ./...
+shfmt -w -i 2 -ci .
+echo "Running revive linter..."
+revive -config revive.toml -formatter friendly -set_exit_status ./...
+echo "Running gosec security linter in parallel..."
+if ! run_gosec_parallel; then
+ echo "gosec found security issues"
+ exit 1
+fi
echo "Auto-fix completed. Running final lint check..."
-golangci-lint run ./...
-echo "Running revive..."
-revive -config revive.toml -formatter friendly ./...
+revive -config revive.toml -formatter friendly -set_exit_status ./...
+if ! run_gosec_parallel; then
+ echo "Final gosec check found security issues"
+ exit 1
+fi
echo "Running checkmake..."
checkmake --config=.checkmake Makefile
-echo "Running yamllint..."
-yamllint .
+echo "Running yamlfmt..."
+yamlfmt -conf .yamlfmt.yml -gitignore_excludes -dstar ./**/*.{yaml,yml}
+echo "Running eclint fix..."
+eclint -fix ./*.go ./*.md benchmark/ cli/ cmd/ config/ fileproc/ metrics/ shared/ templates/ testutil/ scripts/ Makefile
diff --git a/scripts/lint.sh b/scripts/lint.sh
index a041c40..a41798c 100755
--- a/scripts/lint.sh
+++ b/scripts/lint.sh
@@ -1,23 +1,65 @@
-#!/bin/sh
-set -eu
+#!/usr/bin/env bash
-echo "Running golangci-lint..."
-golangci-lint run ./...
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+
+cd "$PROJECT_ROOT" || {
+ echo "Failed to change directory to $PROJECT_ROOT"
+ exit 1
+}
+
+# shellcheck source=scripts/install-tools.sh
+source "$SCRIPT_DIR/install-tools.sh"
+# shellcheck source=scripts/security.sh
+source "$SCRIPT_DIR/security.sh"
+
+check_dependencies
+
+echo "Linting..."
+
+# Track overall exit status
+exit_code=0
echo "Running revive..."
-revive -config revive.toml -formatter friendly ./...
+if ! revive -config revive.toml -formatter friendly -set_exit_status ./...; then
+ exit_code=1
+fi
+
+echo "Running gosec in parallel..."
+
+if ! run_gosec_parallel; then
+ exit_code=1
+fi
echo "Running checkmake..."
-checkmake --config=.checkmake Makefile
-
-echo "Running editorconfig-checker..."
-editorconfig-checker
-
-echo "Running shellcheck..."
-shellcheck scripts/*.sh
+if ! checkmake --config=.checkmake Makefile; then
+ exit_code=1
+fi
echo "Running shfmt check..."
-shfmt -d -i 0 -ci .
+if ! shfmt -d .; then
+ exit_code=1
+fi
echo "Running yamllint..."
-yamllint .
+if command -v yamllint >/dev/null 2>&1; then
+ # Python yamllint supports the .yamllint config; lint the whole repo
+ if ! yamllint -c .yamllint .; then
+ exit_code=1
+ fi
+elif command -v yaml-lint >/dev/null 2>&1; then
+ # Go yaml-lint has different flags and no .yamllint support; use its defaults
+ if ! yaml-lint .; then
+ exit_code=1
+ fi
+else
+ echo "YAML linter not found (yamllint or yaml-lint); skipping."
+fi
+
+echo "Running editorconfig-checker..."
+if ! eclint ./*.go ./*.md benchmark/ cli/ cmd/ config/ fileproc/ metrics/ shared/ templates/ testutil/ scripts/ Makefile; then
+ exit_code=1
+fi
+
+# Exit with failure status if any linter failed
+exit $exit_code
diff --git a/scripts/security-scan.sh b/scripts/security-scan.sh
index 7a8b596..6459aa7 100755
--- a/scripts/security-scan.sh
+++ b/scripts/security-scan.sh
@@ -1,483 +1,258 @@
-#!/bin/sh
-set -eu
+#!/bin/bash
+set -euo pipefail
# Security Scanning Script for gibidify
# This script runs comprehensive security checks locally and in CI
-SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
-cd "$PROJECT_ROOT"
+cd "$PROJECT_ROOT" || {
+ echo "Failed to change directory to $PROJECT_ROOT"
+ exit 1
+}
+
+# shellcheck source=scripts/install-tools.sh
+source "$SCRIPT_DIR/install-tools.sh"
echo "🔒 Starting comprehensive security scan for gibidify..."
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-# Function to print status
-print_status() {
- printf "${BLUE}[INFO]${NC} %s\n" "$1"
-}
-
-print_warning() {
- printf "${YELLOW}[WARN]${NC} %s\n" "$1"
-}
-
-print_error() {
- printf "${RED}[ERROR]${NC} %s\n" "$1"
-}
-
-print_success() {
- printf "${GREEN}[SUCCESS]${NC} %s\n" "$1"
-}
-
-# Run command with timeout if available, otherwise run directly
-# Usage: run_with_timeout DURATION COMMAND [ARGS...]
-run_with_timeout() {
- duration="$1"
- shift
-
- if command -v timeout >/dev/null 2>&1; then
- timeout "$duration" "$@"
- else
- # timeout not available, run command directly
- "$@"
- fi
-}
-
-# Check if required tools are installed
-check_dependencies() {
- print_status "Checking security scanning dependencies..."
-
- missing_tools=""
-
- if ! command -v go >/dev/null 2>&1; then
- missing_tools="${missing_tools}go "
- print_error "Go is not installed. Please install Go first."
- print_error "Visit https://golang.org/doc/install for installation instructions."
- exit 1
- fi
-
- if ! command -v golangci-lint >/dev/null 2>&1; then
- print_warning "golangci-lint not found, installing..."
- go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
- fi
-
- if ! command -v gosec >/dev/null 2>&1; then
- print_warning "gosec not found, installing..."
- go install github.com/securego/gosec/v2/cmd/gosec@latest
- fi
-
- if ! command -v govulncheck >/dev/null 2>&1; then
- print_warning "govulncheck not found, installing..."
- go install golang.org/x/vuln/cmd/govulncheck@latest
- fi
-
- if ! command -v checkmake >/dev/null 2>&1; then
- print_warning "checkmake not found, installing..."
- go install github.com/checkmake/checkmake/cmd/checkmake@latest
- fi
-
- if ! command -v shfmt >/dev/null 2>&1; then
- print_warning "shfmt not found, installing..."
- go install mvdan.cc/sh/v3/cmd/shfmt@latest
- fi
-
- if ! command -v yamllint >/dev/null 2>&1; then
- print_warning "yamllint not found, attempting to install..."
-
- # Update PATH to include common user install directories
- export PATH="$HOME/.local/bin:$HOME/.cargo/bin:$PATH"
-
- installed=0
-
- # Try pipx first
- if command -v pipx >/dev/null 2>&1; then
- print_status "Attempting install with pipx..."
- if pipx install yamllint; then
- # Update PATH to include pipx bin directory
- pipx_bin_dir=$(pipx environment --value PIPX_BIN_DIR 2>/dev/null || echo "$HOME/.local/bin")
- export PATH="$pipx_bin_dir:$PATH"
- installed=1
- else
- print_warning "pipx install yamllint failed, trying next method..."
- fi
- fi
-
- # Try pip3 --user if pipx didn't work
- if [ "$installed" -eq 0 ] && command -v pip3 >/dev/null 2>&1; then
- print_status "Attempting install with pip3 --user..."
- if pip3 install --user yamllint; then
- installed=1
- else
- print_warning "pip3 install yamllint failed, trying next method..."
- fi
- fi
-
- # Try apt-get with smart sudo handling
- if [ "$installed" -eq 0 ] && command -v apt-get >/dev/null 2>&1; then
- sudo_cmd=""
- can_use_apt=false
-
- # Check if running as root
- if [ "$(id -u)" -eq 0 ]; then
- print_status "Running as root, no sudo needed for apt-get..."
- sudo_cmd=""
- can_use_apt=true
- elif command -v sudo >/dev/null 2>&1; then
- # Try non-interactive sudo first
- if sudo -n true 2>/dev/null; then
- print_status "Attempting install with apt-get (sudo cached)..."
- sudo_cmd="sudo"
- can_use_apt=true
- elif [ -t 0 ]; then
- # TTY available, allow interactive sudo
- print_status "Attempting install with apt-get (may prompt for sudo)..."
- sudo_cmd="sudo"
- can_use_apt=true
- else
- print_warning "apt-get available but sudo not accessible (non-interactive, no cache). Skipping apt-get."
- can_use_apt=false
- fi
- else
- print_warning "apt-get available but sudo not found. Skipping apt-get."
- can_use_apt=false
- fi
-
- # Attempt apt-get only if we have permission to use it
- if [ "$can_use_apt" = true ]; then
- if [ -n "$sudo_cmd" ]; then
- if run_with_timeout 300 ${sudo_cmd:+"$sudo_cmd"} apt-get update; then
- if run_with_timeout 300 ${sudo_cmd:+"$sudo_cmd"} apt-get install -y yamllint; then
- installed=1
- else
- print_warning "apt-get install yamllint failed or timed out"
- fi
- else
- print_warning "apt-get update failed or timed out"
- fi
- else
- # Running as root without sudo
- if run_with_timeout 300 apt-get update; then
- if run_with_timeout 300 apt-get install -y yamllint; then
- installed=1
- else
- print_warning "apt-get install yamllint failed or timed out"
- fi
- else
- print_warning "apt-get update failed or timed out"
- fi
- fi
- fi
- fi
-
- # Final check with updated PATH
- if ! command -v yamllint >/dev/null 2>&1; then
- print_error "yamllint installation failed or yamllint still not found in PATH."
- print_error "Please install yamllint manually using one of:"
- print_error " - pipx install yamllint"
- print_error " - pip3 install --user yamllint"
- print_error " - sudo apt-get install yamllint (Debian/Ubuntu)"
- print_error " - brew install yamllint (macOS)"
- exit 1
- fi
-
- print_status "yamllint successfully installed and found in PATH"
- fi
-
- if [ -n "$missing_tools" ]; then
- print_error "Missing required tools: $missing_tools"
- print_error "Please install the missing tools and try again."
- exit 1
- fi
-
- print_success "All dependencies are available"
-}
+check_dependencies
# Run gosec security scanner
run_gosec() {
- print_status "Running gosec security scanner..."
+ print_status "Running gosec security scanner..."
- if gosec -fmt=json -out=gosec-report.json -stdout -verbose=text ./...; then
- print_success "gosec scan completed successfully"
- else
- print_error "gosec found security issues!"
- if [ -f "gosec-report.json" ]; then
- echo "Detailed report saved to gosec-report.json"
- fi
- return 1
- fi
+ if gosec -fmt=json -out=gosec-report.json -stdout -verbose=text ./...; then
+ print_success "gosec scan completed successfully"
+ else
+ print_error "gosec found security issues!"
+ if [[ -f "gosec-report.json" ]]; then
+ echo "Detailed report saved to gosec-report.json"
+ fi
+ return 1
+ fi
}
# Run vulnerability check
run_govulncheck() {
- print_status "Running govulncheck for dependency vulnerabilities..."
+ print_status "Running govulncheck for dependency vulnerabilities..."
- # govulncheck with -json always exits 0, so we need to check the output
- # Redirect stderr to separate file to avoid corrupting JSON output
- govulncheck -json ./... >govulncheck-report.json 2>govulncheck-errors.log
-
- # Check if there were errors during execution
- if [ -s govulncheck-errors.log ]; then
- print_warning "govulncheck produced errors (see govulncheck-errors.log)"
- fi
-
- # Use jq to detect finding entries in the JSON output
- # govulncheck emits a stream of Message objects, need to slurp and filter for Finding field
- if command -v jq >/dev/null 2>&1; then
- # First validate JSON is parseable
- if ! jq -s '.' govulncheck-report.json >/dev/null 2>&1; then
- print_error "govulncheck report contains malformed JSON"
- echo "Unable to parse govulncheck-report.json"
- return 1
- fi
-
- # JSON is valid, now check for findings
- if jq -s -e 'map(select(.Finding)) | length > 0' govulncheck-report.json >/dev/null 2>&1; then
- print_error "Vulnerabilities found in dependencies!"
- echo "Detailed report saved to govulncheck-report.json"
- return 1
- else
- print_success "No known vulnerabilities found in dependencies"
- fi
- else
- # Fallback to grep if jq is not available (case-insensitive to match "Finding")
- if grep -qi '"finding":' govulncheck-report.json 2>/dev/null; then
- print_error "Vulnerabilities found in dependencies!"
- echo "Detailed report saved to govulncheck-report.json"
- return 1
- else
- print_success "No known vulnerabilities found in dependencies"
- fi
- fi
+ if govulncheck -json ./... >govulncheck-report.json 2>&1; then
+ print_success "No known vulnerabilities found in dependencies"
+ else
+ if grep -q '"finding"' govulncheck-report.json 2>/dev/null; then
+ print_error "Vulnerabilities found in dependencies!"
+ echo "Detailed report saved to govulncheck-report.json"
+ return 1
+ else
+ print_success "No vulnerabilities found"
+ fi
+ fi
}
-# Run enhanced golangci-lint with security focus
+# Run revive with comprehensive linting
run_security_lint() {
- print_status "Running security-focused linting..."
+ print_status "Running comprehensive code quality linting with revive..."
- security_linters="gosec,gocritic,bodyclose,rowserrcheck,misspell,unconvert,unparam,unused,errcheck,ineffassign,staticcheck"
-
- if golangci-lint run --enable="$security_linters" --timeout=5m; then
- print_success "Security linting passed"
- else
- print_error "Security linting found issues!"
- return 1
- fi
+ if revive -config revive.toml -set_exit_status ./...; then
+ print_success "Revive linting passed"
+ else
+ print_error "Revive linting found issues!"
+ return 1
+ fi
}
# Check for potential secrets
check_secrets() {
- print_status "Scanning for potential secrets and sensitive data..."
+ print_status "Scanning for potential secrets and sensitive data..."
- # POSIX-compatible secrets_found flag using a temp file
- secrets_found_file="$(mktemp)" || {
- print_error "Failed to create temporary file with mktemp"
- exit 1
- }
- if [ -z "$secrets_found_file" ]; then
- print_error "mktemp returned empty path"
- exit 1
- fi
- # Clean up temp file on exit and signals (POSIX-portable)
- trap 'rm -f "$secrets_found_file"' 0 HUP INT TERM
+ local secrets_found=false
- # Common secret patterns (POSIX [[:space:]] and here-doc quoting)
- cat <<'PATTERNS' | while IFS= read -r pattern; do
-password[[:space:]]*[:=][[:space:]]*['"][^'"]{3,}['"]
-secret[[:space:]]*[:=][[:space:]]*['"][^'"]{3,}['"]
-key[[:space:]]*[:=][[:space:]]*['"][^'"]{8,}['"]
-token[[:space:]]*[:=][[:space:]]*['"][^'"]{8,}['"]
-api_?key[[:space:]]*[:=][[:space:]]*['"][^'"]{8,}['"]
-aws_?access_?key
-aws_?secret
-AKIA[0-9A-Z]{16}
-github_?token
-private_?key
-PATTERNS
- if [ -n "$pattern" ]; then
- if find . -type f -name "*.go" -exec grep -i -E -H -n -e "$pattern" {} + 2>/dev/null | grep -q .; then
- print_warning "Potential secret pattern found: $pattern"
- touch "$secrets_found_file"
- fi
- fi
- done
+ # Common secret patterns
+ local patterns=(
+ "password\s*[:=]\s*['\"][^'\"]{3,}['\"]"
+ "secret\s*[:=]\s*['\"][^'\"]{3,}['\"]"
+ "key\s*[:=]\s*['\"][^'\"]{8,}['\"]"
+ "token\s*[:=]\s*['\"][^'\"]{8,}['\"]"
+ "api_?key\s*[:=]\s*['\"][^'\"]{8,}['\"]"
+ "aws_?access_?key"
+ "aws_?secret"
+ "AKIA[0-9A-Z]{16}" # AWS Access Key pattern
+ "github_?token"
+ "private_?key"
+ )
- if [ -f "$secrets_found_file" ]; then
- secrets_found=true
- else
- secrets_found=false
- fi
+ for pattern in "${patterns[@]}"; do
+ if grep -r -i -E "$pattern" --include="*.go" . 2>/dev/null; then
+ print_warning "Potential secret pattern found: $pattern"
+ secrets_found=true
+ fi
+ done
- # Check git history for secrets (last 10 commits)
- if git log --oneline -10 2>/dev/null | grep -i -E "(password|secret|key|token)" >/dev/null 2>&1; then
- print_warning "Potential secrets mentioned in recent commit messages"
- secrets_found=true
- fi
+ # Check git history for secrets (last 10 commits)
+ if git log --oneline -10 | grep -i -E "(password|secret|key|token)" >/dev/null 2>&1; then
+ print_warning "Potential secrets mentioned in recent commit messages"
+ secrets_found=true
+ fi
- if [ "$secrets_found" = true ]; then
- print_warning "Potential secrets detected. Please review manually."
- return 1
- else
- print_success "No obvious secrets detected"
- fi
+ if [[ "$secrets_found" = true ]]; then
+ print_warning "Potential secrets detected. Please review manually."
+ return 1
+ else
+ print_success "No obvious secrets detected"
+ fi
}
# Check for hardcoded network addresses
check_hardcoded_addresses() {
- print_status "Checking for hardcoded network addresses..."
+ print_status "Checking for hardcoded network addresses..."
- addresses_found=false
+ local addresses_found=false
- # Look for IP addresses (excluding common safe ones)
- if grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . 2>/dev/null |
- grep -v -E "(127\.0\.0\.1|0\.0\.0\.0|255\.255\.255\.255|localhost)" >/dev/null 2>&1; then
- print_warning "Hardcoded IP addresses found:"
- grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . 2>/dev/null |
- grep -v -E "(127\.0\.0\.1|0\.0\.0\.0|255\.255\.255\.255|localhost)" || true
- addresses_found=true
- fi
+ # Look for IP addresses (excluding common safe ones)
+ if grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . |
+ grep -v -E "(127\.0\.0\.1|0\.0\.0\.0|255\.255\.255\.255|localhost)" >/dev/null 2>&1; then
+ print_warning "Hardcoded IP addresses found:"
+ grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . |
+ grep -v -E "(127\.0\.0\.1|0\.0\.0\.0|255\.255\.255\.255|localhost)" || true
+ addresses_found=true
+ fi
- # Look for URLs (excluding documentation examples and comments)
- if grep -r -E "https?://[^/\s]+" --include="*.go" . 2>/dev/null |
- grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{|//.*https?://)" >/dev/null 2>&1; then
- print_warning "Hardcoded URLs found:"
- grep -r -E "https?://[^/\s]+" --include="*.go" . 2>/dev/null |
- grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{|//.*https?://)" || true
- addresses_found=true
- fi
+ # Look for URLs (excluding documentation examples)
+ if grep -r -E "https?://[^/\s]+" --include="*.go" . |
+ grep -v -E "(example\.com|no-color\.org|localhost|127\.0\.0\.1|\$\{)" >/dev/null 2>&1; then
+ print_warning "Hardcoded URLs found:"
+ grep -r -E "https?://[^/\s]+" --include="*.go" . |
+ grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{)" || true
+ addresses_found=true
+ fi
- if [ "$addresses_found" = true ]; then
- print_warning "Hardcoded network addresses detected. Please review."
- return 1
- else
- print_success "No hardcoded network addresses found"
- fi
+ if [[ "$addresses_found" = true ]]; then
+ print_warning "Hardcoded network addresses detected. Please review."
+ return 1
+ else
+ print_success "No hardcoded network addresses found"
+ fi
}
# Check Docker security (if Dockerfile exists)
check_docker_security() {
- if [ -f "Dockerfile" ]; then
- print_status "Checking Docker security..."
+ if [[ -f "Dockerfile" ]]; then
+ print_status "Checking Docker security..."
- # Basic Dockerfile security checks
- docker_issues=false
+ # Basic Dockerfile security checks
+ local docker_issues=false
- if grep -q "^USER root" Dockerfile; then
- print_warning "Dockerfile runs as root user"
- docker_issues=true
- fi
+ if grep -q "^USER root" Dockerfile; then
+ print_warning "Dockerfile runs as root user"
+ docker_issues=true
+ fi
- if ! grep -q "^USER " Dockerfile; then
- print_warning "Dockerfile doesn't specify a non-root user"
- docker_issues=true
- fi
+ if ! grep -q "^USER " Dockerfile; then
+ print_warning "Dockerfile doesn't specify a non-root user"
+ docker_issues=true
+ fi
- if grep -Eq 'RUN.*(wget|curl)' Dockerfile && ! grep -Eq 'rm.*(wget|curl)' Dockerfile; then
- print_warning "Dockerfile may leave curl/wget installed"
- docker_issues=true
- fi
+ if grep -q "RUN.*wget\|RUN.*curl" Dockerfile && ! grep -q "rm.*wget\|rm.*curl" Dockerfile; then
+ print_warning "Dockerfile may leave curl/wget installed"
+ docker_issues=true
+ fi
- if [ "$docker_issues" = true ]; then
- print_warning "Docker security issues detected"
- return 1
- else
- print_success "Docker security check passed"
- fi
- else
- print_status "No Dockerfile found, skipping Docker security check"
- fi
+ if [[ "$docker_issues" = true ]]; then
+ print_warning "Docker security issues detected"
+ return 1
+ else
+ print_success "Docker security check passed"
+ fi
+ else
+ print_status "No Dockerfile found, skipping Docker security check"
+ fi
}
# Check file permissions
check_file_permissions() {
- print_status "Checking file permissions..."
+ print_status "Checking file permissions..."
- perm_issues=false
+ local perm_issues=false
- # Check for overly permissive files (using octal for cross-platform compatibility)
- # -perm -002 finds files writable by others (works on both BSD and GNU find)
- if find . -type f -perm -002 -not -path "./.git/*" 2>/dev/null | grep -q .; then
- print_warning "World-writable files found:"
- find . -type f -perm -002 -not -path "./.git/*" 2>/dev/null || true
- perm_issues=true
- fi
+ # Check for overly permissive files
+ if find . -type f -perm +002 -not -path "./.git/*" | grep -q .; then
+ print_warning "World-writable files found:"
+ find . -type f -perm +002 -not -path "./.git/*" || true
+ perm_issues=true
+ fi
- # Check for executable files that shouldn't be
- # -perm -111 finds files executable by anyone (works on both BSD and GNU find)
- if find . -type f -name "*.go" -perm -111 -not -path "./.git/*" 2>/dev/null | grep -q .; then
- print_warning "Executable Go files found (should not be executable):"
- find . -type f -name "*.go" -perm -111 -not -path "./.git/*" 2>/dev/null || true
- perm_issues=true
- fi
+ # Check for executable files that shouldn't be
+ if find . -type f -name "*.go" -perm +111 | grep -q .; then
+ print_warning "Executable Go files found (should not be executable):"
+ find . -type f -name "*.go" -perm +111 || true
+ perm_issues=true
+ fi
- if [ "$perm_issues" = true ]; then
- print_warning "File permission issues detected"
- return 1
- else
- print_success "File permissions check passed"
- fi
+ if [[ "$perm_issues" = true ]]; then
+ print_warning "File permission issues detected"
+ return 1
+ else
+ print_success "File permissions check passed"
+ fi
}
# Check Makefile with checkmake
check_makefile() {
- if [ -f "Makefile" ]; then
- print_status "Checking Makefile with checkmake..."
+ if [[ -f "Makefile" ]]; then
+ print_status "Checking Makefile with checkmake..."
- if checkmake --config=.checkmake Makefile; then
- print_success "Makefile check passed"
- else
- print_error "Makefile issues detected!"
- return 1
- fi
- else
- print_status "No Makefile found, skipping checkmake"
- fi
+ if checkmake --config=.checkmake Makefile; then
+ print_success "Makefile check passed"
+ else
+ print_error "Makefile issues detected!"
+ return 1
+ fi
+ else
+ print_status "No Makefile found, skipping checkmake"
+ fi
}
# Check shell scripts with shfmt
check_shell_scripts() {
- print_status "Checking shell script formatting..."
+ print_status "Checking shell script formatting..."
- if find . -name "*.sh" -type f 2>/dev/null | head -1 | grep -q .; then
- if shfmt -d .; then
- print_success "Shell script formatting check passed"
- else
- print_error "Shell script formatting issues detected!"
- return 1
- fi
- else
- print_status "No shell scripts found, skipping shfmt check"
- fi
+ if find . -name "*.sh" -type f | head -1 | grep -q .; then
+ if shfmt -d .; then
+ print_success "Shell script formatting check passed"
+ else
+ print_error "Shell script formatting issues detected!"
+ return 1
+ fi
+ else
+ print_status "No shell scripts found, skipping shfmt check"
+ fi
}
# Check YAML files
check_yaml_files() {
- print_status "Checking YAML files..."
+ print_status "Checking YAML files..."
- if find . \( -name "*.yml" -o -name "*.yaml" \) -type f 2>/dev/null | head -1 | grep -q .; then
- if yamllint .; then
- print_success "YAML files check passed"
- else
- print_error "YAML file issues detected!"
- return 1
- fi
- else
- print_status "No YAML files found, skipping yamllint check"
- fi
+ if find . -name "*.yml" -o -name "*.yaml" -type f | head -1 | grep -q .; then
+ if yamllint -c .yamllint .; then
+ print_success "YAML files check passed"
+ else
+ print_error "YAML file issues detected!"
+ return 1
+ fi
+ else
+ print_status "No YAML files found, skipping yamllint check"
+ fi
}
# Generate security report
generate_report() {
- print_status "Generating security scan report..."
+ print_status "Generating security scan report..."
- report_file="security-report.md"
+ local report_file="security-report.md"
- cat >"$report_file" <"$report_file" <"gosec_${dir//\//_}.log" 2>&1 &
+ else
+ # For subdirectories, exclude vendor and .git
+ gosec -fmt=text -quiet -exclude-dir=vendor -exclude-dir=.git "$dir" >"gosec_${dir//\//_}.log" 2>&1 &
+ fi
+ pids+=($!)
+ done
+
+ # Wait for all gosec processes to complete and check their exit codes
+ for i in "${!pids[@]}"; do
+ local pid="${pids[$i]}"
+ local dir="${go_dirs[$i]}"
+ if ! wait "$pid"; then
+ echo "gosec failed for directory: $dir"
+ cat "gosec_${dir//\//_}.log"
+ # Keep log for inspection/artifacts on failure
+ exit_code=1
+ else
+ # Clean up log file if successful
+ rm -f "gosec_${dir//\//_}.log"
+ fi
+ done
+
+ return $exit_code
+}
+
+# If this file is sourced, export the functions
+if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
+ export -f run_gosec_parallel
+fi
diff --git a/scripts/update-deps.sh b/scripts/update-deps.sh
new file mode 100755
index 0000000..d52cc00
--- /dev/null
+++ b/scripts/update-deps.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+
+cd "$PROJECT_ROOT" || {
+ echo "Failed to change directory to $PROJECT_ROOT"
+ exit 1
+}
+
+# shellcheck source=scripts/install-tools.sh
+source "$SCRIPT_DIR/install-tools.sh"
+
+# Track overall exit status
+exit_code=0
+
+print_status "=== Updating Go Dependencies ==="
+
+# Function to handle rollback if needed
+rollback() {
+ if [[ -f go.mod.backup && -f go.sum.backup ]]; then
+ print_warning "Rolling back changes due to errors..."
+ mv go.mod.backup go.mod
+ mv go.sum.backup go.sum
+ print_success "Rollback completed"
+ fi
+ return 0
+}
+
+# Function to cleanup backup files
+cleanup() {
+ if [[ -f go.mod.backup ]]; then
+ rm go.mod.backup
+ fi
+ if [[ -f go.sum.backup ]]; then
+ rm go.sum.backup
+ fi
+ return 0
+}
+
+# Trap to ensure cleanup on exit
+trap cleanup EXIT
+
+print_status "Creating backup of go.mod and go.sum..."
+cp go.mod go.mod.backup
+cp go.sum go.sum.backup
+
+print_status "Checking current module status..."
+if ! go mod verify; then
+ print_error "Current module verification failed"
+ exit_code=1
+ exit $exit_code
+fi
+
+print_status "Updating dependencies with 'go get -u'..."
+if ! go get -u ./...; then
+ print_error "Failed to update dependencies"
+ rollback
+ exit_code=1
+ exit $exit_code
+fi
+
+print_status "Running 'go mod tidy'..."
+if ! go mod tidy; then
+ print_error "Failed to tidy module dependencies"
+ rollback
+ exit_code=1
+ exit $exit_code
+fi
+
+print_status "Verifying updated dependencies..."
+if ! go mod verify; then
+ print_error "Module verification failed after updates"
+ rollback
+ exit_code=1
+ exit $exit_code
+fi
+
+print_status "Running vulnerability check..."
+if command -v govulncheck >/dev/null 2>&1; then
+ if ! govulncheck ./...; then
+ print_warning "Vulnerability check failed - review output above"
+ print_warning "Consider updating specific vulnerable packages or pinning versions"
+ # Don't fail the script for vulnerabilities, just warn
+ fi
+else
+ print_warning "govulncheck not found - install with: go install golang.org/x/vuln/cmd/govulncheck@latest"
+fi
+
+print_status "Running basic build test..."
+if ! go build ./...; then
+ print_error "Build failed after dependency updates"
+ rollback
+ exit_code=1
+ exit $exit_code
+fi
+
+print_status "Running quick test to ensure functionality..."
+if ! go test -short ./...; then
+ print_error "Tests failed after dependency updates"
+ rollback
+ exit_code=1
+ exit $exit_code
+fi
+
+if [[ $exit_code -eq 0 ]]; then
+ print_success "Dependencies updated successfully!"
+ print_success "Review the changes with 'git diff' before committing"
+ cleanup
+else
+ print_error "Dependency update failed"
+fi
+
+exit $exit_code
diff --git a/shared/constants.go b/shared/constants.go
new file mode 100644
index 0000000..1e5ab00
--- /dev/null
+++ b/shared/constants.go
@@ -0,0 +1,803 @@
+// Package shared provides common constants used across the gibidify application.
+package shared
+
+// Byte Conversion Constants
+const (
+ // BytesPerKB is the number of bytes in a kilobyte (1024).
+ BytesPerKB = 1024
+ // BytesPerMB is the number of bytes in a megabyte (1024 * 1024).
+ BytesPerMB = 1024 * BytesPerKB
+ // BytesPerGB is the number of bytes in a gigabyte (1024 * 1024 * 1024).
+ BytesPerGB = 1024 * BytesPerMB
+)
+
+// Configuration Default Values - Numeric Constants
+const (
+ // ConfigFileSizeLimitDefault is the default maximum file size (5MB).
+ ConfigFileSizeLimitDefault = 5 * BytesPerMB
+ // ConfigFileSizeLimitMin is the minimum allowed file size limit (1KB).
+ ConfigFileSizeLimitMin = BytesPerKB
+ // ConfigFileSizeLimitMax is the maximum allowed file size limit (100MB).
+ ConfigFileSizeLimitMax = 100 * BytesPerMB
+
+ // ConfigMaxFilesDefault is the default maximum number of files to process.
+ ConfigMaxFilesDefault = 10000
+ // ConfigMaxFilesMin is the minimum allowed file count limit.
+ ConfigMaxFilesMin = 1
+ // ConfigMaxFilesMax is the maximum allowed file count limit.
+ ConfigMaxFilesMax = 1000000
+
+ // ConfigMaxTotalSizeDefault is the default maximum total size of files (1GB).
+ ConfigMaxTotalSizeDefault = BytesPerGB
+ // ConfigMaxTotalSizeMin is the minimum allowed total size limit (1MB).
+ ConfigMaxTotalSizeMin = BytesPerMB
+ // ConfigMaxTotalSizeMax is the maximum allowed total size limit (100GB).
+ ConfigMaxTotalSizeMax = 100 * BytesPerGB
+
+ // ConfigFileProcessingTimeoutSecDefault is the default timeout for individual file processing (30 seconds).
+ ConfigFileProcessingTimeoutSecDefault = 30
+ // ConfigFileProcessingTimeoutSecMin is the minimum allowed file processing timeout (1 second).
+ ConfigFileProcessingTimeoutSecMin = 1
+ // ConfigFileProcessingTimeoutSecMax is the maximum allowed file processing timeout (300 seconds).
+ ConfigFileProcessingTimeoutSecMax = 300
+
+ // ConfigOverallTimeoutSecDefault is the default timeout for overall processing (3600 seconds = 1 hour).
+ ConfigOverallTimeoutSecDefault = 3600
+ // ConfigOverallTimeoutSecMin is the minimum allowed overall timeout (10 seconds).
+ ConfigOverallTimeoutSecMin = 10
+ // ConfigOverallTimeoutSecMax is the maximum allowed overall timeout (86400 seconds = 24 hours).
+ ConfigOverallTimeoutSecMax = 86400
+
+ // ConfigMaxConcurrentReadsDefault is the default maximum concurrent file reading operations.
+ ConfigMaxConcurrentReadsDefault = 10
+ // ConfigMaxConcurrentReadsMin is the minimum allowed concurrent reads.
+ ConfigMaxConcurrentReadsMin = 1
+ // ConfigMaxConcurrentReadsMax is the maximum allowed concurrent reads.
+ ConfigMaxConcurrentReadsMax = 100
+
+ // ConfigRateLimitFilesPerSecDefault is the default rate limit for file processing (0 = disabled).
+ ConfigRateLimitFilesPerSecDefault = 0
+ // ConfigRateLimitFilesPerSecMin is the minimum rate limit.
+ ConfigRateLimitFilesPerSecMin = 0
+ // ConfigRateLimitFilesPerSecMax is the maximum rate limit.
+ ConfigRateLimitFilesPerSecMax = 10000
+
+ // ConfigHardMemoryLimitMBDefault is the default hard memory limit (512MB).
+ ConfigHardMemoryLimitMBDefault = 512
+ // ConfigHardMemoryLimitMBMin is the minimum hard memory limit (64MB).
+ ConfigHardMemoryLimitMBMin = 64
+ // ConfigHardMemoryLimitMBMax is the maximum hard memory limit (8192MB = 8GB).
+ ConfigHardMemoryLimitMBMax = 8192
+
+ // ConfigMaxPendingFilesDefault is the default maximum files in file channel buffer.
+ ConfigMaxPendingFilesDefault = 1000
+ // ConfigMaxPendingWritesDefault is the default maximum writes in write channel buffer.
+ ConfigMaxPendingWritesDefault = 100
+ // ConfigMaxMemoryUsageDefault is the default maximum memory usage (100MB).
+ ConfigMaxMemoryUsageDefault = 100 * BytesPerMB
+ // ConfigMemoryCheckIntervalDefault is the default memory check interval (every 1000 files).
+ ConfigMemoryCheckIntervalDefault = 1000
+
+ // ConfigMaxConcurrencyDefault is the default maximum concurrency (high enough for typical systems).
+ ConfigMaxConcurrencyDefault = 32
+
+ // FileTypeRegistryMaxCacheSize is the default maximum cache size for file type registry.
+ FileTypeRegistryMaxCacheSize = 500
+
+ // ConfigMarkdownHeaderLevelDefault is the default header level for file sections.
+ ConfigMarkdownHeaderLevelDefault = 0
+ // ConfigMarkdownMaxLineLengthDefault is the default maximum line length (0 = unlimited).
+ ConfigMarkdownMaxLineLengthDefault = 0
+)
+
+// Configuration Default Values - Boolean Constants
+const (
+ // ConfigFileTypesEnabledDefault is the default state for file type detection.
+ ConfigFileTypesEnabledDefault = true
+
+ // ConfigBackpressureEnabledDefault is the default state for backpressure.
+ ConfigBackpressureEnabledDefault = true
+
+ // ConfigResourceLimitsEnabledDefault is the default state for resource limits.
+ ConfigResourceLimitsEnabledDefault = true
+ // ConfigEnableGracefulDegradationDefault is the default state for graceful degradation.
+ ConfigEnableGracefulDegradationDefault = true
+ // ConfigEnableResourceMonitoringDefault is the default state for resource monitoring.
+ ConfigEnableResourceMonitoringDefault = true
+
+ // ConfigMetadataIncludeStatsDefault is the default for including stats in metadata.
+ ConfigMetadataIncludeStatsDefault = false
+ // ConfigMetadataIncludeTimestampDefault is the default for including timestamp.
+ ConfigMetadataIncludeTimestampDefault = false
+ // ConfigMetadataIncludeFileCountDefault is the default for including file count.
+ ConfigMetadataIncludeFileCountDefault = false
+ // ConfigMetadataIncludeSourcePathDefault is the default for including source path.
+ ConfigMetadataIncludeSourcePathDefault = false
+ // ConfigMetadataIncludeFileTypesDefault is the default for including file types.
+ ConfigMetadataIncludeFileTypesDefault = false
+ // ConfigMetadataIncludeProcessingTimeDefault is the default for including processing time.
+ ConfigMetadataIncludeProcessingTimeDefault = false
+ // ConfigMetadataIncludeTotalSizeDefault is the default for including total size.
+ ConfigMetadataIncludeTotalSizeDefault = false
+ // ConfigMetadataIncludeMetricsDefault is the default for including metrics.
+ ConfigMetadataIncludeMetricsDefault = false
+
+ // ConfigMarkdownUseCodeBlocksDefault is the default for using code blocks.
+ ConfigMarkdownUseCodeBlocksDefault = false
+ // ConfigMarkdownIncludeLanguageDefault is the default for including language in code blocks.
+ ConfigMarkdownIncludeLanguageDefault = false
+ // ConfigMarkdownTableOfContentsDefault is the default for table of contents.
+ ConfigMarkdownTableOfContentsDefault = false
+ // ConfigMarkdownUseCollapsibleDefault is the default for collapsible sections.
+ ConfigMarkdownUseCollapsibleDefault = false
+ // ConfigMarkdownSyntaxHighlightingDefault is the default for syntax highlighting.
+ ConfigMarkdownSyntaxHighlightingDefault = false
+ // ConfigMarkdownLineNumbersDefault is the default for line numbers.
+ ConfigMarkdownLineNumbersDefault = false
+ // ConfigMarkdownFoldLongFilesDefault is the default for folding long files.
+ ConfigMarkdownFoldLongFilesDefault = false
+)
+
+// Configuration Default Values - String Constants
+const (
+ // ConfigOutputTemplateDefault is the default output template (empty = use built-in).
+ ConfigOutputTemplateDefault = ""
+ // ConfigMarkdownCustomCSSDefault is the default custom CSS.
+ ConfigMarkdownCustomCSSDefault = ""
+ // ConfigCustomHeaderDefault is the default custom header template.
+ ConfigCustomHeaderDefault = ""
+ // ConfigCustomFooterDefault is the default custom footer template.
+ ConfigCustomFooterDefault = ""
+ // ConfigCustomFileHeaderDefault is the default custom file header template.
+ ConfigCustomFileHeaderDefault = ""
+ // ConfigCustomFileFooterDefault is the default custom file footer template.
+ ConfigCustomFileFooterDefault = ""
+)
+
+// Configuration Keys - Viper Path Constants
+const (
+ // ConfigKeyFileSizeLimit is the config key for file size limit.
+ ConfigKeyFileSizeLimit = "fileSizeLimit"
+ // ConfigKeyMaxConcurrency is the config key for max concurrency.
+ ConfigKeyMaxConcurrency = "maxConcurrency"
+ // ConfigKeySupportedFormats is the config key for supported formats.
+ ConfigKeySupportedFormats = "supportedFormats"
+ // ConfigKeyFilePatterns is the config key for file patterns.
+ ConfigKeyFilePatterns = "filePatterns"
+ // ConfigKeyIgnoreDirectories is the config key for ignored directories.
+ ConfigKeyIgnoreDirectories = "ignoreDirectories"
+
+ // ConfigKeyFileTypesEnabled is the config key for fileTypes.enabled.
+ ConfigKeyFileTypesEnabled = "fileTypes.enabled"
+ // ConfigKeyFileTypesCustomImageExtensions is the config key for fileTypes.customImageExtensions.
+ ConfigKeyFileTypesCustomImageExtensions = "fileTypes.customImageExtensions"
+ // ConfigKeyFileTypesCustomBinaryExtensions is the config key for fileTypes.customBinaryExtensions.
+ ConfigKeyFileTypesCustomBinaryExtensions = "fileTypes.customBinaryExtensions"
+ // ConfigKeyFileTypesCustomLanguages is the config key for fileTypes.customLanguages.
+ ConfigKeyFileTypesCustomLanguages = "fileTypes.customLanguages"
+ // ConfigKeyFileTypesDisabledImageExtensions is the config key for fileTypes.disabledImageExtensions.
+ ConfigKeyFileTypesDisabledImageExtensions = "fileTypes.disabledImageExtensions"
+ // ConfigKeyFileTypesDisabledBinaryExtensions is the config key for fileTypes.disabledBinaryExtensions.
+ ConfigKeyFileTypesDisabledBinaryExtensions = "fileTypes.disabledBinaryExtensions"
+ // ConfigKeyFileTypesDisabledLanguageExts is the config key for fileTypes.disabledLanguageExtensions.
+ ConfigKeyFileTypesDisabledLanguageExts = "fileTypes.disabledLanguageExtensions"
+
+ // ConfigKeyBackpressureEnabled is the config key for backpressure.enabled.
+ ConfigKeyBackpressureEnabled = "backpressure.enabled"
+ // ConfigKeyBackpressureMaxPendingFiles is the config key for backpressure.maxPendingFiles.
+ ConfigKeyBackpressureMaxPendingFiles = "backpressure.maxPendingFiles"
+ // ConfigKeyBackpressureMaxPendingWrites is the config key for backpressure.maxPendingWrites.
+ ConfigKeyBackpressureMaxPendingWrites = "backpressure.maxPendingWrites"
+ // ConfigKeyBackpressureMaxMemoryUsage is the config key for backpressure.maxMemoryUsage.
+ ConfigKeyBackpressureMaxMemoryUsage = "backpressure.maxMemoryUsage"
+ // ConfigKeyBackpressureMemoryCheckInt is the config key for backpressure.memoryCheckInterval.
+ ConfigKeyBackpressureMemoryCheckInt = "backpressure.memoryCheckInterval"
+
+ // ConfigKeyResourceLimitsEnabled is the config key for resourceLimits.enabled.
+ ConfigKeyResourceLimitsEnabled = "resourceLimits.enabled"
+ // ConfigKeyResourceLimitsMaxFiles is the config key for resourceLimits.maxFiles.
+ ConfigKeyResourceLimitsMaxFiles = "resourceLimits.maxFiles"
+ // ConfigKeyResourceLimitsMaxTotalSize is the config key for resourceLimits.maxTotalSize.
+ ConfigKeyResourceLimitsMaxTotalSize = "resourceLimits.maxTotalSize"
+ // ConfigKeyResourceLimitsFileProcessingTO is the config key for resourceLimits.fileProcessingTimeoutSec.
+ ConfigKeyResourceLimitsFileProcessingTO = "resourceLimits.fileProcessingTimeoutSec"
+ // ConfigKeyResourceLimitsOverallTO is the config key for resourceLimits.overallTimeoutSec.
+ ConfigKeyResourceLimitsOverallTO = "resourceLimits.overallTimeoutSec"
+ // ConfigKeyResourceLimitsMaxConcurrentReads is the config key for resourceLimits.maxConcurrentReads.
+ ConfigKeyResourceLimitsMaxConcurrentReads = "resourceLimits.maxConcurrentReads"
+ // ConfigKeyResourceLimitsRateLimitFilesPerSec is the config key for resourceLimits.rateLimitFilesPerSec.
+ ConfigKeyResourceLimitsRateLimitFilesPerSec = "resourceLimits.rateLimitFilesPerSec"
+ // ConfigKeyResourceLimitsHardMemoryLimitMB is the config key for resourceLimits.hardMemoryLimitMB.
+ ConfigKeyResourceLimitsHardMemoryLimitMB = "resourceLimits.hardMemoryLimitMB"
+ // ConfigKeyResourceLimitsEnableGracefulDeg is the config key for resourceLimits.enableGracefulDegradation.
+ ConfigKeyResourceLimitsEnableGracefulDeg = "resourceLimits.enableGracefulDegradation"
+ // ConfigKeyResourceLimitsEnableMonitoring is the config key for resourceLimits.enableResourceMonitoring.
+ ConfigKeyResourceLimitsEnableMonitoring = "resourceLimits.enableResourceMonitoring"
+
+ // ConfigKeyOutputTemplate is the config key for output.template.
+ ConfigKeyOutputTemplate = "output.template"
+ // ConfigKeyOutputMarkdownHeaderLevel is the config key for output.markdown.headerLevel.
+ ConfigKeyOutputMarkdownHeaderLevel = "output.markdown.headerLevel"
+ // ConfigKeyOutputMarkdownMaxLineLen is the config key for output.markdown.maxLineLength.
+ ConfigKeyOutputMarkdownMaxLineLen = "output.markdown.maxLineLength"
+ // ConfigKeyOutputMarkdownCustomCSS is the config key for output.markdown.customCSS.
+ ConfigKeyOutputMarkdownCustomCSS = "output.markdown.customCSS"
+ // ConfigKeyOutputCustomHeader is the config key for output.custom.header.
+ ConfigKeyOutputCustomHeader = "output.custom.header"
+ // ConfigKeyOutputCustomFooter is the config key for output.custom.footer.
+ ConfigKeyOutputCustomFooter = "output.custom.footer"
+ // ConfigKeyOutputCustomFileHeader is the config key for output.custom.fileHeader.
+ ConfigKeyOutputCustomFileHeader = "output.custom.fileHeader"
+ // ConfigKeyOutputCustomFileFooter is the config key for output.custom.fileFooter.
+ ConfigKeyOutputCustomFileFooter = "output.custom.fileFooter"
+ // ConfigKeyOutputVariables is the config key for output.variables.
+ ConfigKeyOutputVariables = "output.variables"
+)
+
+// Configuration Collections - Slice and Map Variables
+var (
+ // ConfigIgnoredDirectoriesDefault is the default list of directories to ignore.
+ ConfigIgnoredDirectoriesDefault = []string{
+ "vendor", "node_modules", ".git", "dist", "build", "target",
+ "bower_components", "cache", "tmp",
+ }
+
+ // ConfigCustomImageExtensionsDefault is the default list of custom image extensions.
+ ConfigCustomImageExtensionsDefault = []string{}
+
+ // ConfigCustomBinaryExtensionsDefault is the default list of custom binary extensions.
+ ConfigCustomBinaryExtensionsDefault = []string{}
+
+ // ConfigDisabledImageExtensionsDefault is the default list of disabled image extensions.
+ ConfigDisabledImageExtensionsDefault = []string{}
+
+ // ConfigDisabledBinaryExtensionsDefault is the default list of disabled binary extensions.
+ ConfigDisabledBinaryExtensionsDefault = []string{}
+
+ // ConfigDisabledLanguageExtensionsDefault is the default list of disabled language extensions.
+ ConfigDisabledLanguageExtensionsDefault = []string{}
+
+ // ConfigCustomLanguagesDefault is the default custom language mappings.
+ ConfigCustomLanguagesDefault = map[string]string{}
+
+ // ConfigTemplateVariablesDefault is the default template variables.
+ ConfigTemplateVariablesDefault = map[string]string{}
+
+ // ConfigSupportedFormatsDefault is the default list of supported output formats.
+ ConfigSupportedFormatsDefault = []string{"json", "yaml", "markdown"}
+
+ // ConfigFilePatternsDefault is the default list of file patterns (empty = all files).
+ ConfigFilePatternsDefault = []string{}
+)
+
+// Test Paths and Files
+const (
+ // TestSourcePath is a common test source directory path.
+ TestSourcePath = "/test/source"
+ // TestOutputMarkdown is a common test output markdown file path.
+ TestOutputMarkdown = "/test/output.md"
+ // TestFile1 is a common test filename.
+ TestFile1 = "file1.txt"
+ // TestFile2 is a common test filename.
+ TestFile2 = "file2.txt"
+ // TestOutputMD is a common output markdown filename.
+ TestOutputMD = "output.md"
+ // TestMD is a common markdown test file.
+ TestMD = "test.md"
+ // TestFile1Name is test1.txt used in benchmark tests.
+ TestFile1Name = "test1.txt"
+ // TestFile2Name is test2.txt used in benchmark tests.
+ TestFile2Name = "test2.txt"
+ // TestFile3Name is test3.md used in benchmark tests.
+ TestFile3Name = "test3.md"
+ // TestFile1Go is a common Go test file path.
+ TestFile1Go = "/test/file.go"
+ // TestFile1GoAlt is an alternative Go test file path.
+ TestFile1GoAlt = "/test/file1.go"
+ // TestFile2JS is a common JavaScript test file path.
+ TestFile2JS = "/test/file2.js"
+ // TestErrorPy is a Python test file path for error scenarios.
+ TestErrorPy = "/test/error.py"
+ // TestNetworkData is a network data file path for testing.
+ TestNetworkData = "/tmp/network.data"
+)
+
+// Test CLI Flags
+const (
+ // TestCLIFlagSource is the -source flag.
+ TestCLIFlagSource = "-source"
+ // TestCLIFlagDestination is the -destination flag.
+ TestCLIFlagDestination = "-destination"
+ // TestCLIFlagFormat is the -format flag.
+ TestCLIFlagFormat = "-format"
+ // TestCLIFlagNoUI is the -no-ui flag.
+ TestCLIFlagNoUI = "-no-ui"
+ // TestCLIFlagConcurrency is the -concurrency flag.
+ TestCLIFlagConcurrency = "-concurrency"
+)
+
+// Test Content Strings
+const (
+ // TestContent is common test file content.
+ TestContent = "Hello World"
+ // TestConcurrencyList is a common concurrency list for benchmarks.
+ TestConcurrencyList = "1,2,4,8"
+ // TestFormatList is a common format list for tests.
+ TestFormatList = "json,yaml,markdown"
+ // TestSharedGoContent is content for shared.go test files.
+ TestSharedGoContent = "package main\n\nfunc Helper() {}"
+ // TestSafeConversion is used in safe conversion tests.
+ TestSafeConversion = "safe conversion"
+ // TestContentTest is generic test content string.
+ TestContentTest = "test content"
+ // TestContentEmpty is empty content test string.
+ TestContentEmpty = "empty content"
+ // TestContentHelloWorld is hello world test string.
+ TestContentHelloWorld = "hello world"
+ // TestContentDocumentation is documentation test string.
+ TestContentDocumentation = "# Documentation"
+ // TestContentPackageHandlers is package handlers test string.
+ TestContentPackageHandlers = "package handlers"
+)
+
+// Test Error Messages
+const (
+ // TestMsgExpectedError is used when an error was expected but none occurred.
+ TestMsgExpectedError = "Expected error but got none"
+ // TestMsgErrorShouldContain is used to check if error message contains expected text.
+ TestMsgErrorShouldContain = "Error should contain %q, got: %v"
+ // TestMsgUnexpectedError is used when an unexpected error occurred.
+ TestMsgUnexpectedError = "Unexpected error: %v"
+ // TestMsgFailedToClose is used for file close failures.
+ TestMsgFailedToClose = "Failed to close pipe writer: %v"
+ // TestMsgFailedToCreateFile is used for file creation failures.
+ TestMsgFailedToCreateFile = "Failed to create temp file: %v"
+ // TestMsgFailedToRemoveTempFile is used for temp file removal failures.
+ TestMsgFailedToRemoveTempFile = "Failed to remove temp file: %v"
+ // TestMsgFailedToReadOutput is used for output read failures.
+ TestMsgFailedToReadOutput = "Failed to read captured output: %v"
+ // TestMsgFailedToCreateTempDir is used for temp directory creation failures.
+ TestMsgFailedToCreateTempDir = "Failed to create temp dir: %v"
+ // TestMsgOutputMissingSubstring is used when output doesn't contain expected text.
+ TestMsgOutputMissingSubstring = "Output missing expected substring: %q\nFull output:\n%s"
+ // TestMsgOperationFailed is used when an operation fails.
+ TestMsgOperationFailed = "Operation %s failed: %v"
+ // TestMsgOperationNoError is used when an operation expected error but got none.
+ TestMsgOperationNoError = "Operation %s expected error but got none"
+ // TestMsgTimeoutWriterCompletion is used for writer timeout errors.
+ TestMsgTimeoutWriterCompletion = "timeout waiting for writer completion (doneCh)"
+ // TestMsgFailedToCreateTestDir is used for test directory creation failures.
+ TestMsgFailedToCreateTestDir = "Failed to create test directory: %v"
+ // TestMsgFailedToCreateTestFile is used for test file creation failures.
+ TestMsgFailedToCreateTestFile = "Failed to create test file: %v"
+ // TestMsgNewEngineFailed is used when template engine creation fails.
+ TestMsgNewEngineFailed = "NewEngine failed: %v"
+ // TestMsgRenderFileContentFailed is used when rendering file content fails.
+ TestMsgRenderFileContentFailed = "RenderFileContent failed: %v"
+ // TestMsgFailedToCreatePipe is used for pipe creation failures.
+ TestMsgFailedToCreatePipe = "Failed to create pipe: %v"
+ // TestMsgFailedToWriteContent is used for content write failures.
+ TestMsgFailedToWriteContent = "Failed to write content: %v"
+ // TestMsgFailedToCloseFile is used for file close failures.
+ TestMsgFailedToCloseFile = "Failed to close temp file: %v"
+ // TestFileStreamTest is a stream test filename.
+ TestFileStreamTest = "stream_test.txt"
+)
+
+// Test UI Strings
+const (
+ // TestSuggestionsPlain is the plain suggestions header without emoji.
+ TestSuggestionsPlain = "Suggestions:"
+ // TestSuggestionsWarning is the warning-style suggestions header.
+ TestSuggestionsWarning = "⚠ Suggestions:"
+ // TestSuggestionsIcon is the icon-style suggestions header.
+ TestSuggestionsIcon = "💡 Suggestions:"
+ // TestOutputErrorMarker is the error output marker.
+ TestOutputErrorMarker = "❌ Error:"
+ // TestOutputSuccessMarker is the success output marker.
+ TestOutputSuccessMarker = "✓ Success:"
+ // TestSuggestCheckPermissions suggests checking file permissions.
+ TestSuggestCheckPermissions = "Check file/directory permissions"
+ // TestSuggestCheckArguments suggests checking command line arguments.
+ TestSuggestCheckArguments = "Check your command line arguments"
+ // TestSuggestVerifyPath suggests verifying the path.
+ TestSuggestVerifyPath = "Verify the path is correct"
+ // TestSuggestCheckExists suggests checking if path exists.
+ TestSuggestCheckExists = "Check if the path exists:"
+ // TestSuggestCheckFileExists suggests checking if file/directory exists.
+ TestSuggestCheckFileExists = "Check if the file/directory exists:"
+ // TestSuggestUseAbsolutePath suggests using absolute paths.
+ TestSuggestUseAbsolutePath = "Use an absolute path instead of relative"
+)
+
+// Test Error Strings and Categories
+const (
+ // TestErrEmptyFilePath is error message for empty file paths.
+ TestErrEmptyFilePath = "empty file path"
+ // TestErrTestErrorMsg is a generic test error message string.
+ TestErrTestErrorMsg = "test error"
+ // TestErrSyntaxError is a syntax error message.
+ TestErrSyntaxError = "syntax error"
+ // TestErrDiskFull is a disk full error message.
+ TestErrDiskFull = "disk full"
+ // TestErrAccessDenied is an access denied error message.
+ TestErrAccessDenied = "access denied"
+ // TestErrProcessingFailed is a processing failed error message.
+ TestErrProcessingFailed = "processing failed"
+ // TestErrCannotAccessFile is an error message for file access errors.
+ TestErrCannotAccessFile = "cannot access file"
+)
+
+// Test Terminal and UI Strings
+const (
+ // TestTerminalXterm256 is a common terminal type for testing.
+ TestTerminalXterm256 = "xterm-256color"
+ // TestProgressMessage is a common progress message.
+ TestProgressMessage = "Processing files"
+)
+
+// Test Logger Messages
+const (
+ // TestLoggerDebugMsg is a debug level test message.
+ TestLoggerDebugMsg = "debug message"
+ // TestLoggerInfoMsg is an info level test message.
+ TestLoggerInfoMsg = "info message"
+ // TestLoggerWarnMsg is a warn level test message.
+ TestLoggerWarnMsg = "warn message"
+)
+
+// Test Assertion Case Names
+const (
+ // TestCaseSuccessCases is the name for success test cases.
+ TestCaseSuccessCases = "success cases"
+ // TestCaseEmptyOperationName is the name for empty operation test cases.
+ TestCaseEmptyOperationName = "empty operation name"
+ // TestCaseDifferentErrorTypes is the name for different error types test cases.
+ TestCaseDifferentErrorTypes = "different error types"
+ // TestCaseFunctionAvailability is the name for function availability test cases.
+ TestCaseFunctionAvailability = "function availability"
+ // TestCaseMessageTest is the name for message test cases.
+ TestCaseMessageTest = "message test"
+ // TestCaseTestOperation is the name for test operation cases.
+ TestCaseTestOperation = "test operation"
+)
+
+// Test File Extensions and Special Names
+const (
+ // TestExtensionSpecial is a special extension for testing.
+ TestExtensionSpecial = ".SPECIAL"
+ // TestExtensionValid is a valid extension for testing custom extensions.
+ TestExtensionValid = ".valid"
+ // TestExtensionCustom is a custom extension for testing.
+ TestExtensionCustom = ".custom"
+)
+
+// Test Paths
+const (
+ // TestPathBase is a base test path.
+ TestPathBase = "/test/path"
+ // TestPathTestFileGo is a test file.go path.
+ TestPathTestFileGo = "/test/file.go"
+ // TestPathTestFileTXT is a test file.txt path.
+ TestPathTestFileTXT = "/test/file.txt"
+ // TestPathTestErrorGo is a test error.go path.
+ TestPathTestErrorGo = "/test/error.go"
+ // TestPathTestFile1Go is a test file1.go path.
+ TestPathTestFile1Go = "/test/file1.go"
+ // TestPathTestFile2JS is a test file2.js path.
+ TestPathTestFile2JS = "/test/file2.js"
+ // TestPathTestErrorPy is a test error.py path.
+ TestPathTestErrorPy = "/test/error.py"
+ // TestPathTestEmptyTXT is a test empty.txt path.
+ TestPathTestEmptyTXT = "/test/empty.txt"
+ // TestPathTestProject is a test project path.
+ TestPathTestProject = "/test/project"
+ // TestPathTmpNetworkData is a temp network data path.
+ TestPathTmpNetworkData = "/tmp/network.data"
+ // TestPathEtcPasswdTraversal is a path traversal test path.
+ TestPathEtcPasswdTraversal = "../../../etc/passwd" // #nosec G101 -- test constant, not credentials
+)
+
+// Test File Names
+const (
+ // TestFileTXT is a common test file name.
+ TestFileTXT = "test.txt"
+ // TestFileGo is a common Go test file name.
+ TestFileGo = "test.go"
+ // TestFileSharedGo is a common shared Go file name.
+ TestFileSharedGo = "shared.go"
+ // TestFilePNG is a PNG test file name.
+ TestFilePNG = "test.png"
+ // TestFileJPG is a JPG test file name.
+ TestFileJPG = "test.jpg"
+ // TestFileEXE is an EXE test file name.
+ TestFileEXE = "test.exe"
+ // TestFileDLL is a DLL test file name.
+ TestFileDLL = "test.dll"
+ // TestFilePy is a Python test file name.
+ TestFilePy = "test.py"
+ // TestFileValid is a test file with .valid extension.
+ TestFileValid = "test.valid"
+ // TestFileWebP is a WebP test file name.
+ TestFileWebP = "test.webp"
+ // TestFileImageJPG is a JPG test file name.
+ TestFileImageJPG = "image.jpg"
+ // TestFileBinaryDLL is a DLL test file name.
+ TestFileBinaryDLL = "binary.dll"
+ // TestFileScriptPy is a Python script test file name.
+ TestFileScriptPy = "script.py"
+ // TestFileMainGo is a main.go test file name.
+ TestFileMainGo = "main.go"
+ // TestFileHelperGo is a helper.go test file name.
+ TestFileHelperGo = "helper.go"
+ // TestFileJSON is a JSON test file name.
+ TestFileJSON = "test.json"
+ // TestFileConfigJSON is a config.json test file name.
+ TestFileConfigJSON = "config.json"
+ // TestFileReadmeMD is a README.md test file name.
+ TestFileReadmeMD = "README.md"
+ // TestFileOutputTXT is an output.txt test file name.
+ TestFileOutputTXT = "output.txt"
+ // TestFileConfigYAML is a config.yaml test file name.
+ TestFileConfigYAML = "config.yaml"
+ // TestFileGoExt is a file.go test file name.
+ TestFileGoExt = "file.go"
+)
+
+// Test Validation and Operation Strings
+const (
+ // TestOpParsingFlags is used in error messages for flag parsing operations.
+ TestOpParsingFlags = "parsing flags"
+ // TestOpValidatingConcurrency is used for concurrency validation.
+ TestOpValidatingConcurrency = "validating concurrency"
+ // TestMsgInvalidConcurrencyLevel is error message for invalid concurrency.
+ TestMsgInvalidConcurrencyLevel = "invalid concurrency level"
+ // TestKeyName is a common test key name.
+ TestKeyName = "test.key"
+ // TestMsgExpectedExtensionWithoutDot is error message for extension validation.
+ TestMsgExpectedExtensionWithoutDot = "Expected extension without dot to not work"
+ // TestMsgSourcePath is the validation message for source path.
+ TestMsgSourcePath = "source path"
+ // TestMsgEmptyPath is used for empty path test cases.
+ TestMsgEmptyPath = "empty path"
+ // TestMsgPathTraversalAttempt is used for path traversal detection tests.
+ TestMsgPathTraversalAttempt = "path traversal attempt detected"
+ // TestCfgResourceLimitsEnabled is the config key for resource limits enabled.
+ TestCfgResourceLimitsEnabled = "resourceLimits.enabled"
+)
+
+// Test Structured Error Format Strings
+const (
+ // TestFmtExpectedFilePath is format string for file path assertions.
+ TestFmtExpectedFilePath = "Expected FilePath %q, got %q"
+ // TestFmtExpectedLine is format string for line number assertions.
+ TestFmtExpectedLine = "Expected Line %d, got %d"
+ // TestFmtExpectedType is format string for type assertions.
+ TestFmtExpectedType = "Expected Type %v, got %v"
+ // TestFmtExpectedCode is format string for code assertions.
+ TestFmtExpectedCode = "Expected Code %q, got %q"
+ // TestFmtExpectedMessage is format string for message assertions.
+ TestFmtExpectedMessage = "Expected Message %q, got %q"
+ // TestFmtExpectedCount is format string for count assertions.
+ TestFmtExpectedCount = "Expected %d %s, got %d"
+ // TestFmtExpectedGot is generic format string for assertions.
+ TestFmtExpectedGot = "%s returned: %v (type: %T)"
+ // TestFmtExpectedFilesProcessed is format string for files processed assertion.
+ TestFmtExpectedFilesProcessed = "Expected files processed > 0, got %d"
+ // TestFmtExpectedResults is format string for results count assertion.
+ TestFmtExpectedResults = "Expected %d results, got %d"
+ // TestFmtExpectedTotalFiles is format string for total files assertion.
+ TestFmtExpectedTotalFiles = "Expected TotalFiles=1, got %d"
+ // TestFmtExpectedContent is format string for content assertions.
+ TestFmtExpectedContent = "Expected content %q, got %q"
+ // TestFmtExpectedErrorTypeIO is format string for error type IO assertions.
+ TestFmtExpectedErrorTypeIO = "Expected ErrorTypeIO, got %v"
+ // TestFmtDirectoryShouldExist is format string for directory existence assertions.
+ TestFmtDirectoryShouldExist = "Directory %s should exist: %v"
+ // TestFmtPathShouldBeDirectory is format string for directory type assertions.
+ TestFmtPathShouldBeDirectory = "Path %s should be a directory"
+)
+
+// CLI Error Messages
+const (
+ // CLIMsgErrorFormat is the error message format.
+ CLIMsgErrorFormat = "Error: %s"
+ // CLIMsgSuggestions is the suggestions header.
+ CLIMsgSuggestions = "Suggestions:"
+ // CLIMsgCheckFilePermissions suggests checking file permissions.
+ CLIMsgCheckFilePermissions = " • Check file/directory permissions\n"
+ // CLIMsgCheckCommandLineArgs suggests checking command line arguments.
+ CLIMsgCheckCommandLineArgs = " • Check your command line arguments\n"
+ // CLIMsgRunWithHelp suggests running with help flag.
+ CLIMsgRunWithHelp = " • Run with --help for usage information\n"
+)
+
+// CLI Processing Messages
+const (
+ // CLIMsgFoundFilesToProcess is the message format when files are found to process.
+ CLIMsgFoundFilesToProcess = "Found %d files to process"
+ // CLIMsgFileProcessingWorker is the worker identifier for file processing.
+ CLIMsgFileProcessingWorker = "file processing worker"
+)
+
+// CLI UI Constants
+const (
+ // UIProgressBarChar is the character used for progress bar display.
+ UIProgressBarChar = "█"
+)
+
+// Error Format Strings
+const (
+ // ErrorFmtWithCause is the format string for errors with cause information.
+ ErrorFmtWithCause = "%s: %v"
+ // LogLevelWarningAlias is an alias for the warning log level used in validation.
+ LogLevelWarningAlias = "warning"
+)
+
+// File Processing Constants
+const (
+ // FileProcessingStreamChunkSize is the size of chunks when streaming large files (64KB).
+ FileProcessingStreamChunkSize = 64 * BytesPerKB
+ // FileProcessingStreamThreshold is the file size above which we use streaming (1MB).
+ FileProcessingStreamThreshold = BytesPerMB
+ // FileProcessingMaxMemoryBuffer is the maximum memory to use for buffering content (10MB).
+ FileProcessingMaxMemoryBuffer = 10 * BytesPerMB
+)
+
+// File Processing Error Messages
+const (
+ // FileProcessingMsgFailedToProcess is the error message format for processing failures.
+ FileProcessingMsgFailedToProcess = "Failed to process file: %s"
+ // FileProcessingMsgSizeExceeds is the error message when file size exceeds limit.
+ FileProcessingMsgSizeExceeds = "file size (%d bytes) exceeds limit (%d bytes)"
+)
+
+// Metrics Constants
+const (
+ // MetricsPhaseCollection represents the collection phase.
+ MetricsPhaseCollection = "collection"
+ // MetricsPhaseProcessing represents the processing phase.
+ MetricsPhaseProcessing = "processing"
+ // MetricsPhaseWriting represents the writing phase.
+ MetricsPhaseWriting = "writing"
+ // MetricsPhaseFinalize represents the finalize phase.
+ MetricsPhaseFinalize = "finalize"
+ // MetricsMaxInt64 is the maximum int64 value for initial smallest file tracking.
+ MetricsMaxInt64 = int64(^uint64(0) >> 1)
+ // MetricsPerformanceIndexCap is the maximum performance index value for reasonable indexing.
+ MetricsPerformanceIndexCap = 1000
+)
+
+// Metrics Format Strings
+const (
+ // MetricsFmtProcessingTime is the format string for processing time display.
+ MetricsFmtProcessingTime = "Processing Time: %v\n"
+ // MetricsFmtFileCount is the format string for file count display.
+ MetricsFmtFileCount = " %s: %d files\n"
+ // MetricsFmtBytesShort is the format string for bytes without suffix.
+ MetricsFmtBytesShort = "%dB"
+ // MetricsFmtBytesHuman is the format string for human-readable bytes.
+ MetricsFmtBytesHuman = "%.1f%cB"
+)
+
+// ============================================================================
+// YAML WRITER FORMATS
+// ============================================================================
+
+const (
+ // YAMLFmtFileEntry is the format string for YAML file entries.
+ YAMLFmtFileEntry = " - path: %s\n language: %s\n content: |\n"
+)
+
+// ============================================================================
+// YAML/STRING LITERAL VALUES
+// ============================================================================
+
+const (
+ // LiteralTrue is the string literal "true" used in YAML/env comparisons.
+ LiteralTrue = "true"
+ // LiteralFalse is the string literal "false" used in YAML/env comparisons.
+ LiteralFalse = "false"
+ // LiteralNull is the string literal "null" used in YAML comparisons.
+ LiteralNull = "null"
+ // LiteralPackageMain is the string literal "package main" used in test files.
+ LiteralPackageMain = "package main"
+)
+
+// ============================================================================
+// TEMPLATE CONSTANTS
+// ============================================================================
+
+const (
+ // TemplateFmtTimestamp is the Go time format for timestamps in templates.
+ TemplateFmtTimestamp = "2006-01-02 15:04:05"
+)
+
+// ============================================================================
+// BENCHMARK CONSTANTS
+// ============================================================================
+
+const (
+ // BenchmarkDefaultFileCount is the default number of files to create for benchmarks.
+ BenchmarkDefaultFileCount = 100
+ // BenchmarkDefaultIterations is the default number of iterations for benchmarks.
+ BenchmarkDefaultIterations = 1000
+)
+
+// ============================================================================
+// BENCHMARK MESSAGES
+// ============================================================================
+
+const (
+ // BenchmarkMsgFailedToCreateFiles is the error message when benchmark file creation fails.
+ BenchmarkMsgFailedToCreateFiles = "failed to create benchmark files"
+ // BenchmarkMsgCollectionFailed is the error message when collection benchmark fails.
+ BenchmarkMsgCollectionFailed = "benchmark file collection failed"
+ // BenchmarkMsgRunningCollection is the status message when running collection benchmark.
+ BenchmarkMsgRunningCollection = "Running file collection benchmark..."
+ // BenchmarkMsgFileCollectionFailed is the error message when file collection benchmark fails.
+ BenchmarkMsgFileCollectionFailed = "file collection benchmark failed"
+ // BenchmarkMsgConcurrencyFailed is the error message when concurrency benchmark fails.
+ BenchmarkMsgConcurrencyFailed = "concurrency benchmark failed"
+ // BenchmarkMsgFormatFailed is the error message when format benchmark fails.
+ BenchmarkMsgFormatFailed = "format benchmark failed"
+ // BenchmarkFmtSectionHeader is the format string for benchmark section headers.
+ BenchmarkFmtSectionHeader = "=== %s ===\n"
+)
+
+// Test File Permissions
+const (
+ // TestFilePermission is the default file permission for test files.
+ TestFilePermission = 0o644
+ // TestDirPermission is the default directory permission for test directories.
+ TestDirPermission = 0o755
+)
+
+// Log Level Constants
+const (
+ // LogLevelDebug logs all messages including debug information.
+ LogLevelDebug LogLevel = "debug"
+ // LogLevelInfo logs info, warning, and error messages.
+ LogLevelInfo LogLevel = "info"
+ // LogLevelWarn logs warning and error messages only.
+ LogLevelWarn LogLevel = "warn"
+ // LogLevelError logs error messages only.
+ LogLevelError LogLevel = "error"
+)
+
+// ============================================================================
+// FORMAT CONSTANTS
+// ============================================================================
+
+const (
+ // FormatJSON is the JSON format identifier.
+ FormatJSON = "json"
+ // FormatYAML is the YAML format identifier.
+ FormatYAML = "yaml"
+ // FormatMarkdown is the Markdown format identifier.
+ FormatMarkdown = "markdown"
+)
+
+// ============================================================================
+// CLI ARGUMENT NAMES
+// ============================================================================
+
+const (
+ // CLIArgSource is the source argument name.
+ CLIArgSource = "source"
+ // CLIArgFormat is the format argument name.
+ CLIArgFormat = "format"
+ // CLIArgConcurrency is the concurrency argument name.
+ CLIArgConcurrency = "concurrency"
+ // CLIArgAll is the all benchmarks argument value.
+ CLIArgAll = "all"
+)
+
+// ============================================================================
+// APPLICATION CONSTANTS
+// ============================================================================
+
+const (
+ // AppName is the application name.
+ AppName = "gibidify"
+)
diff --git a/shared/conversions.go b/shared/conversions.go
new file mode 100644
index 0000000..f419f24
--- /dev/null
+++ b/shared/conversions.go
@@ -0,0 +1,74 @@
+// Package shared provides common utility functions for gibidify.
+package shared
+
+import (
+ "math"
+)
+
+// SafeUint64ToInt64 safely converts uint64 to int64, checking for overflow.
+// Returns the converted value and true if conversion is safe, or 0 and false if overflow would occur.
+func SafeUint64ToInt64(value uint64) (int64, bool) {
+ if value > math.MaxInt64 {
+ return 0, false
+ }
+
+ return int64(value), true
+}
+
+// SafeIntToInt32 safely converts int to int32, checking for overflow.
+// Returns the converted value and true if conversion is safe, or 0 and false if overflow would occur.
+func SafeIntToInt32(value int) (int32, bool) {
+ if value > math.MaxInt32 || value < math.MinInt32 {
+ return 0, false
+ }
+
+ return int32(value), true
+}
+
+// SafeUint64ToInt64WithDefault safely converts uint64 to int64 with a default value on overflow.
+func SafeUint64ToInt64WithDefault(value uint64, defaultValue int64) int64 {
+ if converted, ok := SafeUint64ToInt64(value); ok {
+ return converted
+ }
+
+ return defaultValue
+}
+
+// SafeIntToInt32WithDefault safely converts int to int32 with a default value on overflow.
+func SafeIntToInt32WithDefault(value int, defaultValue int32) int32 {
+ if converted, ok := SafeIntToInt32(value); ok {
+ return converted
+ }
+
+ return defaultValue
+}
+
+// BytesToMB safely converts bytes (uint64) to megabytes (int64), handling overflow.
+func BytesToMB(bytes uint64) int64 {
+ mb := bytes / uint64(BytesPerMB)
+
+ return SafeUint64ToInt64WithDefault(mb, math.MaxInt64)
+}
+
+// BytesToMBFloat64 safely converts bytes (uint64) to megabytes (float64), handling overflow.
+func BytesToMBFloat64(bytes uint64) float64 {
+ const bytesPerMB = float64(BytesPerMB)
+ if bytes > math.MaxUint64/2 {
+ // Prevent overflow in arithmetic by dividing step by step
+ return float64(bytes/uint64(BytesPerKB)) / float64(BytesPerKB)
+ }
+
+ return float64(bytes) / bytesPerMB
+}
+
+// SafeMemoryDiffMB safely calculates the difference between two uint64 memory values
+// and converts to MB as float64, handling potential underflow.
+func SafeMemoryDiffMB(after, before uint64) float64 {
+ if after >= before {
+ diff := after - before
+
+ return BytesToMBFloat64(diff)
+ }
+ // Handle underflow case - return 0 instead of negative
+ return 0.0
+}
diff --git a/shared/conversions_test.go b/shared/conversions_test.go
new file mode 100644
index 0000000..87bd01e
--- /dev/null
+++ b/shared/conversions_test.go
@@ -0,0 +1,321 @@
+package shared
+
+import (
+ "math"
+ "testing"
+)
+
+func TestSafeUint64ToInt64(t *testing.T) {
+ tests := []struct {
+ name string
+ input uint64
+ expected int64
+ wantOk bool
+ }{
+ {
+ name: TestSafeConversion,
+ input: 1000,
+ expected: 1000,
+ wantOk: true,
+ },
+ {
+ name: "max safe value",
+ input: math.MaxInt64,
+ expected: math.MaxInt64,
+ wantOk: true,
+ },
+ {
+ name: "overflow by one",
+ input: math.MaxInt64 + 1,
+ expected: 0,
+ wantOk: false,
+ },
+ {
+ name: "max uint64 overflow",
+ input: math.MaxUint64,
+ expected: 0,
+ wantOk: false,
+ },
+ {
+ name: "zero value",
+ input: 0,
+ expected: 0,
+ wantOk: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got, ok := SafeUint64ToInt64(tt.input)
+ if ok != tt.wantOk {
+ t.Errorf("SafeUint64ToInt64() ok = %v, want %v", ok, tt.wantOk)
+ }
+ if got != tt.expected {
+ t.Errorf("SafeUint64ToInt64() = %v, want %v", got, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestSafeIntToInt32(t *testing.T) {
+ tests := []struct {
+ name string
+ input int
+ expected int32
+ wantOk bool
+ }{
+ {
+ name: TestSafeConversion,
+ input: 1000,
+ expected: 1000,
+ wantOk: true,
+ },
+ {
+ name: "max safe value",
+ input: math.MaxInt32,
+ expected: math.MaxInt32,
+ wantOk: true,
+ },
+ {
+ name: "min safe value",
+ input: math.MinInt32,
+ expected: math.MinInt32,
+ wantOk: true,
+ },
+ {
+ name: "overflow by one",
+ input: math.MaxInt32 + 1,
+ expected: 0,
+ wantOk: false,
+ },
+ {
+ name: "underflow by one",
+ input: math.MinInt32 - 1,
+ expected: 0,
+ wantOk: false,
+ },
+ {
+ name: "zero value",
+ input: 0,
+ expected: 0,
+ wantOk: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got, ok := SafeIntToInt32(tt.input)
+ if ok != tt.wantOk {
+ t.Errorf("SafeIntToInt32() ok = %v, want %v", ok, tt.wantOk)
+ }
+ if got != tt.expected {
+ t.Errorf("SafeIntToInt32() = %v, want %v", got, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestSafeUint64ToInt64WithDefault(t *testing.T) {
+ tests := []struct {
+ name string
+ input uint64
+ defaultValue int64
+ expected int64
+ }{
+ {
+ name: TestSafeConversion,
+ input: 1000,
+ defaultValue: -1,
+ expected: 1000,
+ },
+ {
+ name: "overflow uses default",
+ input: math.MaxUint64,
+ defaultValue: -1,
+ expected: -1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got := SafeUint64ToInt64WithDefault(tt.input, tt.defaultValue)
+ if got != tt.expected {
+ t.Errorf("SafeUint64ToInt64WithDefault() = %v, want %v", got, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestSafeIntToInt32WithDefault(t *testing.T) {
+ tests := []struct {
+ name string
+ input int
+ defaultValue int32
+ expected int32
+ }{
+ {
+ name: TestSafeConversion,
+ input: 1000,
+ defaultValue: -1,
+ expected: 1000,
+ },
+ {
+ name: "overflow uses default",
+ input: math.MaxInt32 + 1,
+ defaultValue: -1,
+ expected: -1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got := SafeIntToInt32WithDefault(tt.input, tt.defaultValue)
+ if got != tt.expected {
+ t.Errorf("SafeIntToInt32WithDefault() = %v, want %v", got, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestBytesToMB(t *testing.T) {
+ tests := []struct {
+ name string
+ input uint64
+ expected int64
+ }{
+ {
+ name: "zero bytes",
+ input: 0,
+ expected: 0,
+ },
+ {
+ name: "1MB",
+ input: 1024 * 1024,
+ expected: 1,
+ },
+ {
+ name: "1GB",
+ input: 1024 * 1024 * 1024,
+ expected: 1024,
+ },
+ {
+ name: "large value (no overflow)",
+ input: math.MaxUint64,
+ expected: 17592186044415, // MaxUint64 / 1024 / 1024, which is still within int64 range
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got := BytesToMB(tt.input)
+ if got != tt.expected {
+ t.Errorf("BytesToMB() = %v, want %v", got, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestBytesToMBFloat64(t *testing.T) {
+ tests := []struct {
+ name string
+ input uint64
+ expected float64
+ delta float64
+ }{
+ {
+ name: "zero bytes",
+ input: 0,
+ expected: 0,
+ delta: 0.0001,
+ },
+ {
+ name: "1MB",
+ input: 1024 * 1024,
+ expected: 1.0,
+ delta: 0.0001,
+ },
+ {
+ name: "1GB",
+ input: 1024 * 1024 * 1024,
+ expected: 1024.0,
+ delta: 0.0001,
+ },
+ {
+ name: "large value near overflow",
+ input: math.MaxUint64 - 1,
+ expected: float64((math.MaxUint64-1)/1024) / 1024.0,
+ delta: 1.0, // Allow larger delta for very large numbers
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got := BytesToMBFloat64(tt.input)
+ if math.Abs(got-tt.expected) > tt.delta {
+ t.Errorf("BytesToMBFloat64() = %v, want %v (±%v)", got, tt.expected, tt.delta)
+ }
+ },
+ )
+ }
+}
+
+func TestSafeMemoryDiffMB(t *testing.T) {
+ tests := []struct {
+ name string
+ after uint64
+ before uint64
+ expected float64
+ delta float64
+ }{
+ {
+ name: "normal increase",
+ after: 2 * 1024 * 1024, // 2MB
+ before: 1 * 1024 * 1024, // 1MB
+ expected: 1.0,
+ delta: 0.0001,
+ },
+ {
+ name: "no change",
+ after: 1 * 1024 * 1024,
+ before: 1 * 1024 * 1024,
+ expected: 0.0,
+ delta: 0.0001,
+ },
+ {
+ name: "underflow case",
+ after: 1 * 1024 * 1024, // 1MB
+ before: 2 * 1024 * 1024, // 2MB
+ expected: 0.0, // Should return 0 instead of negative
+ delta: 0.0001,
+ },
+ {
+ name: "large difference",
+ after: 2 * 1024 * 1024 * 1024, // 2GB
+ before: 1 * 1024 * 1024 * 1024, // 1GB
+ expected: 1024.0, // 1GB = 1024MB
+ delta: 0.0001,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ got := SafeMemoryDiffMB(tt.after, tt.before)
+ if math.Abs(got-tt.expected) > tt.delta {
+ t.Errorf("SafeMemoryDiffMB() = %v, want %v (±%v)", got, tt.expected, tt.delta)
+ }
+ },
+ )
+ }
+}
diff --git a/gibidiutils/errors.go b/shared/errors.go
similarity index 80%
rename from gibidiutils/errors.go
rename to shared/errors.go
index c41ebcd..662e5c4 100644
--- a/gibidiutils/errors.go
+++ b/shared/errors.go
@@ -1,13 +1,9 @@
-// Package gibidiutils provides common utility functions for gibidify.
-package gibidiutils
+// Package shared provides common utility functions.
+package shared
import (
"errors"
"fmt"
- "sort"
- "strings"
-
- "github.com/sirupsen/logrus"
)
// ErrorType represents the category of error.
@@ -50,11 +46,6 @@ func (e ErrorType) String() string {
}
}
-// Error formatting templates.
-const (
- errorFormatWithCause = "%s: %v"
-)
-
// StructuredError represents a structured error with type, code, and context.
type StructuredError struct {
Type ErrorType
@@ -68,25 +59,11 @@ type StructuredError struct {
// Error implements the error interface.
func (e *StructuredError) Error() string {
- base := fmt.Sprintf("%s [%s]: %s", e.Type, e.Code, e.Message)
- if len(e.Context) > 0 {
- // Sort keys for deterministic output
- keys := make([]string, 0, len(e.Context))
- for k := range e.Context {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- ctxPairs := make([]string, 0, len(e.Context))
- for _, k := range keys {
- ctxPairs = append(ctxPairs, fmt.Sprintf("%s=%v", k, e.Context[k]))
- }
- base = fmt.Sprintf("%s | context: %s", base, strings.Join(ctxPairs, ", "))
- }
if e.Cause != nil {
- return fmt.Sprintf(errorFormatWithCause, base, e.Cause)
+ return fmt.Sprintf("%s [%s]: %s: %v", e.Type, e.Code, e.Message, e.Cause)
}
- return base
+
+ return fmt.Sprintf("%s [%s]: %s", e.Type, e.Code, e.Message)
}
// Unwrap returns the underlying cause error.
@@ -100,27 +77,26 @@ func (e *StructuredError) WithContext(key string, value any) *StructuredError {
e.Context = make(map[string]any)
}
e.Context[key] = value
+
return e
}
// WithFilePath adds file path information to the error.
func (e *StructuredError) WithFilePath(filePath string) *StructuredError {
e.FilePath = filePath
+
return e
}
// WithLine adds line number information to the error.
func (e *StructuredError) WithLine(line int) *StructuredError {
e.Line = line
+
return e
}
// NewStructuredError creates a new structured error.
-func NewStructuredError(
- errorType ErrorType,
- code, message, filePath string,
- context map[string]any,
-) *StructuredError {
+func NewStructuredError(errorType ErrorType, code, message, filePath string, context map[string]any) *StructuredError {
return &StructuredError{
Type: errorType,
Code: code,
@@ -159,51 +135,44 @@ func WrapErrorf(err error, errorType ErrorType, code, format string, args ...any
}
}
-// Common error codes for each type
+// Common error codes for each type.
const (
- // CLI Error Codes
-
+ // CodeCLIMissingSource CLI Error Codes.
CodeCLIMissingSource = "MISSING_SOURCE"
CodeCLIInvalidArgs = "INVALID_ARGS"
- // FileSystem Error Codes
-
+ // CodeFSPathResolution FileSystem Error Codes.
CodeFSPathResolution = "PATH_RESOLUTION"
CodeFSPermission = "PERMISSION_DENIED"
CodeFSNotFound = "NOT_FOUND"
CodeFSAccess = "ACCESS_DENIED"
- // Processing Error Codes
-
+ // CodeProcessingFileRead Processing Error Codes.
CodeProcessingFileRead = "FILE_READ"
CodeProcessingCollection = "COLLECTION"
CodeProcessingTraversal = "TRAVERSAL"
CodeProcessingEncode = "ENCODE"
- // Configuration Error Codes
-
+ // CodeConfigValidation Configuration Error Codes.
CodeConfigValidation = "VALIDATION"
CodeConfigMissing = "MISSING"
- // IO Error Codes
-
+ // CodeIOFileCreate IO Error Codes.
CodeIOFileCreate = "FILE_CREATE"
CodeIOFileWrite = "FILE_WRITE"
CodeIOEncoding = "ENCODING"
CodeIOWrite = "WRITE"
- CodeIOFileRead = "FILE_READ"
+ CodeIORead = "READ"
CodeIOClose = "CLOSE"
- // Validation Error Codes
-
+ // Validation Error Codes.
CodeValidationFormat = "FORMAT"
CodeValidationFileType = "FILE_TYPE"
CodeValidationSize = "SIZE_LIMIT"
CodeValidationRequired = "REQUIRED"
CodeValidationPath = "PATH_TRAVERSAL"
- // Resource Limit Error Codes
-
+ // Resource Limit Error Codes.
CodeResourceLimitFiles = "FILE_COUNT_LIMIT"
CodeResourceLimitTotalSize = "TOTAL_SIZE_LIMIT"
CodeResourceLimitTimeout = "TIMEOUT"
@@ -219,8 +188,8 @@ func NewMissingSourceError() *StructuredError {
return NewStructuredError(
ErrorTypeCLI,
CodeCLIMissingSource,
- "usage: gibidify -source "+
- "[--destination ] [--format=json|yaml|markdown]",
+ "usage: gibidify -source [--destination ] "+
+ "[--format=json|yaml|markdown (default: json)]",
"",
nil,
)
@@ -257,19 +226,20 @@ func LogError(operation string, err error, args ...any) {
msg = fmt.Sprintf(operation, args...)
}
+ logger := GetLogger()
// Check if it's a structured error and log with additional context
- var structErr *StructuredError
+ structErr := &StructuredError{}
if errors.As(err, &structErr) {
- logrus.WithFields(logrus.Fields{
+ fields := map[string]any{
"error_type": structErr.Type.String(),
"error_code": structErr.Code,
"context": structErr.Context,
"file_path": structErr.FilePath,
"line": structErr.Line,
- }).Errorf(errorFormatWithCause, msg, err)
+ }
+ logger.WithFields(fields).Errorf(ErrorFmtWithCause, msg, err)
} else {
- // Log regular errors without structured fields
- logrus.Errorf(errorFormatWithCause, msg, err)
+ logger.Errorf(ErrorFmtWithCause, msg, err)
}
}
}
@@ -281,3 +251,9 @@ func LogErrorf(err error, format string, args ...any) {
LogError(format, err, args...)
}
}
+
+// Test error variables.
+var (
+ // ErrTestError is a generic test error.
+ ErrTestError = errors.New(TestErrTestErrorMsg)
+)
diff --git a/shared/errors_test.go b/shared/errors_test.go
new file mode 100644
index 0000000..85e5aa6
--- /dev/null
+++ b/shared/errors_test.go
@@ -0,0 +1,932 @@
+package shared
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+)
+
+// captureLogOutput captures logger output for testing.
+func captureLogOutput(f func()) string {
+ var buf bytes.Buffer
+ logger := GetLogger()
+ logger.SetOutput(&buf)
+ defer logger.SetOutput(io.Discard) // Set to discard to avoid test output noise
+ f()
+
+ return buf.String()
+}
+
+func TestLogError(t *testing.T) {
+ tests := []struct {
+ name string
+ operation string
+ err error
+ args []any
+ wantLog string
+ wantEmpty bool
+ }{
+ {
+ name: "nil error should not log",
+ operation: "test operation",
+ err: nil,
+ args: nil,
+ wantEmpty: true,
+ },
+ {
+ name: "basic error logging",
+ operation: "failed to read file",
+ err: errors.New("permission denied"),
+ args: nil,
+ wantLog: "failed to read file: permission denied",
+ },
+ {
+ name: "error with formatting args",
+ operation: "failed to process file %s",
+ err: errors.New("file too large"),
+ args: []any{"test.txt"},
+ wantLog: "failed to process file test.txt: file too large",
+ },
+ {
+ name: "error with multiple formatting args",
+ operation: "failed to copy from %s to %s",
+ err: errors.New(TestErrDiskFull),
+ args: []any{"source.txt", "dest.txt"},
+ wantLog: "failed to copy from source.txt to dest.txt: disk full",
+ },
+ {
+ name: "wrapped error",
+ operation: "database operation failed",
+ err: fmt.Errorf("connection error: %w", errors.New("timeout")),
+ args: nil,
+ wantLog: "database operation failed: connection error: timeout",
+ },
+ {
+ name: "empty operation string",
+ operation: "",
+ err: errors.New("some error"),
+ args: nil,
+ wantLog: ": some error",
+ },
+ {
+ name: "operation with percentage sign",
+ operation: "processing 50% complete",
+ err: errors.New("interrupted"),
+ args: nil,
+ wantLog: "processing 50% complete: interrupted",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ output := captureLogOutput(
+ func() {
+ LogError(tt.operation, tt.err, tt.args...)
+ },
+ )
+
+ if tt.wantEmpty {
+ if output != "" {
+ t.Errorf("LogError() logged output when error was nil: %q", output)
+ }
+
+ return
+ }
+
+ if !strings.Contains(output, tt.wantLog) {
+ t.Errorf("LogError() output = %q, want to contain %q", output, tt.wantLog)
+ }
+
+ // Verify it's logged at ERROR level
+ if !strings.Contains(output, "level=error") {
+ t.Errorf("LogError() should log at ERROR level, got: %q", output)
+ }
+ },
+ )
+ }
+}
+
+func TestLogErrorf(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ format string
+ args []any
+ wantLog string
+ wantEmpty bool
+ }{
+ {
+ name: "nil error should not log",
+ err: nil,
+ format: "operation %s failed",
+ args: []any{"test"},
+ wantEmpty: true,
+ },
+ {
+ name: "basic formatted error",
+ err: errors.New("not found"),
+ format: "file %s not found",
+ args: []any{"config.yaml"},
+ wantLog: "file config.yaml not found: not found",
+ },
+ {
+ name: "multiple format arguments",
+ err: errors.New("invalid range"),
+ format: "value %d is not between %d and %d",
+ args: []any{150, 0, 100},
+ wantLog: "value 150 is not between 0 and 100: invalid range",
+ },
+ {
+ name: "no format arguments",
+ err: errors.New("generic error"),
+ format: "operation failed",
+ args: nil,
+ wantLog: "operation failed: generic error",
+ },
+ {
+ name: "format with different types",
+ err: errors.New("type mismatch"),
+ format: "expected %s but got %d",
+ args: []any{"string", 42},
+ wantLog: "expected string but got 42: type mismatch",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ output := captureLogOutput(
+ func() {
+ LogErrorf(tt.err, tt.format, tt.args...)
+ },
+ )
+
+ if tt.wantEmpty {
+ if output != "" {
+ t.Errorf("LogErrorf() logged output when error was nil: %q", output)
+ }
+
+ return
+ }
+
+ if !strings.Contains(output, tt.wantLog) {
+ t.Errorf("LogErrorf() output = %q, want to contain %q", output, tt.wantLog)
+ }
+
+ // Verify it's logged at ERROR level
+ if !strings.Contains(output, "level=error") {
+ t.Errorf("LogErrorf() should log at ERROR level, got: %q", output)
+ }
+ },
+ )
+ }
+}
+
+func TestLogErrorConcurrency(_ *testing.T) {
+ // Test that LogError is safe for concurrent use
+ done := make(chan bool)
+ for i := range 10 {
+ go func(n int) {
+ LogError("concurrent operation", fmt.Errorf("error %d", n))
+ done <- true
+ }(i)
+ }
+
+ // Wait for all goroutines to complete
+ for range 10 {
+ <-done
+ }
+}
+
+func TestLogErrorfConcurrency(_ *testing.T) {
+ // Test that LogErrorf is safe for concurrent use
+ done := make(chan bool)
+ for i := range 10 {
+ go func(n int) {
+ LogErrorf(fmt.Errorf("error %d", n), "concurrent operation %d", n)
+ done <- true
+ }(i)
+ }
+
+ // Wait for all goroutines to complete
+ for range 10 {
+ <-done
+ }
+}
+
+// BenchmarkLogError benchmarks the LogError function.
+func BenchmarkLogError(b *testing.B) {
+ err := errors.New("benchmark error")
+ // Disable output during benchmark
+ logger := GetLogger()
+ logger.SetOutput(io.Discard)
+ defer logger.SetOutput(io.Discard)
+
+ for b.Loop() {
+ LogError("benchmark operation", err)
+ }
+}
+
+// BenchmarkLogErrorf benchmarks the LogErrorf function.
+func BenchmarkLogErrorf(b *testing.B) {
+ err := errors.New("benchmark error")
+ // Disable output during benchmark
+ logger := GetLogger()
+ logger.SetOutput(io.Discard)
+ defer logger.SetOutput(io.Discard)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ LogErrorf(err, "benchmark operation %d", i)
+ }
+}
+
+// BenchmarkLogErrorNil benchmarks LogError with nil error (no-op case).
+func BenchmarkLogErrorNil(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ LogError("benchmark operation", nil)
+ }
+}
+
+func TestErrorTypeString(t *testing.T) {
+ tests := []struct {
+ name string
+ errType ErrorType
+ expected string
+ }{
+ {
+ name: "CLI error type",
+ errType: ErrorTypeCLI,
+ expected: "CLI",
+ },
+ {
+ name: "FileSystem error type",
+ errType: ErrorTypeFileSystem,
+ expected: "FileSystem",
+ },
+ {
+ name: "Processing error type",
+ errType: ErrorTypeProcessing,
+ expected: "Processing",
+ },
+ {
+ name: "Configuration error type",
+ errType: ErrorTypeConfiguration,
+ expected: "Configuration",
+ },
+ {
+ name: "IO error type",
+ errType: ErrorTypeIO,
+ expected: "IO",
+ },
+ {
+ name: "Validation error type",
+ errType: ErrorTypeValidation,
+ expected: "Validation",
+ },
+ {
+ name: "Unknown error type",
+ errType: ErrorTypeUnknown,
+ expected: "Unknown",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := tt.errType.String()
+ if result != tt.expected {
+ t.Errorf("ErrorType.String() = %q, want %q", result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestStructuredErrorError(t *testing.T) {
+ tests := []struct {
+ name string
+ err *StructuredError
+ expected string
+ }{
+ {
+ name: "error without cause",
+ err: &StructuredError{
+ Type: ErrorTypeFileSystem,
+ Code: "ACCESS_DENIED",
+ Message: "permission denied",
+ },
+ expected: "FileSystem [ACCESS_DENIED]: permission denied",
+ },
+ {
+ name: "error with cause",
+ err: &StructuredError{
+ Type: ErrorTypeIO,
+ Code: "WRITE_FAILED",
+ Message: "unable to write file",
+ Cause: errors.New(TestErrDiskFull),
+ },
+ expected: "IO [WRITE_FAILED]: unable to write file: disk full",
+ },
+ {
+ name: "error with empty message",
+ err: &StructuredError{
+ Type: ErrorTypeValidation,
+ Code: "INVALID_FORMAT",
+ Message: "",
+ },
+ expected: "Validation [INVALID_FORMAT]: ",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := tt.err.Error()
+ if result != tt.expected {
+ t.Errorf("StructuredError.Error() = %q, want %q", result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestStructuredErrorUnwrap(t *testing.T) {
+ originalErr := errors.New("original error")
+
+ tests := []struct {
+ name string
+ err *StructuredError
+ expected error
+ }{
+ {
+ name: "error with cause",
+ err: &StructuredError{
+ Type: ErrorTypeIO,
+ Code: "READ_FAILED",
+ Cause: originalErr,
+ },
+ expected: originalErr,
+ },
+ {
+ name: "error without cause",
+ err: &StructuredError{
+ Type: ErrorTypeValidation,
+ Code: "INVALID_INPUT",
+ },
+ expected: nil,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := tt.err.Unwrap()
+ if !errors.Is(result, tt.expected) {
+ t.Errorf("StructuredError.Unwrap() = %v, want %v", result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestStructuredErrorWithContext(t *testing.T) {
+ err := &StructuredError{
+ Type: ErrorTypeProcessing,
+ Code: "PROCESSING_FAILED",
+ Message: "processing error",
+ }
+
+ // Test adding context to error without existing context
+ result := err.WithContext("key1", "value1")
+
+ // Should return the same error instance
+ if !errors.Is(result, err) {
+ t.Error("WithContext() should return the same error instance")
+ }
+
+ // Check that context was added
+ if len(err.Context) != 1 {
+ t.Errorf("Expected context length 1, got %d", len(err.Context))
+ }
+
+ if err.Context["key1"] != "value1" {
+ t.Errorf("Expected context key1=value1, got %v", err.Context["key1"])
+ }
+
+ // Test adding more context
+ err = err.WithContext("key2", 42)
+
+ if len(err.Context) != 2 {
+ t.Errorf("Expected context length 2, got %d", len(err.Context))
+ }
+
+ if err.Context["key2"] != 42 {
+ t.Errorf("Expected context key2=42, got %v", err.Context["key2"])
+ }
+}
+
+func TestStructuredErrorWithFilePath(t *testing.T) {
+ err := &StructuredError{
+ Type: ErrorTypeFileSystem,
+ Code: "FILE_NOT_FOUND",
+ Message: "file not found",
+ }
+
+ filePath := "/path/to/file.txt"
+ result := err.WithFilePath(filePath)
+
+ // Should return the same error instance
+ if !errors.Is(result, err) {
+ t.Error("WithFilePath() should return the same error instance")
+ }
+
+ // Check that file path was set
+ if err.FilePath != filePath {
+ t.Errorf(TestFmtExpectedFilePath, filePath, err.FilePath)
+ }
+
+ // Test overwriting existing file path
+ newPath := "/another/path.txt"
+ err = err.WithFilePath(newPath)
+
+ if err.FilePath != newPath {
+ t.Errorf(TestFmtExpectedFilePath, newPath, err.FilePath)
+ }
+}
+
+func TestStructuredErrorWithLine(t *testing.T) {
+ err := &StructuredError{
+ Type: ErrorTypeValidation,
+ Code: "SYNTAX_ERROR",
+ Message: "syntax error",
+ }
+
+ lineNum := 42
+ result := err.WithLine(lineNum)
+
+ // Should return the same error instance
+ if !errors.Is(result, err) {
+ t.Error("WithLine() should return the same error instance")
+ }
+
+ // Check that line number was set
+ if err.Line != lineNum {
+ t.Errorf(TestFmtExpectedLine, lineNum, err.Line)
+ }
+
+ // Test overwriting existing line number
+ newLine := 100
+ err = err.WithLine(newLine)
+
+ if err.Line != newLine {
+ t.Errorf(TestFmtExpectedLine, newLine, err.Line)
+ }
+}
+
+// validateStructuredErrorBasics validates basic structured error fields.
+func validateStructuredErrorBasics(
+ t *testing.T,
+ err *StructuredError,
+ errorType ErrorType,
+ code, message, filePath string,
+) {
+ t.Helper()
+
+ if err.Type != errorType {
+ t.Errorf(TestFmtExpectedType, errorType, err.Type)
+ }
+ if err.Code != code {
+ t.Errorf(TestFmtExpectedCode, code, err.Code)
+ }
+ if err.Message != message {
+ t.Errorf(TestFmtExpectedMessage, message, err.Message)
+ }
+ if err.FilePath != filePath {
+ t.Errorf(TestFmtExpectedFilePath, filePath, err.FilePath)
+ }
+}
+
+// validateStructuredErrorContext validates context fields.
+func validateStructuredErrorContext(t *testing.T, err *StructuredError, expectedContext map[string]any) {
+ t.Helper()
+
+ if expectedContext == nil {
+ if len(err.Context) != 0 {
+ t.Errorf("Expected empty context, got %v", err.Context)
+ }
+
+ return
+ }
+
+ if len(err.Context) != len(expectedContext) {
+ t.Errorf("Expected context length %d, got %d", len(expectedContext), len(err.Context))
+ }
+
+ for k, v := range expectedContext {
+ if err.Context[k] != v {
+ t.Errorf("Expected context[%q] = %v, got %v", k, v, err.Context[k])
+ }
+ }
+}
+
+func TestNewStructuredError(t *testing.T) {
+ tests := []struct {
+ name string
+ errorType ErrorType
+ code string
+ message string
+ filePath string
+ context map[string]any
+ }{
+ {
+ name: "basic structured error",
+ errorType: ErrorTypeFileSystem,
+ code: "ACCESS_DENIED",
+ message: TestErrAccessDenied,
+ filePath: "/test/file.txt",
+ context: nil,
+ },
+ {
+ name: "error with context",
+ errorType: ErrorTypeValidation,
+ code: "INVALID_FORMAT",
+ message: "invalid format",
+ filePath: "",
+ context: map[string]any{
+ "expected": "json",
+ "got": "xml",
+ },
+ },
+ {
+ name: "error with all fields",
+ errorType: ErrorTypeIO,
+ code: "WRITE_FAILED",
+ message: "write failed",
+ filePath: "/output/file.txt",
+ context: map[string]any{
+ "bytes_written": 1024,
+ "total_size": 2048,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ err := NewStructuredError(tt.errorType, tt.code, tt.message, tt.filePath, tt.context)
+ validateStructuredErrorBasics(t, err, tt.errorType, tt.code, tt.message, tt.filePath)
+ validateStructuredErrorContext(t, err, tt.context)
+ },
+ )
+ }
+}
+
+func TestNewStructuredErrorf(t *testing.T) {
+ tests := []struct {
+ name string
+ errorType ErrorType
+ code string
+ format string
+ args []any
+ expectedMsg string
+ }{
+ {
+ name: "formatted error without args",
+ errorType: ErrorTypeProcessing,
+ code: "PROCESSING_FAILED",
+ format: TestErrProcessingFailed,
+ args: nil,
+ expectedMsg: TestErrProcessingFailed,
+ },
+ {
+ name: "formatted error with args",
+ errorType: ErrorTypeValidation,
+ code: "INVALID_VALUE",
+ format: "invalid value %q, expected between %d and %d",
+ args: []any{"150", 0, 100},
+ expectedMsg: "invalid value \"150\", expected between 0 and 100",
+ },
+ {
+ name: "formatted error with multiple types",
+ errorType: ErrorTypeIO,
+ code: "READ_ERROR",
+ format: "failed to read %d bytes from %s",
+ args: []any{1024, "/tmp/file.txt"},
+ expectedMsg: "failed to read 1024 bytes from /tmp/file.txt",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ err := NewStructuredErrorf(tt.errorType, tt.code, tt.format, tt.args...)
+
+ if err.Type != tt.errorType {
+ t.Errorf(TestFmtExpectedType, tt.errorType, err.Type)
+ }
+ if err.Code != tt.code {
+ t.Errorf(TestFmtExpectedCode, tt.code, err.Code)
+ }
+ if err.Message != tt.expectedMsg {
+ t.Errorf(TestFmtExpectedMessage, tt.expectedMsg, err.Message)
+ }
+ },
+ )
+ }
+}
+
+// validateWrapErrorResult validates wrap error results.
+func validateWrapErrorResult(
+ t *testing.T,
+ result *StructuredError,
+ originalErr error,
+ errorType ErrorType,
+ code, message string,
+) {
+ t.Helper()
+
+ if result.Type != errorType {
+ t.Errorf(TestFmtExpectedType, errorType, result.Type)
+ }
+ if result.Code != code {
+ t.Errorf(TestFmtExpectedCode, code, result.Code)
+ }
+ if result.Message != message {
+ t.Errorf(TestFmtExpectedMessage, message, result.Message)
+ }
+ if !errors.Is(result.Cause, originalErr) {
+ t.Errorf("Expected Cause %v, got %v", originalErr, result.Cause)
+ }
+
+ if originalErr != nil && !errors.Is(result, originalErr) {
+ t.Error("Expected errors.Is to return true for wrapped error")
+ }
+}
+
+func TestWrapError(t *testing.T) {
+ originalErr := errors.New("original error")
+
+ tests := []struct {
+ name string
+ err error
+ errorType ErrorType
+ code string
+ message string
+ }{
+ {
+ name: "wrap simple error",
+ err: originalErr,
+ errorType: ErrorTypeFileSystem,
+ code: "ACCESS_DENIED",
+ message: TestErrAccessDenied,
+ },
+ {
+ name: "wrap nil error",
+ err: nil,
+ errorType: ErrorTypeValidation,
+ code: "INVALID_INPUT",
+ message: "invalid input",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := WrapError(tt.err, tt.errorType, tt.code, tt.message)
+ validateWrapErrorResult(t, result, tt.err, tt.errorType, tt.code, tt.message)
+ },
+ )
+ }
+}
+
+func TestWrapErrorf(t *testing.T) {
+ originalErr := errors.New(TestErrDiskFull)
+
+ tests := []struct {
+ name string
+ err error
+ errorType ErrorType
+ code string
+ format string
+ args []any
+ expectedMsg string
+ }{
+ {
+ name: "wrap with formatted message",
+ err: originalErr,
+ errorType: ErrorTypeIO,
+ code: "WRITE_FAILED",
+ format: "failed to write %d bytes to %s",
+ args: []any{1024, "/tmp/output.txt"},
+ expectedMsg: "failed to write 1024 bytes to /tmp/output.txt",
+ },
+ {
+ name: "wrap without args",
+ err: originalErr,
+ errorType: ErrorTypeProcessing,
+ code: "PROCESSING_ERROR",
+ format: TestErrProcessingFailed,
+ args: nil,
+ expectedMsg: TestErrProcessingFailed,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := WrapErrorf(tt.err, tt.errorType, tt.code, tt.format, tt.args...)
+
+ if result.Type != tt.errorType {
+ t.Errorf(TestFmtExpectedType, tt.errorType, result.Type)
+ }
+ if result.Code != tt.code {
+ t.Errorf(TestFmtExpectedCode, tt.code, result.Code)
+ }
+ if result.Message != tt.expectedMsg {
+ t.Errorf(TestFmtExpectedMessage, tt.expectedMsg, result.Message)
+ }
+ if !errors.Is(result.Cause, tt.err) {
+ t.Errorf("Expected Cause %v, got %v", tt.err, result.Cause)
+ }
+ },
+ )
+ }
+}
+
+// validatePredefinedError validates predefined error constructor results.
+func validatePredefinedError(t *testing.T, err *StructuredError, expectedType ErrorType, name, code, message string) {
+ t.Helper()
+
+ if err.Type != expectedType {
+ t.Errorf(TestFmtExpectedType, expectedType, err.Type)
+ }
+
+ if name != "NewMissingSourceError" {
+ if err.Code != code {
+ t.Errorf(TestFmtExpectedCode, code, err.Code)
+ }
+ if err.Message != message {
+ t.Errorf(TestFmtExpectedMessage, message, err.Message)
+ }
+ }
+}
+
+func TestPredefinedErrorConstructors(t *testing.T) {
+ tests := []struct {
+ name string
+ constructor func(string, string) *StructuredError
+ expectedType ErrorType
+ }{
+ {
+ name: "NewMissingSourceError",
+ constructor: func(_, _ string) *StructuredError { return NewMissingSourceError() },
+ expectedType: ErrorTypeCLI,
+ },
+ {
+ name: "NewFileSystemError",
+ constructor: NewFileSystemError,
+ expectedType: ErrorTypeFileSystem,
+ },
+ {
+ name: "NewProcessingError",
+ constructor: NewProcessingError,
+ expectedType: ErrorTypeProcessing,
+ },
+ {
+ name: "NewIOError",
+ constructor: NewIOError,
+ expectedType: ErrorTypeIO,
+ },
+ {
+ name: "NewValidationError",
+ constructor: NewValidationError,
+ expectedType: ErrorTypeValidation,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ code := "TEST_CODE"
+ message := "test message"
+
+ var err *StructuredError
+ if tt.name == "NewMissingSourceError" {
+ err = NewMissingSourceError()
+ } else {
+ err = tt.constructor(code, message)
+ }
+
+ validatePredefinedError(t, err, tt.expectedType, tt.name, code, message)
+ },
+ )
+ }
+}
+
+func TestStructuredErrorIntegration(t *testing.T) {
+ // Test a complete structured error workflow
+ originalErr := errors.New("connection timeout")
+
+ // Create and modify error through chaining
+ err := WrapError(originalErr, ErrorTypeIO, "READ_TIMEOUT", "failed to read from network").
+ WithFilePath(TestPathTmpNetworkData).
+ WithLine(42).
+ WithContext("host", "example.com").
+ WithContext("port", 8080)
+
+ // Test error interface implementation
+ errorMsg := err.Error()
+ expectedMsg := "IO [READ_TIMEOUT]: failed to read from network: connection timeout"
+ if errorMsg != expectedMsg {
+ t.Errorf("Expected error message %q, got %q", expectedMsg, errorMsg)
+ }
+
+ // Test unwrapping
+ if !errors.Is(err, originalErr) {
+ t.Error("Expected errors.Is to return true for wrapped error")
+ }
+
+ // Test properties
+ if err.FilePath != TestPathTmpNetworkData {
+ t.Errorf(TestFmtExpectedFilePath, TestPathTmpNetworkData, err.FilePath)
+ }
+ if err.Line != 42 {
+ t.Errorf(TestFmtExpectedLine, 42, err.Line)
+ }
+ if len(err.Context) != 2 {
+ t.Errorf("Expected context length 2, got %d", len(err.Context))
+ }
+ if err.Context["host"] != "example.com" {
+ t.Errorf("Expected context host=example.com, got %v", err.Context["host"])
+ }
+ if err.Context["port"] != 8080 {
+ t.Errorf("Expected context port=8080, got %v", err.Context["port"])
+ }
+}
+
+func TestErrorTypeConstants(t *testing.T) {
+ // Test that all error type constants are properly defined
+ types := []ErrorType{
+ ErrorTypeCLI,
+ ErrorTypeFileSystem,
+ ErrorTypeProcessing,
+ ErrorTypeConfiguration,
+ ErrorTypeIO,
+ ErrorTypeValidation,
+ ErrorTypeUnknown,
+ }
+
+ // Ensure all types have unique string representations
+ seen := make(map[string]bool)
+ for _, errType := range types {
+ str := errType.String()
+ if seen[str] {
+ t.Errorf("Duplicate string representation: %q", str)
+ }
+ seen[str] = true
+
+ if str == "" {
+ t.Errorf("Empty string representation for error type %v", errType)
+ }
+ }
+}
+
+// Benchmark tests for StructuredError operations.
+func BenchmarkNewStructuredError(b *testing.B) {
+ context := map[string]any{
+ "key1": "value1",
+ "key2": 42,
+ }
+
+ for b.Loop() {
+ _ = NewStructuredError( // nolint:errcheck // benchmark test
+ ErrorTypeFileSystem,
+ "ACCESS_DENIED",
+ TestErrAccessDenied,
+ "/test/file.txt",
+ context,
+ )
+ }
+}
+
+func BenchmarkStructuredErrorError(b *testing.B) {
+ err := NewStructuredError(ErrorTypeIO, "WRITE_FAILED", "write operation failed", "/tmp/file.txt", nil)
+
+ for b.Loop() {
+ _ = err.Error()
+ }
+}
+
+func BenchmarkStructuredErrorWithContext(b *testing.B) {
+ err := NewStructuredError(ErrorTypeProcessing, "PROC_FAILED", TestErrProcessingFailed, "", nil)
+
+ for i := 0; b.Loop(); i++ {
+ _ = err.WithContext(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) // nolint:errcheck // benchmark test
+ }
+}
diff --git a/shared/logger.go b/shared/logger.go
new file mode 100644
index 0000000..52a227f
--- /dev/null
+++ b/shared/logger.go
@@ -0,0 +1,164 @@
+// Package shared provides logging utilities for gibidify.
+package shared
+
+import (
+ "io"
+ "os"
+ "sync"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Logger interface defines the logging contract for gibidify.
+type Logger interface {
+ Debug(args ...any)
+ Debugf(format string, args ...any)
+ Info(args ...any)
+ Infof(format string, args ...any)
+ Warn(args ...any)
+ Warnf(format string, args ...any)
+ Error(args ...any)
+ Errorf(format string, args ...any)
+ WithFields(fields map[string]any) Logger
+ SetLevel(level LogLevel)
+ SetOutput(output io.Writer)
+}
+
+// LogLevel represents available log levels.
+type LogLevel string
+
+// logService implements the Logger interface using logrus.
+type logService struct {
+ logger *logrus.Logger
+ entry *logrus.Entry
+}
+
+var (
+ instance Logger
+ once sync.Once
+)
+
+// GetLogger returns the singleton logger instance.
+// Default level is WARNING to reduce noise in CLI output.
+func GetLogger() Logger {
+ once.Do(
+ func() {
+ logger := logrus.New()
+ logger.SetLevel(logrus.WarnLevel) // Default to WARNING level
+ logger.SetOutput(os.Stderr)
+ logger.SetFormatter(
+ &logrus.TextFormatter{
+ DisableColors: false,
+ FullTimestamp: false,
+ },
+ )
+
+ instance = &logService{
+ logger: logger,
+ entry: logger.WithFields(logrus.Fields{}),
+ }
+ },
+ )
+
+ return instance
+}
+
+// Debug logs a debug message.
+func (l *logService) Debug(args ...any) {
+ l.entry.Debug(args...)
+}
+
+// Debugf logs a formatted debug message.
+func (l *logService) Debugf(format string, args ...any) {
+ l.entry.Debugf(format, args...)
+}
+
+// Info logs an info message.
+func (l *logService) Info(args ...any) {
+ l.entry.Info(args...)
+}
+
+// Infof logs a formatted info message.
+func (l *logService) Infof(format string, args ...any) {
+ l.entry.Infof(format, args...)
+}
+
+// Warn logs a warning message.
+func (l *logService) Warn(args ...any) {
+ l.entry.Warn(args...)
+}
+
+// Warnf logs a formatted warning message.
+func (l *logService) Warnf(format string, args ...any) {
+ l.entry.Warnf(format, args...)
+}
+
+// Error logs an error message.
+func (l *logService) Error(args ...any) {
+ l.entry.Error(args...)
+}
+
+// Errorf logs a formatted error message.
+func (l *logService) Errorf(format string, args ...any) {
+ l.entry.Errorf(format, args...)
+}
+
+// WithFields adds structured fields to log entries.
+func (l *logService) WithFields(fields map[string]any) Logger {
+ logrusFields := make(logrus.Fields)
+ for k, v := range fields {
+ logrusFields[k] = v
+ }
+
+ return &logService{
+ logger: l.logger,
+ entry: l.entry.WithFields(logrusFields),
+ }
+}
+
+// SetLevel sets the logging level.
+func (l *logService) SetLevel(level LogLevel) {
+ var logrusLevel logrus.Level
+ switch level {
+ case LogLevelDebug:
+ logrusLevel = logrus.DebugLevel
+ case LogLevelInfo:
+ logrusLevel = logrus.InfoLevel
+ case LogLevelError:
+ logrusLevel = logrus.ErrorLevel
+ default:
+ // LogLevelWarn and unknown levels default to warn
+ logrusLevel = logrus.WarnLevel
+ }
+ l.logger.SetLevel(logrusLevel)
+}
+
+// SetOutput sets the output destination for logs.
+func (l *logService) SetOutput(output io.Writer) {
+ l.logger.SetOutput(output)
+}
+
+// ParseLogLevel parses string log level to LogLevel.
+func ParseLogLevel(level string) LogLevel {
+ switch level {
+ case string(LogLevelDebug):
+ return LogLevelDebug
+ case string(LogLevelInfo):
+ return LogLevelInfo
+ case string(LogLevelError):
+ return LogLevelError
+ default:
+ // "warn", "warning", and unknown levels default to warn
+ return LogLevelWarn
+ }
+}
+
+// ValidateLogLevel validates if the provided log level is valid.
+func ValidateLogLevel(level string) bool {
+ switch level {
+ case string(LogLevelDebug), string(LogLevelInfo), string(LogLevelWarn), LogLevelWarningAlias, string(LogLevelError):
+ return true
+ default:
+ return false
+ }
+}
diff --git a/shared/logger_test.go b/shared/logger_test.go
new file mode 100644
index 0000000..e35d9f0
--- /dev/null
+++ b/shared/logger_test.go
@@ -0,0 +1,376 @@
+package shared
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestGetLogger(t *testing.T) {
+ // Test singleton behavior
+ logger1 := GetLogger()
+ logger2 := GetLogger()
+
+ if logger1 != logger2 {
+ t.Error("GetLogger should return the same instance (singleton)")
+ }
+}
+
+func TestLogServiceLevels(t *testing.T) {
+ tests := []struct {
+ name string
+ level LogLevel
+ logFunc func(Logger)
+ expected bool
+ }{
+ {
+ name: "debug level allows debug messages",
+ level: LogLevelDebug,
+ logFunc: func(l Logger) {
+ l.Debug(TestLoggerDebugMsg)
+ },
+ expected: true,
+ },
+ {
+ name: "info level blocks debug messages",
+ level: LogLevelInfo,
+ logFunc: func(l Logger) {
+ l.Debug(TestLoggerDebugMsg)
+ },
+ expected: false,
+ },
+ {
+ name: "info level allows info messages",
+ level: LogLevelInfo,
+ logFunc: func(l Logger) {
+ l.Info(TestLoggerInfoMsg)
+ },
+ expected: true,
+ },
+ {
+ name: "warn level blocks info messages",
+ level: LogLevelWarn,
+ logFunc: func(l Logger) {
+ l.Info(TestLoggerInfoMsg)
+ },
+ expected: false,
+ },
+ {
+ name: "warn level allows warn messages",
+ level: LogLevelWarn,
+ logFunc: func(l Logger) {
+ l.Warn(TestLoggerWarnMsg)
+ },
+ expected: true,
+ },
+ {
+ name: "error level blocks warn messages",
+ level: LogLevelError,
+ logFunc: func(l Logger) {
+ l.Warn(TestLoggerWarnMsg)
+ },
+ expected: false,
+ },
+ {
+ name: "error level allows error messages",
+ level: LogLevelError,
+ logFunc: func(l Logger) {
+ l.Error("error message")
+ },
+ expected: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ logger := GetLogger()
+ logger.SetOutput(&buf)
+ logger.SetLevel(tt.level)
+
+ tt.logFunc(logger)
+
+ output := buf.String()
+ hasOutput := len(strings.TrimSpace(output)) > 0
+ if hasOutput != tt.expected {
+ t.Errorf("Expected output: %v, got output: %v, output: %s", tt.expected, hasOutput, output)
+ }
+ },
+ )
+ }
+}
+
+func TestLogServiceFormattedLogging(t *testing.T) {
+ tests := []struct {
+ name string
+ level LogLevel
+ logFunc func(Logger)
+ contains string
+ }{
+ {
+ name: "debugf formats correctly",
+ level: LogLevelDebug,
+ logFunc: func(l Logger) {
+ l.Debugf("debug %s %d", "message", 42)
+ },
+ contains: "debug message 42",
+ },
+ {
+ name: "infof formats correctly",
+ level: LogLevelInfo,
+ logFunc: func(l Logger) {
+ l.Infof("info %s %d", "message", 42)
+ },
+ contains: "info message 42",
+ },
+ {
+ name: "warnf formats correctly",
+ level: LogLevelWarn,
+ logFunc: func(l Logger) {
+ l.Warnf("warn %s %d", "message", 42)
+ },
+ contains: "warn message 42",
+ },
+ {
+ name: "errorf formats correctly",
+ level: LogLevelError,
+ logFunc: func(l Logger) {
+ l.Errorf("error %s %d", "message", 42)
+ },
+ contains: "error message 42",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ logger := GetLogger()
+ logger.SetOutput(&buf)
+ logger.SetLevel(tt.level)
+
+ tt.logFunc(logger)
+
+ output := buf.String()
+ if !strings.Contains(output, tt.contains) {
+ t.Errorf("Expected output to contain %q, got: %s", tt.contains, output)
+ }
+ },
+ )
+ }
+}
+
+func TestLogServiceWithFields(t *testing.T) {
+ var buf bytes.Buffer
+ logger := GetLogger()
+ logger.SetOutput(&buf)
+ logger.SetLevel(LogLevelInfo)
+
+ fields := map[string]any{
+ "component": "test",
+ "count": 42,
+ "enabled": true,
+ }
+
+ fieldLogger := logger.WithFields(fields)
+ fieldLogger.Info("test message")
+
+ output := buf.String()
+ expectedFields := []string{"component=test", "count=42", "enabled=true", "test message"}
+ for _, expected := range expectedFields {
+ if !strings.Contains(output, expected) {
+ t.Errorf("Expected output to contain %q, got: %s", expected, output)
+ }
+ }
+}
+
+func TestLogServiceSetOutput(t *testing.T) {
+ var buf1, buf2 bytes.Buffer
+ logger := GetLogger()
+
+ // Set initial output
+ logger.SetOutput(&buf1)
+ logger.SetLevel(LogLevelInfo)
+ logger.Info("message1")
+
+ // Change output
+ logger.SetOutput(&buf2)
+ logger.Info("message2")
+
+ // Verify messages went to correct outputs
+ if !strings.Contains(buf1.String(), "message1") {
+ t.Error("First message should be in first buffer")
+ }
+ if strings.Contains(buf1.String(), "message2") {
+ t.Error("Second message should not be in first buffer")
+ }
+ if !strings.Contains(buf2.String(), "message2") {
+ t.Error("Second message should be in second buffer")
+ }
+}
+
+func TestParseLogLevel(t *testing.T) {
+ tests := []struct {
+ input string
+ expected LogLevel
+ }{
+ {"debug", LogLevelDebug},
+ {"info", LogLevelInfo},
+ {"warn", LogLevelWarn},
+ {"warning", LogLevelWarn},
+ {"error", LogLevelError},
+ {"invalid", LogLevelWarn}, // default
+ {"", LogLevelWarn}, // default
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.input, func(t *testing.T) {
+ result := ParseLogLevel(tt.input)
+ if result != tt.expected {
+ t.Errorf("ParseLogLevel(%q) = %v, want %v", tt.input, result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestValidateLogLevel(t *testing.T) {
+ tests := []struct {
+ input string
+ expected bool
+ }{
+ {"debug", true},
+ {"info", true},
+ {"warn", true},
+ {"warning", true},
+ {"error", true},
+ {"invalid", false},
+ {"", false},
+ {"DEBUG", false}, // case-sensitive
+ {"INFO", false}, // case-sensitive
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.input, func(t *testing.T) {
+ result := ValidateLogLevel(tt.input)
+ if result != tt.expected {
+ t.Errorf("ValidateLogLevel(%q) = %v, want %v", tt.input, result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestLogServiceDefaultLevel(t *testing.T) {
+ var buf bytes.Buffer
+ logger := GetLogger()
+ logger.SetOutput(&buf)
+ logger.SetLevel(LogLevelWarn) // Ensure we're at WARN level for this test
+
+ // Test that default level is WARN (should block info messages)
+ logger.Info(TestLoggerInfoMsg)
+ if strings.TrimSpace(buf.String()) != "" {
+ t.Error("Info message should be blocked at default WARN level")
+ }
+
+ // Test that warning messages are allowed
+ buf.Reset()
+ logger.Warn(TestLoggerWarnMsg)
+ if !strings.Contains(buf.String(), TestLoggerWarnMsg) {
+ t.Error("Warn message should be allowed at default WARN level")
+ }
+}
+
+func TestLogServiceSetLevel(t *testing.T) {
+ tests := []struct {
+ name string
+ setLevel LogLevel
+ logFunc func(Logger)
+ expected bool
+ }{
+ {
+ name: "set debug level allows debug",
+ setLevel: LogLevelDebug,
+ logFunc: func(l Logger) {
+ l.Debug(TestLoggerDebugMsg)
+ },
+ expected: true,
+ },
+ {
+ name: "set info level blocks debug",
+ setLevel: LogLevelInfo,
+ logFunc: func(l Logger) {
+ l.Debug(TestLoggerDebugMsg)
+ },
+ expected: false,
+ },
+ {
+ name: "set warn level blocks info",
+ setLevel: LogLevelWarn,
+ logFunc: func(l Logger) {
+ l.Info(TestLoggerInfoMsg)
+ },
+ expected: false,
+ },
+ {
+ name: "set error level blocks warn",
+ setLevel: LogLevelError,
+ logFunc: func(l Logger) {
+ l.Warn(TestLoggerWarnMsg)
+ },
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ logger := GetLogger()
+ logger.SetOutput(&buf)
+ logger.SetLevel(tt.setLevel)
+
+ tt.logFunc(logger)
+
+ output := buf.String()
+ hasOutput := len(strings.TrimSpace(output)) > 0
+ if hasOutput != tt.expected {
+ t.Errorf("Expected output: %v, got output: %v, level: %v", tt.expected, hasOutput, tt.setLevel)
+ }
+ },
+ )
+ }
+}
+
+// Benchmark tests.
+func BenchmarkLoggerInfo(b *testing.B) {
+ logger := GetLogger()
+ logger.SetOutput(io.Discard)
+ logger.SetLevel(LogLevelInfo)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ logger.Info("benchmark message")
+ }
+}
+
+func BenchmarkLoggerWithFields(b *testing.B) {
+ logger := GetLogger()
+ logger.SetOutput(io.Discard)
+ logger.SetLevel(LogLevelInfo)
+
+ fields := map[string]any{
+ "component": "benchmark",
+ "iteration": 0,
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fields["iteration"] = i
+ logger.WithFields(fields).Info("benchmark message")
+ }
+}
diff --git a/shared/paths.go b/shared/paths.go
new file mode 100644
index 0000000..b6fe7ae
--- /dev/null
+++ b/shared/paths.go
@@ -0,0 +1,217 @@
+// Package shared provides common utility functions.
+package shared
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// AbsolutePath returns the absolute path for the given path.
+// It wraps filepath.Abs with consistent error handling.
+func AbsolutePath(path string) (string, error) {
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err)
+ }
+
+ return abs, nil
+}
+
+// BaseName returns the base name for the given path, handling special cases.
+func BaseName(absPath string) string {
+ baseName := filepath.Base(absPath)
+ if baseName == "." || baseName == "" {
+ return "output"
+ }
+
+ return baseName
+}
+
+// ValidateSourcePath validates a source directory path for security.
+// It ensures the path exists, is a directory, and doesn't contain path traversal attempts.
+func ValidateSourcePath(path string) error {
+ if path == "" {
+ return NewStructuredError(
+ ErrorTypeValidation,
+ CodeValidationRequired,
+ TestMsgSourcePath+" is required",
+ "",
+ nil,
+ )
+ }
+
+ // Check for path traversal patterns before cleaning
+ if strings.Contains(path, "..") {
+ return NewStructuredError(
+ ErrorTypeValidation, CodeValidationPath,
+ "path traversal attempt detected in "+TestMsgSourcePath, path, map[string]any{
+ "original_path": path,
+ },
+ )
+ }
+
+ // Clean and get absolute path
+ cleaned := filepath.Clean(path)
+ abs, err := filepath.Abs(cleaned)
+ if err != nil {
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSPathResolution, "cannot resolve "+TestMsgSourcePath, path, map[string]any{
+ "error": err.Error(),
+ },
+ )
+ }
+
+ // Get current working directory to ensure we're not escaping it for relative paths
+ if !filepath.IsAbs(path) {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSPathResolution, "cannot get current working directory", path, map[string]any{
+ "error": err.Error(),
+ },
+ )
+ }
+
+ // Ensure the resolved path is within or below the current working directory
+ cwdAbs, err := filepath.Abs(cwd)
+ if err != nil {
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSPathResolution,
+ "cannot resolve current working directory", path, map[string]any{
+ "error": err.Error(),
+ },
+ )
+ }
+
+ // Check if the absolute path tries to escape the current working directory
+ if !strings.HasPrefix(abs, cwdAbs) {
+ return NewStructuredError(
+ ErrorTypeValidation,
+ CodeValidationPath,
+ "source path attempts to access directories outside current working directory",
+ path,
+ map[string]any{
+ "resolved_path": abs,
+ "working_dir": cwdAbs,
+ },
+ )
+ }
+ }
+
+ // Check if path exists and is a directory
+ info, err := os.Stat(cleaned)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return NewStructuredError(ErrorTypeFileSystem, CodeFSNotFound, "source directory does not exist", path, nil)
+ }
+
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSAccess, "cannot access source directory", path, map[string]any{
+ "error": err.Error(),
+ },
+ )
+ }
+
+ if !info.IsDir() {
+ return NewStructuredError(
+ ErrorTypeValidation, CodeValidationPath, "source path must be a directory", path, map[string]any{
+ "is_file": true,
+ },
+ )
+ }
+
+ return nil
+}
+
+// ValidateDestinationPath validates a destination file path for security.
+// It ensures the path doesn't contain path traversal attempts and the parent directory exists.
+func ValidateDestinationPath(path string) error {
+ if path == "" {
+ return NewStructuredError(ErrorTypeValidation, CodeValidationRequired, "destination path is required", "", nil)
+ }
+
+ // Check for path traversal patterns before cleaning
+ if strings.Contains(path, "..") {
+ return NewStructuredError(
+ ErrorTypeValidation,
+ CodeValidationPath,
+ "path traversal attempt detected in destination path",
+ path,
+ map[string]any{
+ "original_path": path,
+ },
+ )
+ }
+
+ // Clean and validate the path
+ cleaned := filepath.Clean(path)
+
+ // Get absolute path to ensure it's not trying to escape current working directory
+ abs, err := filepath.Abs(cleaned)
+ if err != nil {
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSPathResolution, "cannot resolve destination path", path, map[string]any{
+ "error": err.Error(),
+ },
+ )
+ }
+
+ // Ensure the destination is not a directory
+ if info, err := os.Stat(abs); err == nil && info.IsDir() {
+ return NewStructuredError(
+ ErrorTypeValidation, CodeValidationPath, "destination cannot be a directory", path, map[string]any{
+ "is_directory": true,
+ },
+ )
+ }
+
+ // Check if parent directory exists and is writable
+ parentDir := filepath.Dir(abs)
+ if parentInfo, err := os.Stat(parentDir); err != nil {
+ if os.IsNotExist(err) {
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSNotFound,
+ "destination parent directory does not exist", path, map[string]any{
+ "parent_dir": parentDir,
+ },
+ )
+ }
+
+ return NewStructuredError(
+ ErrorTypeFileSystem, CodeFSAccess, "cannot access destination parent directory", path, map[string]any{
+ "parent_dir": parentDir,
+ "error": err.Error(),
+ },
+ )
+ } else if !parentInfo.IsDir() {
+ return NewStructuredError(
+ ErrorTypeValidation, CodeValidationPath, "destination parent is not a directory", path, map[string]any{
+ "parent_dir": parentDir,
+ },
+ )
+ }
+
+ return nil
+}
+
+// ValidateConfigPath validates a configuration file path for security.
+// It ensures the path doesn't contain path traversal attempts.
+func ValidateConfigPath(path string) error {
+ if path == "" {
+ return nil // Empty path is allowed for config
+ }
+
+ // Check for path traversal patterns before cleaning
+ if strings.Contains(path, "..") {
+ return NewStructuredError(
+ ErrorTypeValidation, CodeValidationPath,
+ "path traversal attempt detected in config path", path, map[string]any{
+ "original_path": path,
+ },
+ )
+ }
+
+ return nil
+}
diff --git a/shared/paths_test.go b/shared/paths_test.go
new file mode 100644
index 0000000..e7784e3
--- /dev/null
+++ b/shared/paths_test.go
@@ -0,0 +1,788 @@
+package shared
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+const (
+ windowsOS = "windows"
+)
+
+// validatePathTestCase represents a test case for path validation functions.
+type validatePathTestCase struct {
+ name string
+ path string
+ wantErr bool
+ errType ErrorType
+ errCode string
+ errContains string
+}
+
+// validateExpectedError validates expected error structure and content.
+func validateExpectedError(t *testing.T, err error, validatorName string, testCase validatePathTestCase) {
+ t.Helper()
+
+ if err == nil {
+ t.Errorf("%s() expected error, got nil", validatorName)
+
+ return
+ }
+
+ var structErr *StructuredError
+ if !errors.As(err, &structErr) {
+ t.Errorf("Expected StructuredError, got %T", err)
+
+ return
+ }
+
+ if structErr.Type != testCase.errType {
+ t.Errorf("Expected error type %v, got %v", testCase.errType, structErr.Type)
+ }
+ if structErr.Code != testCase.errCode {
+ t.Errorf("Expected error code %v, got %v", testCase.errCode, structErr.Code)
+ }
+ if testCase.errContains != "" && !strings.Contains(err.Error(), testCase.errContains) {
+ t.Errorf("Error should contain %q, got: %v", testCase.errContains, err.Error())
+ }
+}
+
+// testPathValidation is a helper function to test path validation functions without duplication.
+func testPathValidation(
+ t *testing.T,
+ validatorName string,
+ validatorFunc func(string) error,
+ tests []validatePathTestCase,
+) {
+ t.Helper()
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ err := validatorFunc(tt.path)
+
+ if tt.wantErr {
+ validateExpectedError(t, err, validatorName, tt)
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("%s() unexpected error: %v", validatorName, err)
+ }
+ },
+ )
+ }
+}
+
+func TestAbsolutePath(t *testing.T) {
+ // Get current working directory for tests
+ cwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("Failed to get current directory: %v", err)
+ }
+
+ tests := createAbsolutePathTestCases(cwd)
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ verifyAbsolutePathResult(t, tt.path, tt.wantPrefix, tt.wantErr, tt.wantErrMsg, tt.skipWindows)
+ },
+ )
+ }
+}
+
+// createAbsolutePathTestCases creates test cases for AbsolutePath.
+func createAbsolutePathTestCases(cwd string) []struct {
+ name string
+ path string
+ wantPrefix string
+ wantErr bool
+ wantErrMsg string
+ skipWindows bool
+} {
+ return []struct {
+ name string
+ path string
+ wantPrefix string
+ wantErr bool
+ wantErrMsg string
+ skipWindows bool
+ }{
+ {
+ name: "absolute path unchanged",
+ path: cwd,
+ wantPrefix: cwd,
+ wantErr: false,
+ },
+ {
+ name: "relative path current directory",
+ path: ".",
+ wantPrefix: cwd,
+ wantErr: false,
+ },
+ {
+ name: "relative path parent directory",
+ path: "..",
+ wantPrefix: filepath.Dir(cwd),
+ wantErr: false,
+ },
+ {
+ name: "relative path with file",
+ path: "test.txt",
+ wantPrefix: filepath.Join(cwd, "test.txt"),
+ wantErr: false,
+ },
+ {
+ name: "relative path with subdirectory",
+ path: "subdir/file.go",
+ wantPrefix: filepath.Join(cwd, "subdir", "file.go"),
+ wantErr: false,
+ },
+ {
+ name: TestMsgEmptyPath,
+ path: "",
+ wantPrefix: cwd,
+ wantErr: false,
+ },
+ {
+ name: "path with tilde",
+ path: "~/test",
+ wantPrefix: filepath.Join(cwd, "~", "test"),
+ wantErr: false,
+ skipWindows: false,
+ },
+ {
+ name: "path with multiple separators",
+ path: "path//to///file",
+ wantPrefix: filepath.Join(cwd, "path", "to", "file"),
+ wantErr: false,
+ },
+ {
+ name: "path with trailing separator",
+ path: "path/",
+ wantPrefix: filepath.Join(cwd, "path"),
+ wantErr: false,
+ },
+ }
+}
+
+// verifyAbsolutePathResult verifies the result of AbsolutePath.
+func verifyAbsolutePathResult(
+ t *testing.T,
+ path, wantPrefix string,
+ wantErr bool,
+ wantErrMsg string,
+ skipWindows bool,
+) {
+ t.Helper()
+
+ if skipWindows && runtime.GOOS == windowsOS {
+ t.Skip("Skipping test on Windows")
+ }
+
+ got, err := AbsolutePath(path)
+
+ if wantErr {
+ verifyExpectedError(t, err, wantErrMsg)
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("AbsolutePath() unexpected error = %v", err)
+
+ return
+ }
+
+ //nolint:errcheck // Test helper, error intentionally ignored for testing
+ verifyAbsolutePathOutput(t, got, wantPrefix)
+}
+
+// verifyExpectedError verifies that an expected error occurred.
+func verifyExpectedError(t *testing.T, err error, wantErrMsg string) {
+ t.Helper()
+
+ if err == nil {
+ t.Error("AbsolutePath() error = nil, wantErr true")
+
+ return
+ }
+
+ if wantErrMsg != "" && !strings.Contains(err.Error(), wantErrMsg) {
+ t.Errorf("AbsolutePath() error = %v, want error containing %v", err, wantErrMsg)
+ }
+}
+
+// verifyAbsolutePathOutput verifies the output of AbsolutePath.
+func verifyAbsolutePathOutput(t *testing.T, got, wantPrefix string) {
+ t.Helper()
+
+ // Clean the expected path for comparison
+ wantClean := filepath.Clean(wantPrefix)
+ gotClean := filepath.Clean(got)
+
+ if gotClean != wantClean {
+ t.Errorf("AbsolutePath() = %v, want %v", gotClean, wantClean)
+ }
+
+ // Verify the result is actually absolute
+ if !filepath.IsAbs(got) {
+ t.Errorf("AbsolutePath() returned non-absolute path: %v", got)
+ }
+}
+
+func TestAbsolutePathSpecialCases(t *testing.T) {
+ if runtime.GOOS == windowsOS {
+ t.Skip("Skipping Unix-specific tests on Windows")
+ }
+
+ tests := []struct {
+ name string
+ setup func(*testing.T) (string, func())
+ path string
+ wantErr bool
+ }{
+ {
+ name: "symlink to directory",
+ setup: setupSymlinkToDirectory,
+ path: "",
+ wantErr: false,
+ },
+ {
+ name: "broken symlink",
+ setup: setupBrokenSymlink,
+ path: "",
+ wantErr: false, // filepath.Abs still works with broken symlinks
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ verifySpecialCaseAbsolutePath(t, tt.setup, tt.path, tt.wantErr)
+ },
+ )
+ }
+}
+
+// setupSymlinkToDirectory creates a symlink pointing to a directory.
+func setupSymlinkToDirectory(t *testing.T) (string, func()) {
+ t.Helper()
+ tmpDir := t.TempDir()
+ target := filepath.Join(tmpDir, "target")
+ link := filepath.Join(tmpDir, "link")
+
+ if err := os.Mkdir(target, 0o750); err != nil {
+ t.Fatalf("Failed to create target directory: %v", err)
+ }
+ if err := os.Symlink(target, link); err != nil {
+ t.Fatalf("Failed to create symlink: %v", err)
+ }
+
+ return link, func() {
+ // Cleanup handled automatically by t.TempDir()
+ }
+}
+
+// setupBrokenSymlink creates a broken symlink.
+func setupBrokenSymlink(t *testing.T) (string, func()) {
+ t.Helper()
+ tmpDir := t.TempDir()
+ link := filepath.Join(tmpDir, "broken_link")
+
+ if err := os.Symlink("/nonexistent/path", link); err != nil {
+ t.Fatalf("Failed to create broken symlink: %v", err)
+ }
+
+ return link, func() {
+ // Cleanup handled automatically by t.TempDir()
+ }
+}
+
+// verifySpecialCaseAbsolutePath verifies AbsolutePath with special cases.
+func verifySpecialCaseAbsolutePath(t *testing.T, setup func(*testing.T) (string, func()), path string, wantErr bool) {
+ t.Helper()
+ testPath, cleanup := setup(t)
+ //nolint:errcheck // Test helper, error intentionally ignored for testing
+ defer cleanup()
+
+ if path == "" {
+ path = testPath
+ }
+
+ got, err := AbsolutePath(path)
+ if (err != nil) != wantErr {
+ t.Errorf("AbsolutePath() error = %v, wantErr %v", err, wantErr)
+
+ return
+ }
+
+ if err == nil && !filepath.IsAbs(got) {
+ t.Errorf("AbsolutePath() returned non-absolute path: %v", got)
+ }
+}
+
+func TestAbsolutePathConcurrency(_ *testing.T) {
+ // Test that AbsolutePath is safe for concurrent use
+ paths := []string{".", "..", "test.go", "subdir/file.txt", "/tmp/test"}
+ done := make(chan bool)
+
+ for _, p := range paths {
+ go func(path string) {
+ _, _ = AbsolutePath(path) //nolint:errcheck // Testing concurrency safety only, result not needed
+ done <- true
+ }(p)
+ }
+
+ // Wait for all goroutines to complete
+ for range paths {
+ <-done
+ }
+}
+
+func TestAbsolutePathErrorFormatting(t *testing.T) {
+ // This test verifies error message formatting
+ // We need to trigger an actual error from filepath.Abs
+ // On Unix systems, we can't easily trigger filepath.Abs errors
+ // so we'll just verify the error wrapping works correctly
+
+ // Create a test that would fail if filepath.Abs returns an error
+ path := "test/path"
+ got, err := AbsolutePath(path)
+ if err != nil {
+ // If we somehow get an error, verify it's properly formatted
+ if !strings.Contains(err.Error(), "failed to get absolute path for") {
+ t.Errorf("Error message format incorrect: %v", err)
+ }
+ if !strings.Contains(err.Error(), path) {
+ t.Errorf("Error message should contain original path: %v", err)
+ }
+ } else if !filepath.IsAbs(got) {
+ // Normal case - just verify we got a valid absolute path
+ t.Errorf("Expected absolute path, got: %v", got)
+ }
+}
+
+// BenchmarkAbsolutePath benchmarks the AbsolutePath function.
+func BenchmarkAbsolutePath(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = AbsolutePath("test/path/file.go") //nolint:errcheck // Benchmark test, result not needed
+ }
+}
+
+// BenchmarkAbsolutePathAbs benchmarks with already absolute path.
+func BenchmarkAbsolutePathAbs(b *testing.B) {
+ absPath := "/home/user/test/file.go"
+ if runtime.GOOS == windowsOS {
+ absPath = "C:\\Users\\test\\file.go"
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = AbsolutePath(absPath) //nolint:errcheck // Benchmark test, result not needed
+ }
+}
+
+// BenchmarkAbsolutePathCurrent benchmarks with current directory.
+func BenchmarkAbsolutePathCurrent(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = AbsolutePath(".") //nolint:errcheck // Benchmark test, result not needed
+ }
+}
+
+func TestValidateSourcePath(t *testing.T) {
+ // Create test directories for validation
+ tmpDir := t.TempDir()
+ validDir := filepath.Join(tmpDir, "validdir")
+ validFile := filepath.Join(tmpDir, "validfile.txt")
+
+ // Create test directory and file
+ if err := os.Mkdir(validDir, 0o750); err != nil {
+ t.Fatalf(TestMsgFailedToCreateTestDir, err)
+ }
+ if err := os.WriteFile(validFile, []byte("test"), 0o600); err != nil {
+ t.Fatalf("Failed to create test file: %v", err)
+ }
+
+ tests := []validatePathTestCase{
+ {
+ name: TestMsgEmptyPath,
+ path: "",
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationRequired,
+ errContains: "source path is required",
+ },
+ {
+ name: "path traversal with double dots",
+ path: TestPathEtcPasswdTraversal,
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: TestMsgPathTraversalAttempt,
+ },
+ {
+ name: "path traversal in middle",
+ path: "valid/../../../secrets",
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: TestMsgPathTraversalAttempt,
+ },
+ {
+ name: "nonexistent directory",
+ path: "/nonexistent/directory",
+ wantErr: true,
+ errType: ErrorTypeFileSystem,
+ errCode: CodeFSNotFound,
+ errContains: "source directory does not exist",
+ },
+ {
+ name: "file instead of directory",
+ path: validFile,
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: "source path must be a directory",
+ },
+ {
+ name: "valid directory (absolute)",
+ path: validDir,
+ wantErr: false,
+ },
+ {
+ name: "valid directory (relative)",
+ path: ".",
+ wantErr: false,
+ },
+ {
+ name: "valid directory (current)",
+ path: tmpDir,
+ wantErr: false,
+ },
+ }
+
+ // Save and restore current directory for relative path tests
+ originalWd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("Failed to get working directory: %v", err)
+ }
+ defer func() {
+ // Need to use os.Chdir here since t.Chdir only works in the current function context
+ if err := os.Chdir(originalWd); err != nil { // nolint:usetesting // needed in defer function
+ t.Logf("Failed to restore working directory: %v", err)
+ }
+ }()
+ t.Chdir(tmpDir)
+
+ testPathValidation(t, "ValidateSourcePath", ValidateSourcePath, tests)
+}
+
+func TestValidateDestinationPath(t *testing.T) {
+ tmpDir := t.TempDir()
+ existingDir := filepath.Join(tmpDir, "existing")
+ existingFile := filepath.Join(tmpDir, "existing.txt")
+ validDest := filepath.Join(tmpDir, TestFileOutputTXT)
+
+ // Create test directory and file
+ if err := os.Mkdir(existingDir, 0o750); err != nil {
+ t.Fatalf(TestMsgFailedToCreateTestDir, err)
+ }
+ if err := os.WriteFile(existingFile, []byte("test"), 0o600); err != nil {
+ t.Fatalf("Failed to create test file: %v", err)
+ }
+
+ tests := []validatePathTestCase{
+ {
+ name: TestMsgEmptyPath,
+ path: "",
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationRequired,
+ errContains: "destination path is required",
+ },
+ {
+ name: "path traversal attack",
+ path: "../../../tmp/malicious.txt",
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: TestMsgPathTraversalAttempt,
+ },
+ {
+ name: "destination is existing directory",
+ path: existingDir,
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: "destination cannot be a directory",
+ },
+ {
+ name: "parent directory doesn't exist",
+ path: "/nonexistent/dir/TestFileOutputTXT",
+ wantErr: true,
+ errType: ErrorTypeFileSystem,
+ errCode: CodeFSNotFound,
+ errContains: "destination parent directory does not exist",
+ },
+ {
+ name: "valid destination path",
+ path: validDest,
+ wantErr: false,
+ },
+ {
+ name: "overwrite existing file (should be valid)",
+ path: existingFile,
+ wantErr: false,
+ },
+ }
+
+ testPathValidation(t, "ValidateDestinationPath", ValidateDestinationPath, tests)
+}
+
+func TestValidateConfigPath(t *testing.T) {
+ tests := []validatePathTestCase{
+ {
+ name: "empty path (allowed for config)",
+ path: "",
+ wantErr: false,
+ },
+ {
+ name: "path traversal attack",
+ path: TestPathEtcPasswdTraversal,
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: TestMsgPathTraversalAttempt,
+ },
+ {
+ name: "complex path traversal",
+ path: "config/../../../secrets/" + TestFileConfigYAML,
+ wantErr: true,
+ errType: ErrorTypeValidation,
+ errCode: CodeValidationPath,
+ errContains: TestMsgPathTraversalAttempt,
+ },
+ {
+ name: "valid config path",
+ path: TestFileConfigYAML,
+ wantErr: false,
+ },
+ {
+ name: "valid absolute config path",
+ path: "/etc/myapp/" + TestFileConfigYAML,
+ wantErr: false,
+ },
+ {
+ name: "config in subdirectory",
+ path: "configs/production.yaml",
+ wantErr: false,
+ },
+ }
+
+ testPathValidation(t, "ValidateConfigPath", ValidateConfigPath, tests)
+}
+
+func TestBaseName(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ expected string
+ }{
+ {
+ name: "simple filename",
+ path: "/path/to/file.txt",
+ expected: "file.txt",
+ },
+ {
+ name: "directory path",
+ path: "/path/to/directory",
+ expected: "directory",
+ },
+ {
+ name: "root path",
+ path: "/",
+ expected: "/",
+ },
+ {
+ name: "current directory",
+ path: ".",
+ expected: "output", // Special case: . returns "output"
+ },
+ {
+ name: TestMsgEmptyPath,
+ path: "",
+ expected: "output", // Special case: empty returns "output"
+ },
+ {
+ name: "path with trailing separator",
+ path: "/path/to/dir/",
+ expected: "dir",
+ },
+ {
+ name: "relative path",
+ path: "subdir/file.go",
+ expected: "file.go",
+ },
+ {
+ name: "single filename",
+ path: "README.md",
+ expected: "README.md",
+ },
+ {
+ name: "path with spaces",
+ path: "/path/to/my file.txt",
+ expected: "my file.txt",
+ },
+ {
+ name: "path with special characters",
+ path: "/path/to/file-name_123.ext",
+ expected: "file-name_123.ext",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := BaseName(tt.path)
+ if result != tt.expected {
+ t.Errorf("BaseName(%q) = %q, want %q", tt.path, result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+// Security-focused integration tests.
+func TestPathValidationIntegration(t *testing.T) {
+ tmpDir := t.TempDir()
+ validSourceDir := filepath.Join(tmpDir, "source")
+ validDestFile := filepath.Join(tmpDir, TestFileOutputTXT)
+
+ // Create source directory
+ if err := os.Mkdir(validSourceDir, 0o750); err != nil {
+ t.Fatalf(TestMsgFailedToCreateTestDir, err)
+ }
+
+ // Test complete validation workflow
+ tests := []struct {
+ name string
+ sourcePath string
+ destPath string
+ configPath string
+ expectSourceErr bool
+ expectDestErr bool
+ expectConfigErr bool
+ }{
+ {
+ name: "valid paths",
+ sourcePath: validSourceDir,
+ destPath: validDestFile,
+ configPath: TestFileConfigYAML,
+ expectSourceErr: false,
+ expectDestErr: false,
+ expectConfigErr: false,
+ },
+ {
+ name: "source path traversal attack",
+ sourcePath: "../../../etc",
+ destPath: validDestFile,
+ configPath: TestFileConfigYAML,
+ expectSourceErr: true,
+ expectDestErr: false,
+ expectConfigErr: false,
+ },
+ {
+ name: "destination path traversal attack",
+ sourcePath: validSourceDir,
+ destPath: "../../../tmp/malicious.txt",
+ configPath: TestFileConfigYAML,
+ expectSourceErr: false,
+ expectDestErr: true,
+ expectConfigErr: false,
+ },
+ {
+ name: "config path traversal attack",
+ sourcePath: validSourceDir,
+ destPath: validDestFile,
+ configPath: TestPathEtcPasswdTraversal,
+ expectSourceErr: false,
+ expectDestErr: false,
+ expectConfigErr: true,
+ },
+ {
+ name: "multiple path traversal attacks",
+ sourcePath: "../../../var",
+ destPath: "../../../tmp/bad.txt",
+ configPath: "../../../etc/config",
+ expectSourceErr: true,
+ expectDestErr: true,
+ expectConfigErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ // Test source validation
+ sourceErr := ValidateSourcePath(tt.sourcePath)
+ if (sourceErr != nil) != tt.expectSourceErr {
+ t.Errorf("Source validation: expected error %v, got %v", tt.expectSourceErr, sourceErr)
+ }
+
+ // Test destination validation
+ destErr := ValidateDestinationPath(tt.destPath)
+ if (destErr != nil) != tt.expectDestErr {
+ t.Errorf("Destination validation: expected error %v, got %v", tt.expectDestErr, destErr)
+ }
+
+ // Test config validation
+ configErr := ValidateConfigPath(tt.configPath)
+ if (configErr != nil) != tt.expectConfigErr {
+ t.Errorf("Config validation: expected error %v, got %v", tt.expectConfigErr, configErr)
+ }
+ },
+ )
+ }
+}
+
+// Benchmark the validation functions for performance.
+func BenchmarkValidateSourcePath(b *testing.B) {
+ tmpDir := b.TempDir()
+ validDir := filepath.Join(tmpDir, "testdir")
+ if err := os.Mkdir(validDir, 0o750); err != nil {
+ b.Fatalf(TestMsgFailedToCreateTestDir, err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = ValidateSourcePath(validDir) // nolint:errcheck // benchmark test
+ }
+}
+
+func BenchmarkValidateDestinationPath(b *testing.B) {
+ tmpDir := b.TempDir()
+ validDest := filepath.Join(tmpDir, TestFileOutputTXT)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = ValidateDestinationPath(validDest) // nolint:errcheck // benchmark test
+ }
+}
+
+func BenchmarkBaseName(b *testing.B) {
+ path := "/very/long/path/to/some/deeply/nested/file.txt"
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = BaseName(path)
+ }
+}
diff --git a/gibidiutils/writers.go b/shared/writers.go
similarity index 62%
rename from gibidiutils/writers.go
rename to shared/writers.go
index f0e5545..372d50b 100644
--- a/gibidiutils/writers.go
+++ b/shared/writers.go
@@ -1,10 +1,11 @@
-// Package gibidiutils provides common utility functions for gibidify.
-package gibidiutils
+// Package shared provides common utility functions.
+package shared
import (
+ "context"
"encoding/json"
+ "fmt"
"io"
- "math"
"strings"
)
@@ -29,15 +30,15 @@ func WriteWithErrorWrap(writer io.Writer, content, errorMsg, filePath string) er
if filePath != "" {
wrappedErr = wrappedErr.WithFilePath(filePath)
}
+
return wrappedErr
}
+
return nil
}
// StreamContent provides a common streaming implementation with chunk processing.
// This eliminates the similar streaming patterns across JSON and Markdown writers.
-//
-//revive:disable-next-line:cognitive-complexity
func StreamContent(
reader io.Reader,
writer io.Writer,
@@ -49,42 +50,70 @@ func StreamContent(
for {
n, err := reader.Read(buf)
if n > 0 {
- processed := buf[:n]
- if processChunk != nil {
- processed = processChunk(processed)
- }
- if _, writeErr := writer.Write(processed); writeErr != nil {
- wrappedErr := WrapError(writeErr, ErrorTypeIO, CodeIOWrite, "failed to write content chunk")
- if filePath != "" {
- wrappedErr = wrappedErr.WithFilePath(filePath)
- }
- return wrappedErr
+ if err := writeProcessedChunk(writer, buf[:n], filePath, processChunk); err != nil {
+ return err
}
}
if err == io.EOF {
break
}
if err != nil {
- wrappedErr := WrapError(err, ErrorTypeIO, CodeIOFileRead, "failed to read content chunk")
- if filePath != "" {
- wrappedErr = wrappedErr.WithFilePath(filePath)
- }
- return wrappedErr
+ return wrapReadError(err, filePath)
}
}
+
return nil
}
+// writeProcessedChunk processes and writes a chunk of data.
+func writeProcessedChunk(writer io.Writer, chunk []byte, filePath string, processChunk func([]byte) []byte) error {
+ processed := chunk
+ if processChunk != nil {
+ processed = processChunk(processed)
+ }
+ if _, writeErr := writer.Write(processed); writeErr != nil {
+ return wrapWriteError(writeErr, filePath)
+ }
+
+ return nil
+}
+
+// wrapWriteError wraps a write error with context.
+func wrapWriteError(err error, filePath string) error {
+ wrappedErr := WrapError(err, ErrorTypeIO, CodeIOWrite, "failed to write content chunk")
+ if filePath != "" {
+ //nolint:errcheck // WithFilePath error doesn't affect wrapped error integrity
+ wrappedErr = wrappedErr.WithFilePath(filePath)
+ }
+
+ return wrappedErr
+}
+
+// wrapReadError wraps a read error with context.
+func wrapReadError(err error, filePath string) error {
+ wrappedErr := WrapError(err, ErrorTypeIO, CodeIORead, "failed to read content chunk")
+ if filePath != "" {
+ wrappedErr = wrappedErr.WithFilePath(filePath)
+ }
+
+ return wrappedErr
+}
+
// EscapeForJSON escapes content for JSON output using the standard library.
// This replaces the custom escapeJSONString function with a more robust implementation.
func EscapeForJSON(content string) string {
// Use the standard library's JSON marshaling for proper escaping
- jsonBytes, _ := json.Marshal(content)
+ jsonBytes, err := json.Marshal(content)
+ if err != nil {
+ // If marshaling fails (which is very unlikely for a string), return the original
+ return content
+ }
// Remove the surrounding quotes that json.Marshal adds
jsonStr := string(jsonBytes)
if len(jsonStr) >= 2 && jsonStr[0] == '"' && jsonStr[len(jsonStr)-1] == '"' {
return jsonStr[1 : len(jsonStr)-1]
}
+
return jsonStr
}
@@ -97,30 +126,37 @@ func EscapeForYAML(content string) string {
strings.HasPrefix(content, "?") ||
strings.HasPrefix(content, ":") ||
content == "" ||
- content == "true" || content == "false" ||
- content == "null" || content == "~"
+ content == LiteralTrue || content == LiteralFalse ||
+ content == LiteralNull || content == "~"
if needsQuotes {
// Use double quotes and escape internal quotes
escaped := strings.ReplaceAll(content, "\\", "\\\\")
escaped = strings.ReplaceAll(escaped, "\"", "\\\"")
+
return "\"" + escaped + "\""
}
+
return content
}
-// SafeUint64ToInt64WithDefault safely converts uint64 to int64, returning a default value if overflow would occur.
-// When defaultValue is 0 (the safe default), clamps to MaxInt64 on overflow to keep guardrails active.
-// This prevents overflow from making monitors think memory usage is zero when it's actually maxed out.
-func SafeUint64ToInt64WithDefault(value uint64, defaultValue int64) int64 {
- if value > math.MaxInt64 {
- // When caller uses 0 as "safe" default, clamp to max so overflow still trips guardrails
- if defaultValue == 0 {
- return math.MaxInt64
- }
- return defaultValue
+// CheckContextCancellation is a helper function that checks if context is canceled and returns appropriate error.
+func CheckContextCancellation(ctx context.Context, operation string) error {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("%s canceled: %w", operation, ctx.Err())
+ default:
+ return nil
}
- return int64(value) //#nosec G115 -- Safe: value <= MaxInt64 checked above
+}
+
+// WithContextCheck wraps an operation with context cancellation checking.
+func WithContextCheck(ctx context.Context, operation string, fn func() error) error {
+ if err := CheckContextCancellation(ctx, operation); err != nil {
+ return err
+ }
+
+ return fn()
}
// StreamLines provides line-based streaming for YAML content.
@@ -129,10 +165,11 @@ func StreamLines(reader io.Reader, writer io.Writer, filePath string, lineProces
// Read all content first (for small files this is fine)
content, err := io.ReadAll(reader)
if err != nil {
- wrappedErr := WrapError(err, ErrorTypeIO, CodeIOFileRead, "failed to read content for line processing")
+ wrappedErr := WrapError(err, ErrorTypeIO, CodeIORead, "failed to read content for line processing")
if filePath != "" {
wrappedErr = wrappedErr.WithFilePath(filePath)
}
+
return wrappedErr
}
@@ -155,8 +192,10 @@ func StreamLines(reader io.Reader, writer io.Writer, filePath string, lineProces
if filePath != "" {
wrappedErr = wrappedErr.WithFilePath(filePath)
}
+
return wrappedErr
}
}
+
return nil
}
diff --git a/shared/writers_test.go b/shared/writers_test.go
new file mode 100644
index 0000000..30a21ab
--- /dev/null
+++ b/shared/writers_test.go
@@ -0,0 +1,1038 @@
+package shared
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "time"
+)
+
+// Mock test objects - local to avoid import cycles.
+
+// mockCloser implements io.ReadCloser with configurable close error.
+type mockCloser struct {
+ closeError error
+ closed bool
+}
+
+func (m *mockCloser) Read(_ []byte) (n int, err error) {
+ return 0, io.EOF
+}
+
+func (m *mockCloser) Close() error {
+ m.closed = true
+
+ return m.closeError
+}
+
+// mockReader implements io.Reader that returns EOF.
+type mockReader struct{}
+
+func (m *mockReader) Read(_ []byte) (n int, err error) {
+ return 0, io.EOF
+}
+
+// mockWriter implements io.Writer with configurable write error.
+type mockWriter struct {
+ writeError error
+ written []byte
+}
+
+func (m *mockWriter) Write(p []byte) (n int, err error) {
+ if m.writeError != nil {
+ return 0, m.writeError
+ }
+ m.written = append(m.written, p...)
+
+ return len(p), nil
+}
+
+func TestSafeCloseReader(t *testing.T) {
+ tests := []struct {
+ name string
+ reader io.Reader
+ path string
+ expectClosed bool
+ expectError bool
+ closeError error
+ }{
+ {
+ name: "closer reader success",
+ reader: &mockCloser{},
+ path: TestPathBase,
+ expectClosed: true,
+ expectError: false,
+ },
+ {
+ name: "closer reader with error",
+ reader: &mockCloser{closeError: errors.New("close failed")},
+ path: TestPathBase,
+ expectClosed: true,
+ expectError: true,
+ closeError: errors.New("close failed"),
+ },
+ {
+ name: "non-closer reader",
+ reader: &mockReader{},
+ path: TestPathBase,
+ expectClosed: false,
+ expectError: false,
+ },
+ {
+ name: "closer reader with empty path",
+ reader: &mockCloser{},
+ path: "",
+ expectClosed: true,
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ // Capture the reader if it's a mockCloser
+ var closerMock *mockCloser
+ if closer, ok := tt.reader.(*mockCloser); ok {
+ closerMock = closer
+ }
+
+ // Call SafeCloseReader (should not panic)
+ SafeCloseReader(tt.reader, tt.path)
+
+ // Verify expectations
+ if closerMock != nil {
+ if closerMock.closed != tt.expectClosed {
+ t.Errorf("Expected closed=%v, got %v", tt.expectClosed, closerMock.closed)
+ }
+ }
+ // Note: Error logging is tested indirectly through no panic
+ },
+ )
+ }
+}
+
+// validateWriteError validates error expectations for write operations.
+func validateWriteError(t *testing.T, err error, errContains, filePath string) {
+ t.Helper()
+
+ if err == nil {
+ t.Error("Expected error, got nil")
+
+ return
+ }
+
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("Error should contain %q, got: %v", errContains, err.Error())
+ }
+
+ var structErr *StructuredError
+ if !errors.As(err, &structErr) {
+ t.Error("Expected StructuredError")
+
+ return
+ }
+
+ if structErr.Type != ErrorTypeIO {
+ t.Errorf(TestFmtExpectedErrorTypeIO, structErr.Type)
+ }
+ if structErr.Code != CodeIOWrite {
+ t.Errorf("Expected CodeIOWrite, got %v", structErr.Code)
+ }
+ if filePath != "" && structErr.FilePath != filePath {
+ t.Errorf(TestFmtExpectedFilePath, filePath, structErr.FilePath)
+ }
+}
+
+func TestWriteWithErrorWrap(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ errorMsg string
+ filePath string
+ writeError error
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "successful write",
+ content: TestContentTest,
+ errorMsg: "write failed",
+ filePath: TestPathTestFileTXT,
+ writeError: nil,
+ wantErr: false,
+ },
+ {
+ name: "write error with file path",
+ content: TestContentTest,
+ errorMsg: "custom error message",
+ filePath: TestPathTestFileTXT,
+ writeError: errors.New(TestErrDiskFull),
+ wantErr: true,
+ errContains: "custom error message",
+ },
+ {
+ name: "write error without file path",
+ content: TestContentTest,
+ errorMsg: "write operation failed",
+ filePath: "",
+ writeError: errors.New("network error"),
+ wantErr: true,
+ errContains: "write operation failed",
+ },
+ {
+ name: TestContentEmpty,
+ content: "",
+ errorMsg: "empty write",
+ filePath: TestPathTestEmptyTXT,
+ writeError: nil,
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ writer := &mockWriter{writeError: tt.writeError}
+ err := WriteWithErrorWrap(writer, tt.content, tt.errorMsg, tt.filePath)
+
+ if tt.wantErr {
+ validateWriteError(t, err, tt.errContains, tt.filePath)
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("WriteWithErrorWrap() unexpected error: %v", err)
+ }
+ if string(writer.written) != tt.content {
+ t.Errorf(TestFmtExpectedContent, tt.content, string(writer.written))
+ }
+ },
+ )
+ }
+}
+
+// validateStreamError validates error expectations for stream operations.
+func validateStreamError(t *testing.T, err error, errContains, filePath string) {
+ t.Helper()
+
+ if err == nil {
+ t.Error("Expected error, got nil")
+
+ return
+ }
+
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("Error should contain %q, got: %v", errContains, err.Error())
+ }
+
+ var structErr *StructuredError
+ if !errors.As(err, &structErr) {
+ return
+ }
+
+ if structErr.Type != ErrorTypeIO {
+ t.Errorf(TestFmtExpectedErrorTypeIO, structErr.Type)
+ }
+ if filePath != "" && structErr.FilePath != filePath {
+ t.Errorf(TestFmtExpectedFilePath, filePath, structErr.FilePath)
+ }
+}
+
+func TestStreamContent(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ chunkSize int
+ filePath string
+ writeError error
+ processChunk func([]byte) []byte
+ wantErr bool
+ expectedContent string
+ errContains string
+ }{
+ {
+ name: "successful streaming",
+ content: "hello world test content",
+ chunkSize: 8,
+ filePath: TestPathTestFileTXT,
+ expectedContent: "hello world test content",
+ },
+ {
+ name: "streaming with chunk processor",
+ content: "abc def ghi",
+ chunkSize: 4,
+ filePath: TestPathTestFileTXT,
+ processChunk: bytes.ToUpper,
+ expectedContent: "ABC DEF GHI",
+ },
+ {
+ name: "write error during streaming",
+ content: TestContentTest,
+ chunkSize: 4,
+ filePath: TestPathTestFileTXT,
+ writeError: errors.New(TestErrDiskFull),
+ wantErr: true,
+ errContains: "failed to write content chunk",
+ },
+ {
+ name: TestContentEmpty,
+ content: "",
+ chunkSize: 1024,
+ filePath: TestPathTestEmptyTXT,
+ expectedContent: "",
+ },
+ {
+ name: "large chunk size",
+ content: "small content",
+ chunkSize: 1024,
+ filePath: TestPathTestFileTXT,
+ expectedContent: "small content",
+ },
+ {
+ name: "nil processor function",
+ content: "unchanged content",
+ chunkSize: 8,
+ filePath: TestPathTestFileTXT,
+ processChunk: nil,
+ expectedContent: "unchanged content",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ reader := strings.NewReader(tt.content)
+ writer := &mockWriter{writeError: tt.writeError}
+ err := StreamContent(reader, writer, tt.chunkSize, tt.filePath, tt.processChunk)
+
+ if tt.wantErr {
+ validateStreamError(t, err, tt.errContains, tt.filePath)
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("StreamContent() unexpected error: %v", err)
+ }
+ if string(writer.written) != tt.expectedContent {
+ t.Errorf(TestFmtExpectedContent, tt.expectedContent, string(writer.written))
+ }
+ },
+ )
+ }
+}
+
+func TestEscapeForJSON(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "simple string",
+ input: TestContentHelloWorld,
+ expected: TestContentHelloWorld,
+ },
+ {
+ name: "string with quotes",
+ input: `hello "quoted" world`,
+ expected: `hello \"quoted\" world`,
+ },
+ {
+ name: "string with newlines",
+ input: "line 1\nline 2\nline 3",
+ expected: "line 1\\nline 2\\nline 3",
+ },
+ {
+ name: "string with tabs",
+ input: "col1\tcol2\tcol3",
+ expected: "col1\\tcol2\\tcol3",
+ },
+ {
+ name: "string with backslashes",
+ input: `path\to\file`,
+ expected: `path\\to\\file`,
+ },
+ {
+ name: "string with unicode",
+ input: "Hello 世界 🌍",
+ expected: "Hello 世界 🌍",
+ },
+ {
+ name: "empty string",
+ input: "",
+ expected: "",
+ },
+ {
+ name: "control characters",
+ input: "\x00\x01\x1f",
+ expected: "\\u0000\\u0001\\u001f",
+ },
+ {
+ name: "mixed special characters",
+ input: "Line 1\n\t\"Quoted\"\r\nLine 2\\",
+ expected: "Line 1\\n\\t\\\"Quoted\\\"\\r\\nLine 2\\\\",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := EscapeForJSON(tt.input)
+ if result != tt.expected {
+ t.Errorf("EscapeForJSON() = %q, want %q", result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestEscapeForYAML(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "simple string no quotes needed",
+ input: "hello",
+ expected: "hello",
+ },
+ {
+ name: "string with spaces needs quotes",
+ input: TestContentHelloWorld,
+ expected: `"hello world"`,
+ },
+ {
+ name: "string with colon needs quotes",
+ input: "key:value",
+ expected: `"key:value"`,
+ },
+ {
+ name: "string starting with dash",
+ input: "-value",
+ expected: `"-value"`,
+ },
+ {
+ name: "string starting with question mark",
+ input: "?value",
+ expected: `"?value"`,
+ },
+ {
+ name: "string starting with colon",
+ input: ":value",
+ expected: `":value"`,
+ },
+ {
+ name: "boolean true",
+ input: "true",
+ expected: `"true"`,
+ },
+ {
+ name: "boolean false",
+ input: "false",
+ expected: `"false"`,
+ },
+ {
+ name: "null value",
+ input: "null",
+ expected: `"null"`,
+ },
+ {
+ name: "tilde null",
+ input: "~",
+ expected: `"~"`,
+ },
+ {
+ name: "empty string",
+ input: "",
+ expected: `""`,
+ },
+ {
+ name: "string with newlines",
+ input: "line1\nline2",
+ expected: "\"line1\nline2\"",
+ },
+ {
+ name: "string with tabs",
+ input: "col1\tcol2",
+ expected: "\"col1\tcol2\"",
+ },
+ {
+ name: "string with brackets",
+ input: "[list]",
+ expected: `"[list]"`,
+ },
+ {
+ name: "string with braces",
+ input: "{object}",
+ expected: `"{object}"`,
+ },
+ {
+ name: "string with pipe",
+ input: "value|other",
+ expected: `"value|other"`,
+ },
+ {
+ name: "string with greater than",
+ input: "value>other",
+ expected: `"value>other"`,
+ },
+ {
+ name: "string with quotes and backslashes",
+ input: `path\to"file"`,
+ expected: `"path\\to\"file\""`,
+ },
+ {
+ name: "normal identifier",
+ input: "normalValue123",
+ expected: "normalValue123",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := EscapeForYAML(tt.input)
+ if result != tt.expected {
+ t.Errorf("EscapeForYAML() = %q, want %q", result, tt.expected)
+ }
+ },
+ )
+ }
+}
+
+func TestStreamLines(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ filePath string
+ readError bool
+ writeError error
+ lineProcessor func(string) string
+ wantErr bool
+ expectedContent string
+ errContains string
+ }{
+ {
+ name: "successful line streaming",
+ content: "line1\nline2\nline3",
+ filePath: TestPathTestFileTXT,
+ expectedContent: "line1\nline2\nline3\n",
+ },
+ {
+ name: "line streaming with processor",
+ content: "hello\nworld",
+ filePath: TestPathTestFileTXT,
+ lineProcessor: strings.ToUpper,
+ expectedContent: "HELLO\nWORLD\n",
+ },
+ {
+ name: TestContentEmpty,
+ content: "",
+ filePath: TestPathTestEmptyTXT,
+ expectedContent: "",
+ },
+ {
+ name: "single line no newline",
+ content: "single line",
+ filePath: TestPathTestFileTXT,
+ expectedContent: "single line\n",
+ },
+ {
+ name: "content ending with newline",
+ content: "line1\nline2\n",
+ filePath: TestPathTestFileTXT,
+ expectedContent: "line1\nline2\n",
+ },
+ {
+ name: "write error during processing",
+ content: "line1\nline2",
+ filePath: TestPathTestFileTXT,
+ writeError: errors.New(TestErrDiskFull),
+ wantErr: true,
+ errContains: "failed to write processed line",
+ },
+ {
+ name: "nil line processor",
+ content: "unchanged\ncontent",
+ filePath: TestPathTestFileTXT,
+ lineProcessor: nil,
+ expectedContent: "unchanged\ncontent\n",
+ },
+ {
+ name: "multiple empty lines",
+ content: "\n\n\n",
+ filePath: TestPathTestFileTXT,
+ expectedContent: "\n\n\n",
+ },
+ {
+ name: "line processor with special characters",
+ content: "hello\t world\ntest\rline",
+ filePath: TestPathTestFileTXT,
+ lineProcessor: func(line string) string {
+ return strings.ReplaceAll(line, "\t", " ")
+ },
+ expectedContent: "hello world\ntest\rline\n",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ reader := strings.NewReader(tt.content)
+ writer := &mockWriter{writeError: tt.writeError}
+ err := StreamLines(reader, writer, tt.filePath, tt.lineProcessor)
+
+ if tt.wantErr {
+ validateStreamError(t, err, tt.errContains, tt.filePath)
+
+ return
+ }
+
+ if err != nil {
+ t.Errorf("StreamLines() unexpected error: %v", err)
+ }
+ if string(writer.written) != tt.expectedContent {
+ t.Errorf(TestFmtExpectedContent, tt.expectedContent, string(writer.written))
+ }
+ },
+ )
+ }
+}
+
+// validateWriteProcessedChunkResult validates the result of writeProcessedChunk operation.
+func validateWriteProcessedChunkResult(t *testing.T, writer *mockWriter, err error, wantErr bool, expected string) {
+ t.Helper()
+
+ if wantErr {
+ if err == nil {
+ t.Error("writeProcessedChunk() expected error, got nil")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("writeProcessedChunk() unexpected error: %v", err)
+ return
+ }
+
+ if string(writer.written) != expected {
+ t.Errorf("Expected %q, got %q", expected, string(writer.written))
+ }
+}
+
+// Test helper functions indirectly through their usage.
+func TestWriteProcessedChunk(t *testing.T) {
+ tests := []struct {
+ name string
+ chunk []byte
+ filePath string
+ processChunk func([]byte) []byte
+ writeError error
+ wantErr bool
+ expected string
+ }{
+ {
+ name: "successful chunk processing",
+ chunk: []byte("hello"),
+ filePath: TestPathTestFileTXT,
+ processChunk: bytes.ToUpper,
+ expected: "HELLO",
+ },
+ {
+ name: "no processor",
+ chunk: []byte("unchanged"),
+ filePath: TestPathTestFileTXT,
+ processChunk: nil,
+ expected: "unchanged",
+ },
+ {
+ name: "write error",
+ chunk: []byte("test"),
+ filePath: TestPathTestFileTXT,
+ writeError: errors.New("write failed"),
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ writer := &mockWriter{writeError: tt.writeError}
+ err := writeProcessedChunk(writer, tt.chunk, tt.filePath, tt.processChunk)
+ validateWriteProcessedChunkResult(t, writer, err, tt.wantErr, tt.expected)
+ },
+ )
+ }
+}
+
+// testWrapErrorFunc is a helper function to test error wrapping functions without duplication.
+func testWrapErrorFunc(
+ t *testing.T,
+ wrapFunc func(error, string) error,
+ expectedCode string,
+ expectedMessage string,
+ testName string,
+) {
+ t.Helper()
+
+ originalErr := errors.New("original " + testName + " error")
+ filePath := TestPathTestFileTXT
+
+ wrappedErr := wrapFunc(originalErr, filePath)
+
+ // Should return a StructuredError
+ var structErr *StructuredError
+ if !errors.As(wrappedErr, &structErr) {
+ t.Fatal("Expected StructuredError")
+ }
+
+ // Verify error properties
+ if structErr.Type != ErrorTypeIO {
+ t.Errorf(TestFmtExpectedErrorTypeIO, structErr.Type)
+ }
+ if structErr.Code != expectedCode {
+ t.Errorf("Expected %v, got %v", expectedCode, structErr.Code)
+ }
+ if structErr.FilePath != filePath {
+ t.Errorf(TestFmtExpectedFilePath, filePath, structErr.FilePath)
+ }
+ if structErr.Message != expectedMessage {
+ t.Errorf("Expected message %q, got %q", expectedMessage, structErr.Message)
+ }
+
+ // Test with empty file path
+ wrappedErrEmpty := wrapFunc(originalErr, "")
+ var structErrEmpty *StructuredError
+ if errors.As(wrappedErrEmpty, &structErrEmpty) && structErrEmpty.FilePath != "" {
+ t.Errorf("Expected empty FilePath, got %q", structErrEmpty.FilePath)
+ }
+}
+
+func TestWrapWriteError(t *testing.T) {
+ testWrapErrorFunc(t, wrapWriteError, CodeIOWrite, "failed to write content chunk", "write")
+}
+
+func TestWrapReadError(t *testing.T) {
+ testWrapErrorFunc(t, wrapReadError, CodeIORead, "failed to read content chunk", "read")
+}
+
+// Benchmark tests for performance-critical functions.
+func BenchmarkEscapeForJSON(b *testing.B) {
+ testString := `This is a "test string" with various characters: \n\t\r and some unicode: 世界`
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = EscapeForJSON(testString)
+ }
+}
+
+func BenchmarkEscapeForYAML(b *testing.B) {
+ testString := `This is a test string with: spaces, "quotes", and special chars -?:`
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = EscapeForYAML(testString)
+ }
+}
+
+func BenchmarkStreamContent(b *testing.B) {
+ content := strings.Repeat("This is test content that will be streamed in chunks.\n", 1000)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reader := strings.NewReader(content)
+ writer := &bytes.Buffer{}
+ _ = StreamContent(reader, writer, 1024, TestPathTestFileTXT, nil) // nolint:errcheck // benchmark test
+ }
+}
+
+// Integration test.
+func TestWriterIntegration(t *testing.T) {
+ // Test a complete workflow using multiple writer utilities
+ content := `Line 1 with "quotes"
+Line 2 with special chars: {}[]
+Line 3 with unicode: 世界`
+
+ // Test JSON escaping in content
+ var jsonBuf bytes.Buffer
+ processedContent := EscapeForJSON(content)
+ err := WriteWithErrorWrap(
+ &jsonBuf,
+ fmt.Sprintf(`{"content":"%s"}`, processedContent),
+ "JSON write failed",
+ "/test/file.json",
+ )
+ if err != nil {
+ t.Fatalf("JSON integration failed: %v", err)
+ }
+
+ // Test YAML escaping and line streaming
+ var yamlBuf bytes.Buffer
+ reader := strings.NewReader(content)
+ err = StreamLines(
+ reader, &yamlBuf, "/test/file.yaml", func(line string) string {
+ return "content: " + EscapeForYAML(line)
+ },
+ )
+ if err != nil {
+ t.Fatalf("YAML integration failed: %v", err)
+ }
+
+ // Verify both outputs contain expected content
+ jsonOutput := jsonBuf.String()
+ yamlOutput := yamlBuf.String()
+
+ if !strings.Contains(jsonOutput, `\"quotes\"`) {
+ t.Error("JSON output should contain escaped quotes")
+ }
+ if !strings.Contains(yamlOutput, `"Line 2 with special chars: {}[]"`) {
+ t.Error("YAML output should contain quoted special characters line")
+ }
+}
+
+// TestCheckContextCancellation tests the CheckContextCancellation function.
+func TestCheckContextCancellation(t *testing.T) {
+ tests := []struct {
+ name string
+ setupContext func() context.Context
+ operation string
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "active context",
+ setupContext: func() context.Context {
+ return context.Background()
+ },
+ operation: "test operation",
+ expectError: false,
+ },
+ {
+ name: "canceled context",
+ setupContext: func() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+ return ctx
+ },
+ operation: "test operation",
+ expectError: true,
+ errorContains: "test operation canceled",
+ },
+ {
+ name: "timeout context",
+ setupContext: func() context.Context {
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ defer cancel()
+ // Wait for timeout
+ time.Sleep(1 * time.Millisecond)
+ return ctx
+ },
+ operation: "timeout operation",
+ expectError: true,
+ errorContains: "timeout operation canceled",
+ },
+ {
+ name: "context with deadline exceeded",
+ setupContext: func() context.Context {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1*time.Hour))
+ defer cancel()
+ return ctx
+ },
+ operation: "deadline operation",
+ expectError: true,
+ errorContains: "deadline operation canceled",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := tt.setupContext()
+ err := CheckContextCancellation(ctx, tt.operation)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("Expected error for %s, but got none", tt.name)
+ } else if !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("Error %q should contain %q", err.Error(), tt.errorContains)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error for %s: %v", tt.name, err)
+ }
+ }
+ })
+ }
+}
+
+// TestWithContextCheck tests the WithContextCheck function.
+func TestWithContextCheck(t *testing.T) {
+ tests := []struct {
+ name string
+ setupContext func() context.Context
+ operation string
+ fn func() error
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "active context with successful operation",
+ setupContext: func() context.Context {
+ return context.Background()
+ },
+ operation: "successful operation",
+ fn: func() error {
+ return nil
+ },
+ expectError: false,
+ },
+ {
+ name: "active context with failing operation",
+ setupContext: func() context.Context {
+ return context.Background()
+ },
+ operation: "failing operation",
+ fn: func() error {
+ return errors.New("operation failed")
+ },
+ expectError: true,
+ errorContains: "operation failed",
+ },
+ {
+ name: "canceled context before operation",
+ setupContext: func() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+ return ctx
+ },
+ operation: "canceled operation",
+ fn: func() error {
+ t.Error("Function should not be called with canceled context")
+ return nil
+ },
+ expectError: true,
+ errorContains: "canceled operation canceled",
+ },
+ {
+ name: "timeout context before operation",
+ setupContext: func() context.Context {
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ defer cancel()
+ // Wait for timeout
+ time.Sleep(1 * time.Millisecond)
+ return ctx
+ },
+ operation: "timeout operation",
+ fn: func() error {
+ t.Error("Function should not be called with timed out context")
+ return nil
+ },
+ expectError: true,
+ errorContains: "timeout operation canceled",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := tt.setupContext()
+ err := WithContextCheck(ctx, tt.operation, tt.fn)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("Expected error for %s, but got none", tt.name)
+ } else if !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("Error %q should contain %q", err.Error(), tt.errorContains)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error for %s: %v", tt.name, err)
+ }
+ }
+ })
+ }
+}
+
+// assertNoError is a helper that fails the test if err is not nil.
+func assertNoError(t *testing.T, err error, msg string) {
+ t.Helper()
+ if err != nil {
+ t.Errorf("%s: %v", msg, err)
+ }
+}
+
+// assertError is a helper that fails the test if err is nil.
+func assertError(t *testing.T, err error, msg string) {
+ t.Helper()
+ if err == nil {
+ t.Error(msg)
+ }
+}
+
+// TestContextCancellationIntegration tests integration scenarios.
+func TestContextCancellationIntegration(t *testing.T) {
+ t.Run("multiple operations with context check", func(t *testing.T) {
+ ctx := context.Background()
+
+ // First operation should succeed
+ err := WithContextCheck(ctx, "operation 1", func() error {
+ return nil
+ })
+ assertNoError(t, err, "First operation failed")
+
+ // Second operation should also succeed
+ err = WithContextCheck(ctx, "operation 2", func() error {
+ return nil
+ })
+ assertNoError(t, err, "Second operation failed")
+ })
+
+ t.Run("chained context checks", func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // First check should pass
+ err := CheckContextCancellation(ctx, "first check")
+ assertNoError(t, err, "First check should pass")
+
+ // Cancel context
+ cancel()
+
+ // Second check should fail
+ err = CheckContextCancellation(ctx, "second check")
+ assertError(t, err, "Second check should fail after cancellation")
+
+ // Third operation should also fail
+ err = WithContextCheck(ctx, "third operation", func() error {
+ t.Error("Function should not be called")
+ return nil
+ })
+ assertError(t, err, "Third operation should fail after cancellation")
+ })
+
+ t.Run("context cancellation propagation", func(t *testing.T) {
+ // Test that context cancellation properly propagates through nested calls
+ parentCtx, parentCancel := context.WithCancel(context.Background())
+ childCtx, childCancel := context.WithCancel(parentCtx)
+
+ defer parentCancel()
+ defer childCancel()
+
+ // Both contexts should be active initially
+ err := CheckContextCancellation(parentCtx, "parent")
+ assertNoError(t, err, "Parent context should be active")
+
+ err = CheckContextCancellation(childCtx, "child")
+ assertNoError(t, err, "Child context should be active")
+
+ // Cancel parent - child should also be canceled
+ parentCancel()
+
+ err = CheckContextCancellation(childCtx, "child after parent cancel")
+ assertError(t, err, "Child context should be canceled when parent is canceled")
+ })
+}
diff --git a/sonar-project.properties b/sonar-project.properties
new file mode 100644
index 0000000..205bcb6
--- /dev/null
+++ b/sonar-project.properties
@@ -0,0 +1,2 @@
+sonar.organization=ivuorinen
+sonar.projectKey=gibidify
diff --git a/templates/engine.go b/templates/engine.go
new file mode 100644
index 0000000..0af0a07
--- /dev/null
+++ b/templates/engine.go
@@ -0,0 +1,389 @@
+// Package templates provides templating engine functionality for output formatting.
+package templates
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "text/template"
+ "time"
+
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// bufferBuilder wraps bytes.Buffer with error accumulation for robust error handling.
+type bufferBuilder struct {
+ buf *bytes.Buffer
+ err error
+}
+
+// newBufferBuilder creates a new buffer builder.
+func newBufferBuilder() *bufferBuilder {
+ return &bufferBuilder{buf: &bytes.Buffer{}}
+}
+
+// writeString writes a string, accumulating any errors.
+func (bb *bufferBuilder) writeString(s string) {
+ if bb.err != nil {
+ return
+ }
+ _, bb.err = bb.buf.WriteString(s)
+}
+
+// String returns the accumulated string, or empty string if there was an error.
+func (bb *bufferBuilder) String() string {
+ if bb.err != nil {
+ return ""
+ }
+ return bb.buf.String()
+}
+
+// Engine handles template processing and output generation.
+type Engine struct {
+ template OutputTemplate
+ context TemplateContext
+}
+
+// NewEngine creates a new template engine with the specified template.
+func NewEngine(templateName string, context TemplateContext) (*Engine, error) {
+ tmpl, exists := BuiltinTemplates[templateName]
+ if !exists {
+ return nil, fmt.Errorf("template '%s' not found", templateName)
+ }
+
+ // Apply custom variables to context
+ if context.Variables == nil {
+ context.Variables = make(map[string]string)
+ }
+
+ // Merge template variables with context variables
+ for k, v := range tmpl.Variables {
+ if _, exists := context.Variables[k]; !exists {
+ context.Variables[k] = v
+ }
+ }
+
+ return &Engine{
+ template: tmpl,
+ context: context,
+ }, nil
+}
+
+// NewEngineWithCustomTemplate creates a new template engine with a custom template.
+func NewEngineWithCustomTemplate(customTemplate OutputTemplate, context TemplateContext) *Engine {
+ if context.Variables == nil {
+ context.Variables = make(map[string]string)
+ }
+
+ // Merge template variables with context variables
+ for k, v := range customTemplate.Variables {
+ if _, exists := context.Variables[k]; !exists {
+ context.Variables[k] = v
+ }
+ }
+
+ return &Engine{
+ template: customTemplate,
+ context: context,
+ }
+}
+
+// RenderHeader renders the template header.
+func (e *Engine) RenderHeader() (string, error) {
+ return e.renderTemplate(e.template.Header, e.context)
+}
+
+// RenderFooter renders the template footer.
+func (e *Engine) RenderFooter() (string, error) {
+ return e.renderTemplate(e.template.Footer, e.context)
+}
+
+// RenderFileHeader renders the file header for a specific file.
+func (e *Engine) RenderFileHeader(fileCtx FileContext) (string, error) {
+ return e.renderTemplate(e.template.FileHeader, fileCtx)
+}
+
+// RenderFileFooter renders the file footer for a specific file.
+func (e *Engine) RenderFileFooter(fileCtx FileContext) (string, error) {
+ return e.renderTemplate(e.template.FileFooter, fileCtx)
+}
+
+// RenderFileContent renders file content according to template options.
+func (e *Engine) RenderFileContent(fileCtx FileContext) (string, error) {
+ content := fileCtx.Content
+
+ // Apply markdown-specific formatting if needed
+ if e.template.Format == shared.FormatMarkdown && e.template.Markdown.UseCodeBlocks {
+ // Content is wrapped in code blocks via FileHeader/FileFooter
+ return content, nil
+ }
+
+ // Apply line length limits if specified
+ if e.template.Markdown.MaxLineLength > 0 {
+ content = e.wrapLongLines(content, e.template.Markdown.MaxLineLength)
+ }
+
+ // Apply folding for long files if enabled
+ if e.template.Markdown.FoldLongFiles && len(strings.Split(content, "\n")) > 100 {
+ lines := strings.Split(content, "\n")
+ if len(lines) > 100 {
+ content = strings.Join(lines[:50], "\n") + "\n\n\n\n" +
+ strings.Join(lines[len(lines)-50:], "\n")
+ }
+ }
+
+ return content, nil
+}
+
+// RenderMetadata renders metadata based on template options.
+func (e *Engine) RenderMetadata() (string, error) {
+ if !e.hasAnyMetadata() {
+ return "", nil
+ }
+
+ buf := newBufferBuilder()
+
+ if e.template.Format == shared.FormatMarkdown {
+ buf.writeString("## Metadata\n\n")
+ }
+
+ if e.template.Metadata.IncludeTimestamp {
+ buf.writeString(fmt.Sprintf("**Generated**: %s\n", e.context.Timestamp.Format(shared.TemplateFmtTimestamp)))
+ }
+
+ if e.template.Metadata.IncludeSourcePath {
+ buf.writeString(fmt.Sprintf("**Source**: %s\n", e.context.SourcePath))
+ }
+
+ if e.template.Metadata.IncludeFileCount {
+ buf.writeString(
+ fmt.Sprintf(
+ "**Files**: %d total (%d processed, %d skipped, %d errors)\n",
+ e.context.TotalFiles, e.context.ProcessedFiles, e.context.SkippedFiles, e.context.ErrorFiles,
+ ),
+ )
+ }
+
+ if e.template.Metadata.IncludeTotalSize {
+ buf.writeString(fmt.Sprintf("**Total Size**: %d bytes\n", e.context.TotalSize))
+ }
+
+ if e.template.Metadata.IncludeProcessingTime {
+ buf.writeString(fmt.Sprintf("**Processing Time**: %s\n", e.context.ProcessingTime))
+ }
+
+ if e.template.Metadata.IncludeMetrics && e.context.FilesPerSecond > 0 {
+ buf.writeString(
+ fmt.Sprintf(
+ "**Performance**: %.1f files/sec, %.1f MB/sec\n",
+ e.context.FilesPerSecond, e.context.BytesPerSecond/float64(shared.BytesPerMB),
+ ),
+ )
+ }
+
+ if e.template.Metadata.IncludeFileTypes && len(e.context.FileTypes) > 0 {
+ buf.writeString("**File Types**:\n")
+ for fileType, count := range e.context.FileTypes {
+ buf.writeString(fmt.Sprintf("- %s: %d files\n", fileType, count))
+ }
+ }
+
+ buf.writeString("\n")
+
+ return buf.String(), nil
+}
+
+// RenderTableOfContents renders a table of contents if enabled.
+func (e *Engine) RenderTableOfContents(files []FileContext) (string, error) {
+ if !e.template.Markdown.TableOfContents {
+ return "", nil
+ }
+
+ buf := newBufferBuilder()
+ buf.writeString("## Table of Contents\n\n")
+
+ for _, file := range files {
+ // Create markdown anchor from file path
+ anchor := strings.ToLower(strings.ReplaceAll(file.RelativePath, "/", "-"))
+ anchor = strings.ReplaceAll(anchor, ".", "")
+ anchor = strings.ReplaceAll(anchor, " ", "-")
+
+ buf.writeString(fmt.Sprintf("- [%s](#%s)\n", file.RelativePath, anchor))
+ }
+
+ buf.writeString("\n")
+
+ return buf.String(), nil
+}
+
+// Template returns the current template.
+func (e *Engine) Template() OutputTemplate {
+ return e.template
+}
+
+// Context returns the current context.
+func (e *Engine) Context() TemplateContext {
+ return e.context
+}
+
+// renderTemplate renders a template string with the given context.
+func (e *Engine) renderTemplate(templateStr string, data any) (string, error) {
+ if templateStr == "" {
+ return "", nil
+ }
+
+ tmpl, err := template.New("template").Funcs(e.getTemplateFunctions()).Parse(templateStr)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse template: %w", err)
+ }
+
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, data); err != nil {
+ return "", fmt.Errorf("failed to execute template: %w", err)
+ }
+
+ return buf.String(), nil
+}
+
+// getTemplateFunctions returns custom functions available in templates.
+func (e *Engine) getTemplateFunctions() template.FuncMap {
+ return template.FuncMap{
+ "formatSize": func(size int64) string {
+ return e.formatBytes(size)
+ },
+ "formatTime": func(t time.Time) string {
+ return t.Format(shared.TemplateFmtTimestamp)
+ },
+ "basename": filepath.Base,
+ "ext": filepath.Ext,
+ "dir": filepath.Dir,
+ "upper": strings.ToUpper,
+ "lower": strings.ToLower,
+ "title": func(s string) string {
+ return cases.Title(language.English).String(s)
+ },
+ "trim": strings.TrimSpace,
+ "replace": func(old, replacement, str string) string {
+ return strings.ReplaceAll(str, old, replacement)
+ },
+ "join": strings.Join,
+ "split": func(sep, str string) []string {
+ return strings.Split(str, sep)
+ },
+ }
+}
+
+// formatBytes formats byte counts in human-readable format.
+func (e *Engine) formatBytes(byteCount int64) string {
+ if byteCount == 0 {
+ return "0B"
+ }
+
+ if byteCount < shared.BytesPerKB {
+ return fmt.Sprintf(shared.MetricsFmtBytesShort, byteCount)
+ }
+
+ exp := 0
+ for n := byteCount / shared.BytesPerKB; n >= shared.BytesPerKB; n /= shared.BytesPerKB {
+ exp++
+ }
+
+ divisor := int64(1)
+ for i := 0; i < exp+1; i++ {
+ divisor *= shared.BytesPerKB
+ }
+
+ return fmt.Sprintf(shared.MetricsFmtBytesHuman, float64(byteCount)/float64(divisor), "KMGTPE"[exp])
+}
+
+// wrapLongLines wraps lines that exceed the specified length.
+func (e *Engine) wrapLongLines(content string, maxLength int) string {
+ lines := strings.Split(content, "\n")
+ var wrappedLines []string
+
+ for _, line := range lines {
+ wrappedLines = append(wrappedLines, e.wrapSingleLine(line, maxLength)...)
+ }
+
+ return strings.Join(wrappedLines, "\n")
+}
+
+// wrapSingleLine wraps a single line if it exceeds maxLength.
+func (e *Engine) wrapSingleLine(line string, maxLength int) []string {
+ if len(line) <= maxLength {
+ return []string{line}
+ }
+
+ return e.wrapLongLineWithWords(line, maxLength)
+}
+
+// wrapLongLineWithWords wraps a long line by breaking it into words.
+func (e *Engine) wrapLongLineWithWords(line string, maxLength int) []string {
+ words := strings.Fields(line)
+ var wrappedLines []string
+ var currentLine strings.Builder
+
+ for _, word := range words {
+ if e.wouldExceedLength(¤tLine, word, maxLength) {
+ if currentLine.Len() > 0 {
+ wrappedLines = append(wrappedLines, currentLine.String())
+ currentLine.Reset()
+ }
+ }
+
+ e.addWordToLine(¤tLine, word)
+ }
+
+ if currentLine.Len() > 0 {
+ wrappedLines = append(wrappedLines, currentLine.String())
+ }
+
+ return wrappedLines
+}
+
+// wouldExceedLength checks if adding a word would exceed the maximum length.
+func (e *Engine) wouldExceedLength(currentLine *strings.Builder, word string, maxLength int) bool {
+ return currentLine.Len()+len(word)+1 > maxLength
+}
+
+// addWordToLine adds a word to the current line with appropriate spacing.
+func (e *Engine) addWordToLine(currentLine *strings.Builder, word string) {
+ if currentLine.Len() > 0 {
+ // These errors are highly unlikely and would only occur in extreme memory conditions
+ // We intentionally ignore them as recovering would be complex and the impact minimal
+ _ = currentLine.WriteByte(' ')
+ }
+ // Similar rationale - memory exhaustion is the only realistic failure case
+ _, _ = currentLine.WriteString(word)
+}
+
+// hasAnyMetadata checks if any metadata options are enabled.
+func (e *Engine) hasAnyMetadata() bool {
+ m := e.template.Metadata
+
+ return m.IncludeStats || m.IncludeTimestamp || m.IncludeFileCount ||
+ m.IncludeSourcePath || m.IncludeFileTypes || m.IncludeProcessingTime ||
+ m.IncludeTotalSize || m.IncludeMetrics
+}
+
+// ListBuiltinTemplates returns a list of available builtin templates.
+func ListBuiltinTemplates() []string {
+ names := make([]string, 0, len(BuiltinTemplates))
+ for name := range BuiltinTemplates {
+ names = append(names, name)
+ }
+
+ return names
+}
+
+// BuiltinTemplate returns a builtin template by name.
+func BuiltinTemplate(name string) (OutputTemplate, bool) {
+ tmpl, exists := BuiltinTemplates[name]
+
+ return tmpl, exists
+}
diff --git a/templates/engine_test.go b/templates/engine_test.go
new file mode 100644
index 0000000..6aa62fb
--- /dev/null
+++ b/templates/engine_test.go
@@ -0,0 +1,540 @@
+package templates
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+func TestNewEngine(t *testing.T) {
+ context := TemplateContext{
+ Timestamp: time.Now(),
+ SourcePath: "/test/source",
+ Format: "markdown",
+ }
+
+ engine, err := NewEngine("default", context)
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ if engine == nil {
+ t.Fatal("Engine is nil")
+ }
+
+ if engine.template.Name != "Default" {
+ t.Errorf("Expected template name 'Default', got '%s'", engine.template.Name)
+ }
+}
+
+func TestNewEngineUnknownTemplate(t *testing.T) {
+ context := TemplateContext{}
+
+ _, err := NewEngine("nonexistent", context)
+ if err == nil {
+ t.Error("Expected error for unknown template")
+ }
+
+ if !strings.Contains(err.Error(), "template 'nonexistent' not found") {
+ t.Errorf("Unexpected error message: %v", err)
+ }
+}
+
+func TestNewEngineWithCustomTemplate(t *testing.T) {
+ customTemplate := OutputTemplate{
+ Name: "Custom",
+ Description: "Custom template",
+ Format: "markdown",
+ Header: "# Custom Header",
+ Footer: "Custom Footer",
+ }
+
+ context := TemplateContext{
+ SourcePath: "/test",
+ }
+
+ engine := NewEngineWithCustomTemplate(customTemplate, context)
+
+ if engine == nil {
+ t.Fatal("Engine is nil")
+ }
+
+ if engine.template.Name != "Custom" {
+ t.Errorf("Expected template name 'Custom', got '%s'", engine.template.Name)
+ }
+}
+
+func TestRenderHeader(t *testing.T) {
+ context := TemplateContext{
+ Timestamp: time.Date(2023, 12, 25, 10, 0, 0, 0, time.UTC),
+ SourcePath: shared.TestPathTestProject,
+ Format: "markdown",
+ }
+
+ engine, err := NewEngine("default", context)
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ header, err := engine.RenderHeader()
+ if err != nil {
+ t.Fatalf("RenderHeader failed: %v", err)
+ }
+
+ if !strings.Contains(header, shared.TestPathTestProject) {
+ t.Errorf("Header should contain source path, got: %s", header)
+ }
+
+ if !strings.Contains(header, "2023-12-25") {
+ t.Errorf("Header should contain timestamp, got: %s", header)
+ }
+}
+
+func TestRenderFooter(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ footer, err := engine.RenderFooter()
+ if err != nil {
+ t.Fatalf("RenderFooter failed: %v", err)
+ }
+
+ if !strings.Contains(footer, "gibidify") {
+ t.Errorf("Footer should contain 'gibidify', got: %s", footer)
+ }
+}
+
+func TestRenderFileHeader(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ fileCtx := FileContext{
+ Path: shared.TestPathTestFileGo,
+ RelativePath: shared.TestFileGoExt,
+ Name: shared.TestFileGoExt,
+ Language: "go",
+ Size: 1024,
+ }
+
+ header, err := engine.RenderFileHeader(fileCtx)
+ if err != nil {
+ t.Fatalf("RenderFileHeader failed: %v", err)
+ }
+
+ if !strings.Contains(header, shared.TestFileGoExt) {
+ t.Errorf("File header should contain filename, got: %s", header)
+ }
+
+ if !strings.Contains(header, "```go") {
+ t.Errorf("File header should contain language code block, got: %s", header)
+ }
+}
+
+func TestRenderFileFooter(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ fileCtx := FileContext{
+ Path: shared.TestPathTestFileGo,
+ }
+
+ footer, err := engine.RenderFileFooter(fileCtx)
+ if err != nil {
+ t.Fatalf("RenderFileFooter failed: %v", err)
+ }
+
+ if !strings.Contains(footer, "```") {
+ t.Errorf("File footer should contain code block close, got: %s", footer)
+ }
+}
+
+func TestRenderFileContentBasic(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ fileCtx := FileContext{
+ Content: "package main\n\nfunc main() {\n fmt.Println(\"hello\")\n}",
+ }
+
+ content, err := engine.RenderFileContent(fileCtx)
+ if err != nil {
+ t.Fatalf(shared.TestMsgRenderFileContentFailed, err)
+ }
+
+ if content != fileCtx.Content {
+ t.Errorf("Content should be unchanged for basic case, got: %s", content)
+ }
+}
+
+func TestRenderFileContentLongLines(t *testing.T) {
+ customTemplate := OutputTemplate{
+ Format: "markdown",
+ Markdown: MarkdownOptions{
+ MaxLineLength: 20,
+ },
+ }
+
+ engine := NewEngineWithCustomTemplate(customTemplate, TemplateContext{})
+
+ fileCtx := FileContext{
+ Content: "this is a very long line that should be wrapped",
+ }
+
+ content, err := engine.RenderFileContent(fileCtx)
+ if err != nil {
+ t.Fatalf(shared.TestMsgRenderFileContentFailed, err)
+ }
+
+ lines := strings.Split(content, "\n")
+ for _, line := range lines {
+ if len(line) > 20 {
+ t.Errorf("Line length should not exceed 20 characters, got line: %s (len=%d)", line, len(line))
+ }
+ }
+}
+
+func TestRenderFileContentFoldLongFiles(t *testing.T) {
+ customTemplate := OutputTemplate{
+ Format: "markdown",
+ Markdown: MarkdownOptions{
+ FoldLongFiles: true,
+ },
+ }
+
+ engine := NewEngineWithCustomTemplate(customTemplate, TemplateContext{})
+
+ // Create content with more than 100 lines
+ lines := make([]string, 150)
+ for i := range lines {
+ lines[i] = fmt.Sprintf("line %d", i+1)
+ }
+
+ fileCtx := FileContext{
+ Content: strings.Join(lines, "\n"),
+ }
+
+ content, err := engine.RenderFileContent(fileCtx)
+ if err != nil {
+ t.Fatalf(shared.TestMsgRenderFileContentFailed, err)
+ }
+
+ if !strings.Contains(content, "lines truncated") {
+ t.Error("Content should contain truncation message")
+ }
+}
+
+func TestRenderMetadata(t *testing.T) {
+ context := TemplateContext{
+ Timestamp: time.Date(2023, 12, 25, 10, 0, 0, 0, time.UTC),
+ SourcePath: shared.TestPathTestProject,
+ TotalFiles: 10,
+ ProcessedFiles: 8,
+ SkippedFiles: 1,
+ ErrorFiles: 1,
+ TotalSize: 1024000,
+ ProcessingTime: "2.5s",
+ FilesPerSecond: 3.2,
+ BytesPerSecond: 409600,
+ FileTypes: map[string]int{
+ "go": 5,
+ "js": 3,
+ "yaml": 2,
+ },
+ }
+
+ engine, err := NewEngine("detailed", context)
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ metadata, err := engine.RenderMetadata()
+ if err != nil {
+ t.Fatalf("RenderMetadata failed: %v", err)
+ }
+
+ if !strings.Contains(metadata, "2023-12-25") {
+ t.Error("Metadata should contain timestamp")
+ }
+
+ if !strings.Contains(metadata, shared.TestPathTestProject) {
+ t.Error("Metadata should contain source path")
+ }
+
+ if !strings.Contains(metadata, "10 total") {
+ t.Error("Metadata should contain file count")
+ }
+
+ if !strings.Contains(metadata, "1024000 bytes") {
+ t.Error("Metadata should contain total size")
+ }
+
+ if !strings.Contains(metadata, "2.5s") {
+ t.Error("Metadata should contain processing time")
+ }
+
+ if !strings.Contains(metadata, "3.2 files/sec") {
+ t.Error("Metadata should contain performance metrics")
+ }
+
+ if !strings.Contains(metadata, "go: 5 files") {
+ t.Error("Metadata should contain file types")
+ }
+}
+
+func TestRenderTableOfContents(t *testing.T) {
+ engine, err := NewEngine("detailed", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ files := []FileContext{
+ {RelativePath: "main.go"},
+ {RelativePath: "utils/helper.go"},
+ {RelativePath: "config.yaml"},
+ }
+
+ toc, err := engine.RenderTableOfContents(files)
+ if err != nil {
+ t.Fatalf("RenderTableOfContents failed: %v", err)
+ }
+
+ if !strings.Contains(toc, "Table of Contents") {
+ t.Error("TOC should contain header")
+ }
+
+ if !strings.Contains(toc, "[main.go]") {
+ t.Error("TOC should contain main.go link")
+ }
+
+ if !strings.Contains(toc, "[utils/helper.go]") {
+ t.Error("TOC should contain utils/helper.go link")
+ }
+
+ if !strings.Contains(toc, "[config.yaml]") {
+ t.Error("TOC should contain config.yaml link")
+ }
+}
+
+func TestRenderTableOfContentsDisabled(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ files := []FileContext{{RelativePath: "test.go"}}
+
+ toc, err := engine.RenderTableOfContents(files)
+ if err != nil {
+ t.Fatalf("RenderTableOfContents failed: %v", err)
+ }
+
+ if toc != "" {
+ t.Errorf("TOC should be empty when disabled, got: %s", toc)
+ }
+}
+
+func TestTemplateFunctions(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ testCases := []struct {
+ name string
+ template string
+ context any
+ expected string
+ }{
+ {
+ name: "formatSize",
+ template: "{{formatSize .Size}}",
+ context: struct{ Size int64 }{Size: 1024},
+ expected: "1.0KB",
+ },
+ {
+ name: "basename",
+ template: "{{basename .Path}}",
+ context: struct{ Path string }{Path: shared.TestPathTestFileGo},
+ expected: shared.TestFileGoExt,
+ },
+ {
+ name: "ext",
+ template: "{{ext .Path}}",
+ context: struct{ Path string }{Path: shared.TestPathTestFileGo},
+ expected: ".go",
+ },
+ {
+ name: "upper",
+ template: "{{upper .Text}}",
+ context: struct{ Text string }{Text: "hello"},
+ expected: "HELLO",
+ },
+ {
+ name: "lower",
+ template: "{{lower .Text}}",
+ context: struct{ Text string }{Text: "HELLO"},
+ expected: "hello",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(
+ tc.name, func(t *testing.T) {
+ result, err := engine.renderTemplate(tc.template, tc.context)
+ if err != nil {
+ t.Fatalf("Template rendering failed: %v", err)
+ }
+
+ if result != tc.expected {
+ t.Errorf("Expected %q, got %q", tc.expected, result)
+ }
+ },
+ )
+ }
+}
+
+func TestListBuiltinTemplates(t *testing.T) {
+ templates := ListBuiltinTemplates()
+
+ if len(templates) == 0 {
+ t.Error("Should have builtin templates")
+ }
+
+ expectedTemplates := []string{"default", "minimal", "detailed", "compact"}
+ for _, expected := range expectedTemplates {
+ found := false
+ for _, tmpl := range templates {
+ if tmpl == expected {
+ found = true
+
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected template %s not found in list", expected)
+ }
+ }
+}
+
+func TestBuiltinTemplate(t *testing.T) {
+ tmpl, exists := BuiltinTemplate("default")
+ if !exists {
+ t.Error("Default template should exist")
+ }
+
+ if tmpl.Name != "Default" {
+ t.Errorf("Expected name 'Default', got %s", tmpl.Name)
+ }
+
+ _, exists = BuiltinTemplate("nonexistent")
+ if exists {
+ t.Error("Nonexistent template should not exist")
+ }
+}
+
+func TestFormatBytes(t *testing.T) {
+ engine, err := NewEngine("default", TemplateContext{})
+ if err != nil {
+ t.Fatalf(shared.TestMsgNewEngineFailed, err)
+ }
+
+ testCases := []struct {
+ bytes int64
+ expected string
+ }{
+ {0, "0B"},
+ {512, "512B"},
+ {1024, "1.0KB"},
+ {1536, "1.5KB"},
+ {1024 * 1024, "1.0MB"},
+ {5 * 1024 * 1024 * 1024, "5.0GB"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(
+ tc.expected, func(t *testing.T) {
+ result := engine.formatBytes(tc.bytes)
+ if result != tc.expected {
+ t.Errorf("formatBytes(%d) = %s, want %s", tc.bytes, result, tc.expected)
+ }
+ },
+ )
+ }
+}
+
+// validateTemplateRendering validates all template rendering functions for a given engine.
+func validateTemplateRendering(t *testing.T, engine *Engine, name string) {
+ t.Helper()
+
+ // Test header rendering
+ _, err := engine.RenderHeader()
+ if err != nil {
+ t.Errorf("Failed to render header for template %s: %v", name, err)
+ }
+
+ // Test footer rendering
+ _, err = engine.RenderFooter()
+ if err != nil {
+ t.Errorf("Failed to render footer for template %s: %v", name, err)
+ }
+
+ // Test file rendering
+ validateFileRendering(t, engine, name)
+}
+
+// validateFileRendering validates file header and footer rendering for a given engine.
+func validateFileRendering(t *testing.T, engine *Engine, name string) {
+ t.Helper()
+
+ fileCtx := FileContext{
+ Path: "/test.go",
+ RelativePath: "test.go",
+ Language: "go",
+ Size: 100,
+ }
+
+ // Test file header rendering
+ _, err := engine.RenderFileHeader(fileCtx)
+ if err != nil {
+ t.Errorf("Failed to render file header for template %s: %v", name, err)
+ }
+
+ // Test file footer rendering
+ _, err = engine.RenderFileFooter(fileCtx)
+ if err != nil {
+ t.Errorf("Failed to render file footer for template %s: %v", name, err)
+ }
+}
+
+func TestBuiltinTemplatesIntegrity(t *testing.T) {
+ // Test that all builtin templates are valid and can be used
+ context := TemplateContext{
+ Timestamp: time.Now(),
+ SourcePath: "/test",
+ Format: "markdown",
+ }
+
+ for name := range BuiltinTemplates {
+ t.Run(
+ name, func(t *testing.T) {
+ engine, err := NewEngine(name, context)
+ if err != nil {
+ t.Fatalf("Failed to create engine for template %s: %v", name, err)
+ }
+
+ validateTemplateRendering(t, engine, name)
+ },
+ )
+ }
+}
diff --git a/templates/types.go b/templates/types.go
new file mode 100644
index 0000000..8efbf38
--- /dev/null
+++ b/templates/types.go
@@ -0,0 +1,222 @@
+// Package templates provides output formatting templates and customization options.
+package templates
+
+import (
+ "time"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// OutputTemplate represents a customizable output template.
+type OutputTemplate struct {
+ Name string `json:"name" yaml:"name"`
+ Description string `json:"description" yaml:"description"`
+ Format string `json:"format" yaml:"format"` // markdown, json, yaml
+ Header string `json:"header" yaml:"header"`
+ Footer string `json:"footer" yaml:"footer"`
+ FileHeader string `json:"file_header" yaml:"file_header"`
+ FileFooter string `json:"file_footer" yaml:"file_footer"`
+ Metadata MetadataOptions `json:"metadata" yaml:"metadata"`
+ Markdown MarkdownOptions `json:"markdown" yaml:"markdown"`
+ Variables map[string]string `json:"variables" yaml:"variables"`
+}
+
+// MetadataOptions controls what metadata to include in the output.
+type MetadataOptions struct {
+ IncludeStats bool `json:"include_stats" yaml:"include_stats"`
+ IncludeTimestamp bool `json:"include_timestamp" yaml:"include_timestamp"`
+ IncludeFileCount bool `json:"include_file_count" yaml:"include_file_count"`
+ IncludeSourcePath bool `json:"include_source_path" yaml:"include_source_path"`
+ IncludeFileTypes bool `json:"include_file_types" yaml:"include_file_types"`
+ IncludeProcessingTime bool `json:"include_processing_time" yaml:"include_processing_time"`
+ IncludeTotalSize bool `json:"include_total_size" yaml:"include_total_size"`
+ IncludeMetrics bool `json:"include_metrics" yaml:"include_metrics"`
+}
+
+// MarkdownOptions controls markdown-specific formatting.
+type MarkdownOptions struct {
+ UseCodeBlocks bool `json:"use_code_blocks" yaml:"use_code_blocks"`
+ IncludeLanguage bool `json:"include_language" yaml:"include_language"`
+ HeaderLevel int `json:"header_level" yaml:"header_level"` // 1-6 for # levels
+ TableOfContents bool `json:"table_of_contents" yaml:"table_of_contents"`
+ UseCollapsible bool `json:"use_collapsible" yaml:"use_collapsible"`
+ SyntaxHighlighting bool `json:"syntax_highlighting" yaml:"syntax_highlighting"`
+ LineNumbers bool `json:"line_numbers" yaml:"line_numbers"`
+ FoldLongFiles bool `json:"fold_long_files" yaml:"fold_long_files"`
+ MaxLineLength int `json:"max_line_length" yaml:"max_line_length"`
+ CustomCSS string `json:"custom_css" yaml:"custom_css"`
+}
+
+// TemplateContext provides data available for template substitution.
+type TemplateContext struct {
+ // Basic information
+ Timestamp time.Time `json:"timestamp"`
+ SourcePath string `json:"source_path"`
+ Format string `json:"format"`
+
+ // File statistics
+ TotalFiles int `json:"total_files"`
+ ProcessedFiles int `json:"processed_files"`
+ SkippedFiles int `json:"skipped_files"`
+ ErrorFiles int `json:"error_files"`
+ TotalSize int64 `json:"total_size"`
+
+ // Processing metrics
+ ProcessingTime string `json:"processing_time"`
+ FilesPerSecond float64 `json:"files_per_second"`
+ BytesPerSecond float64 `json:"bytes_per_second"`
+ FileTypes map[string]int `json:"file_types"`
+
+ // Custom variables
+ Variables map[string]string `json:"variables"`
+}
+
+// FileContext provides data for individual file formatting.
+type FileContext struct {
+ Path string `json:"path"`
+ RelativePath string `json:"relative_path"`
+ Name string `json:"name"`
+ Extension string `json:"extension"`
+ Language string `json:"language"`
+ Size int64 `json:"size"`
+ ModTime time.Time `json:"mod_time"`
+ Content string `json:"content"`
+ LineCount int `json:"line_count"`
+ IsLarge bool `json:"is_large"`
+ Truncated bool `json:"truncated"`
+}
+
+// BuiltinTemplates contains predefined templates.
+var BuiltinTemplates = map[string]OutputTemplate{
+ "default": {
+ Name: "Default",
+ Description: "Standard output template",
+ Format: shared.FormatMarkdown,
+ Header: "# {{.SourcePath}}\n\nGenerated on {{.Timestamp.Format \"2006-01-02 15:04:05\"}}\n",
+ Footer: "\n---\nGenerated by gibidify\n",
+ FileHeader: "## {{.Path}}\n\n```{{.Language}}\n",
+ FileFooter: "```\n\n",
+ Metadata: MetadataOptions{
+ IncludeStats: true,
+ IncludeTimestamp: true,
+ IncludeFileCount: true,
+ IncludeSourcePath: true,
+ },
+ Markdown: MarkdownOptions{
+ UseCodeBlocks: true,
+ IncludeLanguage: true,
+ HeaderLevel: 2,
+ SyntaxHighlighting: true,
+ },
+ },
+ "minimal": {
+ Name: "Minimal",
+ Description: "Minimal output with just file contents",
+ Format: shared.FormatMarkdown,
+ Header: "",
+ Footer: "",
+ FileHeader: "\n",
+ FileFooter: "\n",
+ Metadata: MetadataOptions{
+ IncludeStats: false,
+ IncludeTimestamp: false,
+ IncludeFileCount: false,
+ IncludeSourcePath: false,
+ },
+ Markdown: MarkdownOptions{
+ UseCodeBlocks: false,
+ IncludeLanguage: false,
+ },
+ },
+ "detailed": {
+ Name: "Detailed",
+ Description: "Comprehensive output with full metadata",
+ Format: shared.FormatMarkdown,
+ Header: `# Project Analysis: {{.SourcePath}}
+
+Generated on {{.Timestamp.Format "January 2, 2006 at 3:04 PM"}}
+
+## Summary
+
+- **Total Files**: {{.TotalFiles}}
+- **Processed Files**: {{.ProcessedFiles}}
+- **Total Size**: {{.TotalSize}} bytes
+- **Processing Time**: {{.ProcessingTime}}
+- **Rate**: {{.FilesPerSecond}} files/sec
+
+`,
+ Footer: "\n---\n*Generated by gibidify*\n",
+ FileHeader: "### {{.RelativePath}}\n\n**Language**: {{.Language}} \n" +
+ "**Size**: {{.Size}} bytes \n**Lines**: {{.LineCount}} \n\n```{{.Language}}\n",
+ FileFooter: "```\n\n",
+ Metadata: MetadataOptions{
+ IncludeStats: true,
+ IncludeTimestamp: true,
+ IncludeFileCount: true,
+ IncludeSourcePath: true,
+ IncludeFileTypes: true,
+ IncludeProcessingTime: true,
+ IncludeTotalSize: true,
+ IncludeMetrics: true,
+ },
+ Markdown: MarkdownOptions{
+ UseCodeBlocks: true,
+ IncludeLanguage: true,
+ HeaderLevel: 3,
+ TableOfContents: true,
+ SyntaxHighlighting: true,
+ LineNumbers: false,
+ },
+ },
+ "compact": {
+ Name: "Compact",
+ Description: "Space-efficient output with collapsible sections",
+ Format: shared.FormatMarkdown,
+ Header: "# {{.SourcePath}}\n\n📊 Stats ({{.TotalFiles}} files)
\n\n" +
+ "- Processed: {{.ProcessedFiles}}\n- Size: {{.TotalSize}} bytes\n" +
+ "- Time: {{.ProcessingTime}}\n\n \n\n",
+ Footer: "\n---\n*Compressed with gibidify*\n",
+ FileHeader: "📄 {{.RelativePath}} ({{.Size}} bytes)
\n\n```{{.Language}}\n",
+ FileFooter: "```\n\n \n\n",
+ Metadata: MetadataOptions{
+ IncludeStats: true,
+ IncludeFileCount: true,
+ IncludeTotalSize: true,
+ },
+ Markdown: MarkdownOptions{
+ UseCodeBlocks: true,
+ IncludeLanguage: true,
+ UseCollapsible: true,
+ SyntaxHighlighting: true,
+ },
+ },
+}
+
+// DefaultMetadataOptions returns the default metadata options.
+func DefaultMetadataOptions() MetadataOptions {
+ return MetadataOptions{
+ IncludeStats: true,
+ IncludeTimestamp: true,
+ IncludeFileCount: true,
+ IncludeSourcePath: true,
+ IncludeFileTypes: false,
+ IncludeProcessingTime: false,
+ IncludeTotalSize: false,
+ IncludeMetrics: false,
+ }
+}
+
+// DefaultMarkdownOptions returns the default markdown options.
+func DefaultMarkdownOptions() MarkdownOptions {
+ return MarkdownOptions{
+ UseCodeBlocks: true,
+ IncludeLanguage: true,
+ HeaderLevel: 2,
+ TableOfContents: false,
+ UseCollapsible: false,
+ SyntaxHighlighting: true,
+ LineNumbers: false,
+ FoldLongFiles: false,
+ MaxLineLength: 120,
+ }
+}
diff --git a/testutil/assertions_test.go b/testutil/assertions_test.go
new file mode 100644
index 0000000..1e5a53a
--- /dev/null
+++ b/testutil/assertions_test.go
@@ -0,0 +1,352 @@
+package testutil
+
+import (
+ "errors"
+ "strings"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// TestAssertError tests the AssertError function.
+func TestAssertError(t *testing.T) {
+ t.Run(
+ shared.TestCaseSuccessCases, func(t *testing.T) {
+ // Test expect error and get error
+ t.Run(
+ "expect error and get error", func(t *testing.T) {
+ AssertError(t, errors.New(shared.TestErrTestErrorMsg), true, shared.TestCaseTestOperation)
+ // If we get here, the assertion passed
+ },
+ )
+
+ // Test expect no error and get no error
+ t.Run(
+ "expect no error and get no error", func(t *testing.T) {
+ AssertError(t, nil, false, "successful operation")
+ // If we get here, the assertion passed
+ },
+ )
+
+ // Test with empty operation name
+ t.Run(
+ shared.TestCaseEmptyOperationName, func(t *testing.T) {
+ AssertError(t, nil, false, "")
+ // Should still work with empty operation
+ },
+ )
+
+ // Test with complex operation name
+ t.Run(
+ "complex operation name", func(t *testing.T) {
+ AssertError(t, nil, false, "complex operation with special chars: !@#$%^&*()")
+ // Should handle special characters
+ },
+ )
+ },
+ )
+
+ // Test edge cases
+ t.Run(
+ "edge cases", func(t *testing.T) {
+ // Test various error types
+ t.Run(
+ shared.TestCaseDifferentErrorTypes, func(t *testing.T) {
+ AssertError(t, shared.ErrTestError, true, "using shared.ErrTestError")
+ AssertError(t, errors.New("wrapped: original"), true, "wrapped error")
+ },
+ )
+ },
+ )
+}
+
+// TestAssertNoError tests the AssertNoError function.
+func TestAssertNoError(t *testing.T) {
+ t.Run(
+ shared.TestCaseSuccessCases, func(t *testing.T) {
+ // Test with nil error
+ t.Run(
+ "nil error", func(t *testing.T) {
+ AssertNoError(t, nil, "successful operation")
+ },
+ )
+
+ // Test with empty operation name
+ t.Run(
+ shared.TestCaseEmptyOperationName, func(t *testing.T) {
+ AssertNoError(t, nil, "")
+ },
+ )
+
+ // Test with complex operation name
+ t.Run(
+ "complex operation", func(t *testing.T) {
+ AssertNoError(t, nil, "complex operation with special chars: !@#$%^&*()")
+ },
+ )
+ },
+ )
+
+ // We can't easily test the failure case in a unit test since it would cause test failure
+ // But we can verify the function signature and basic functionality
+ t.Run(
+ shared.TestCaseFunctionAvailability, func(t *testing.T) {
+ // Verify the function doesn't panic with valid inputs
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("AssertNoError should not panic: %v", r)
+ }
+ }()
+
+ // Call with success case to ensure function works
+ AssertNoError(t, nil, shared.TestCaseTestOperation)
+ },
+ )
+}
+
+// TestAssertExpectedError tests the AssertExpectedError function.
+func TestAssertExpectedError(t *testing.T) {
+ t.Run(
+ shared.TestCaseSuccessCases, func(t *testing.T) {
+ // Test with error present
+ t.Run(
+ "error present as expected", func(t *testing.T) {
+ AssertExpectedError(t, errors.New("expected error"), "operation that should fail")
+ },
+ )
+
+ // Test with different error types
+ t.Run(
+ shared.TestCaseDifferentErrorTypes, func(t *testing.T) {
+ AssertExpectedError(t, shared.ErrTestError, "test error operation")
+ AssertExpectedError(t, errors.New("complex error with details"), "complex operation")
+ },
+ )
+
+ // Test with empty operation name
+ t.Run(
+ shared.TestCaseEmptyOperationName, func(t *testing.T) {
+ AssertExpectedError(t, errors.New("error"), "")
+ },
+ )
+ },
+ )
+
+ // Verify function availability and basic properties
+ t.Run(
+ shared.TestCaseFunctionAvailability, func(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("AssertExpectedError should not panic: %v", r)
+ }
+ }()
+
+ // Call with success case
+ AssertExpectedError(t, errors.New("test"), shared.TestCaseTestOperation)
+ },
+ )
+}
+
+// TestAssertErrorContains tests the AssertErrorContains function.
+func TestAssertErrorContains(t *testing.T) {
+ t.Run(
+ shared.TestCaseSuccessCases, func(t *testing.T) {
+ // Test error contains substring
+ t.Run(
+ "error contains substring", func(t *testing.T) {
+ err := errors.New("database connection failed")
+ AssertErrorContains(t, err, "connection", "database operation")
+ },
+ )
+
+ // Test exact match
+ t.Run(
+ "exact match", func(t *testing.T) {
+ err := errors.New("exact error message")
+ AssertErrorContains(t, err, "exact error message", "exact operation")
+ },
+ )
+
+ // Test empty substring (should match any error)
+ t.Run(
+ "empty substring matches any error", func(t *testing.T) {
+ err := errors.New("any error")
+ AssertErrorContains(t, err, "", "any operation")
+ },
+ )
+
+ // Test special characters
+ t.Run(
+ "special characters in substring", func(t *testing.T) {
+ err := errors.New("error: failed with code 500")
+ AssertErrorContains(t, err, "code 500", "special chars operation")
+ },
+ )
+
+ // Test case sensitivity
+ t.Run(
+ "case sensitive operations", func(t *testing.T) {
+ err := errors.New("error Message")
+ AssertErrorContains(t, err, "error Message", "case operation")
+ },
+ )
+ },
+ )
+
+ // Test with various error types
+ t.Run(
+ shared.TestCaseDifferentErrorTypes, func(t *testing.T) {
+ t.Run(
+ "standard error", func(t *testing.T) {
+ AssertErrorContains(
+ t, shared.ErrTestError, shared.TestErrTestErrorMsg, shared.TestCaseTestOperation,
+ )
+ },
+ )
+
+ t.Run(
+ "wrapped error", func(t *testing.T) {
+ wrappedErr := errors.New("wrapped: original error")
+ AssertErrorContains(t, wrappedErr, "original", "wrapped operation")
+ },
+ )
+ },
+ )
+
+ // Verify function availability
+ t.Run(
+ shared.TestCaseFunctionAvailability, func(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("AssertErrorContains should not panic: %v", r)
+ }
+ }()
+
+ // Call with success case
+ err := errors.New(shared.TestErrTestErrorMsg)
+ AssertErrorContains(t, err, "test", "availability check")
+ },
+ )
+}
+
+// TestAssertionHelpers tests edge cases and combinations of assertion helpers.
+func TestAssertionHelpers(t *testing.T) {
+ t.Run(
+ "error types compatibility", func(t *testing.T) {
+ // Test that all assertion functions work with shared.ErrTestError
+ AssertError(t, shared.ErrTestError, true, "shared.ErrTestError compatibility")
+ AssertExpectedError(t, shared.ErrTestError, "shared.ErrTestError expected")
+ AssertErrorContains(t, shared.ErrTestError, "test", "shared.ErrTestError contains")
+ },
+ )
+
+ t.Run(
+ "operation name handling", func(t *testing.T) {
+ operations := []string{
+ "",
+ "simple operation",
+ "operation with spaces",
+ "operation-with-dashes",
+ "operation_with_underscores",
+ "operation.with.dots",
+ "operation/with/slashes",
+ "operation\\with\\backslashes",
+ "operation with special chars: !@#$%^&*()",
+ "operation with unicode: αβγ",
+ strings.Repeat("very long operation name ", 10),
+ }
+
+ for i, op := range operations {
+ t.Run(
+ "operation_"+string(rune(i+'a')), func(t *testing.T) {
+ // Test each assertion function with this operation name
+ AssertError(t, nil, false, op)
+ AssertNoError(t, nil, op)
+ AssertExpectedError(t, errors.New("test"), op)
+ AssertErrorContains(t, errors.New(shared.TestErrTestErrorMsg), "test", op)
+ },
+ )
+ }
+ },
+ )
+
+ t.Run(
+ "error message variations", func(t *testing.T) {
+ errorMessages := []string{
+ "",
+ "simple error",
+ "error with spaces",
+ "error\nwith\nnewlines",
+ "error\twith\ttabs",
+ "error with unicode: αβγδε",
+ "error: with: colons: everywhere",
+ strings.Repeat("very long error message ", 20),
+ "error with special chars: !@#$%^&*()",
+ }
+
+ for i, msg := range errorMessages {
+ t.Run(
+ "error_message_"+string(rune(i+'a')), func(t *testing.T) {
+ err := errors.New(msg)
+ AssertError(t, err, true, shared.TestCaseMessageTest)
+ AssertExpectedError(t, err, shared.TestCaseMessageTest)
+ if msg != "" {
+ // Only test contains if message is not empty
+ AssertErrorContains(t, err, msg, shared.TestCaseMessageTest)
+ }
+ },
+ )
+ }
+ },
+ )
+}
+
+// BenchmarkStringOperations benchmarks string operations used by assertion functions.
+func BenchmarkStringOperations(b *testing.B) {
+ testErr := errors.New("this is a long error message with many words for testing performance of substring matching")
+ errorMessage := testErr.Error()
+ substring := "error message"
+
+ b.Run(
+ "contains_operation", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = strings.Contains(errorMessage, substring)
+ }
+ },
+ )
+
+ b.Run(
+ "error_to_string", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = testErr.Error()
+ }
+ },
+ )
+}
+
+// BenchmarkAssertionLogic benchmarks the core logic of assertion functions.
+func BenchmarkAssertionLogic(b *testing.B) {
+ testErr := errors.New("benchmark error")
+
+ b.Run(
+ "error_nil_check", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = testErr != nil
+ }
+ },
+ )
+
+ b.Run(
+ "boolean_comparison", func(b *testing.B) {
+ hasErr := testErr != nil
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = !hasErr
+ }
+ },
+ )
+}
diff --git a/testutil/concurrency_test.go b/testutil/concurrency_test.go
index 05c9626..063f6bb 100644
--- a/testutil/concurrency_test.go
+++ b/testutil/concurrency_test.go
@@ -5,9 +5,11 @@ import (
"path/filepath"
"strings"
"testing"
+
+ "github.com/ivuorinen/gibidify/shared"
)
-// Test thread safety of functions that might be called concurrently
+// Test thread safety of functions that might be called concurrently.
func TestConcurrentOperations(t *testing.T) {
tempDir := t.TempDir()
done := make(chan bool)
@@ -34,7 +36,7 @@ func TestConcurrentOperations(t *testing.T) {
}
}
-// Benchmarks
+// Benchmarks.
func BenchmarkCreateTestFile(b *testing.B) {
tempDir := b.TempDir()
content := []byte("benchmark content")
@@ -44,7 +46,7 @@ func BenchmarkCreateTestFile(b *testing.B) {
// Use a unique filename for each iteration to avoid conflicts
filename := "bench" + string(rune(i%26+'a')) + ".txt"
filePath := filepath.Join(tempDir, filename)
- if err := os.WriteFile(filePath, content, FilePermission); err != nil {
+ if err := os.WriteFile(filePath, content, shared.TestFilePermission); err != nil {
b.Fatalf("Failed to write file: %v", err)
}
}
@@ -64,7 +66,7 @@ func BenchmarkCreateTestFiles(b *testing.B) {
for _, spec := range specs {
filePath := filepath.Join(tempDir, spec.Name)
- if err := os.WriteFile(filePath, []byte(spec.Content), FilePermission); err != nil {
+ if err := os.WriteFile(filePath, []byte(spec.Content), shared.TestFilePermission); err != nil {
b.Fatalf("Failed to write file: %v", err)
}
}
diff --git a/testutil/config_test.go b/testutil/config_test.go
index 7352860..2f25e59 100644
--- a/testutil/config_test.go
+++ b/testutil/config_test.go
@@ -5,6 +5,8 @@ import (
"testing"
"github.com/spf13/viper"
+
+ "github.com/ivuorinen/gibidify/shared"
)
func TestResetViperConfig(t *testing.T) {
@@ -18,10 +20,11 @@ func TestResetViperConfig(t *testing.T) {
name: "reset with empty config path",
configPath: "",
preSetup: func() {
- viper.Set("test.key", "value")
+ viper.Set(shared.TestKeyName, "value")
},
verify: func(t *testing.T) {
- if viper.IsSet("test.key") {
+ t.Helper()
+ if viper.IsSet(shared.TestKeyName) {
t.Error("Viper config not reset properly")
}
},
@@ -30,10 +33,11 @@ func TestResetViperConfig(t *testing.T) {
name: "reset with config path",
configPath: t.TempDir(),
preSetup: func() {
- viper.Set("test.key", "value")
+ viper.Set(shared.TestKeyName, "value")
},
verify: func(t *testing.T) {
- if viper.IsSet("test.key") {
+ t.Helper()
+ if viper.IsSet(shared.TestKeyName) {
t.Error("Viper config not reset properly")
}
// Verify config path was added
@@ -47,11 +51,13 @@ func TestResetViperConfig(t *testing.T) {
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- tt.preSetup()
- ResetViperConfig(t, tt.configPath)
- tt.verify(t)
- })
+ t.Run(
+ tt.name, func(t *testing.T) {
+ tt.preSetup()
+ ResetViperConfig(t, tt.configPath)
+ tt.verify(t)
+ },
+ )
}
}
@@ -78,7 +84,7 @@ func TestSetupCLIArgs(t *testing.T) {
prefix: "PREFIX",
suffix: "SUFFIX",
concurrency: 4,
- wantLen: 11,
+ wantLen: 12,
},
{
name: "empty strings",
@@ -87,7 +93,7 @@ func TestSetupCLIArgs(t *testing.T) {
prefix: "",
suffix: "",
concurrency: 1,
- wantLen: 11,
+ wantLen: 12,
},
{
name: "special characters in args",
@@ -96,37 +102,50 @@ func TestSetupCLIArgs(t *testing.T) {
prefix: "Prefix with\nnewline",
suffix: "Suffix with\ttab",
concurrency: 8,
- wantLen: 11,
+ wantLen: 12,
},
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- SetupCLIArgs(tt.srcDir, tt.outFile, tt.prefix, tt.suffix, tt.concurrency)
-
- if len(os.Args) != tt.wantLen {
- t.Errorf("os.Args length = %d, want %d", len(os.Args), tt.wantLen)
- }
-
- // Verify specific args
- if os.Args[0] != "gibidify" {
- t.Errorf("Program name = %s, want gibidify", os.Args[0])
- }
- if os.Args[2] != tt.srcDir {
- t.Errorf("Source dir = %s, want %s", os.Args[2], tt.srcDir)
- }
- if os.Args[4] != tt.outFile {
- t.Errorf("Output file = %s, want %s", os.Args[4], tt.outFile)
- }
- if os.Args[6] != tt.prefix {
- t.Errorf("Prefix = %s, want %s", os.Args[6], tt.prefix)
- }
- if os.Args[8] != tt.suffix {
- t.Errorf("Suffix = %s, want %s", os.Args[8], tt.suffix)
- }
- if os.Args[10] != string(rune(tt.concurrency+'0')) {
- t.Errorf("Concurrency = %s, want %d", os.Args[10], tt.concurrency)
- }
- })
+ t.Run(
+ tt.name, func(t *testing.T) {
+ SetupCLIArgs(tt.srcDir, tt.outFile, tt.prefix, tt.suffix, tt.concurrency)
+ verifySetupCLIArgs(t, tt.srcDir, tt.outFile, tt.prefix, tt.suffix, tt.concurrency, tt.wantLen)
+ },
+ )
+ }
+}
+
+// verifySetupCLIArgs verifies that CLI arguments are set correctly.
+func verifySetupCLIArgs(t *testing.T, srcDir, outFile, prefix, suffix string, concurrency, wantLen int) {
+ t.Helper()
+
+ if len(os.Args) != wantLen {
+ t.Errorf("os.Args length = %d, want %d", len(os.Args), wantLen)
+ }
+
+ // Verify specific args
+ if os.Args[0] != "gibidify" {
+ t.Errorf("Program name = %s, want gibidify", os.Args[0])
+ }
+ if os.Args[2] != srcDir {
+ t.Errorf("Source dir = %s, want %s", os.Args[2], srcDir)
+ }
+ if os.Args[4] != outFile {
+ t.Errorf("Output file = %s, want %s", os.Args[4], outFile)
+ }
+ if os.Args[6] != prefix {
+ t.Errorf("Prefix = %s, want %s", os.Args[6], prefix)
+ }
+ if os.Args[8] != suffix {
+ t.Errorf("Suffix = %s, want %s", os.Args[8], suffix)
+ }
+ if os.Args[10] != string(rune(concurrency+'0')) {
+ t.Errorf("Concurrency = %s, want %d", os.Args[10], concurrency)
+ }
+
+ // Verify the -no-ui flag is present
+ if os.Args[11] != "-no-ui" {
+ t.Errorf("NoUI flag = %s, want -no-ui", os.Args[11])
}
}
diff --git a/testutil/directory_structure_test.go b/testutil/directory_structure_test.go
new file mode 100644
index 0000000..c19b56e
--- /dev/null
+++ b/testutil/directory_structure_test.go
@@ -0,0 +1,495 @@
+package testutil
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// verifySingleDirectoryFiles verifies single directory with files test case.
+func verifySingleDirectoryFiles(t *testing.T, rootDir string, _ []string) {
+ t.Helper()
+
+ srcDir := filepath.Join(rootDir, "src")
+ if _, err := os.Stat(srcDir); err != nil {
+ t.Errorf("Directory %s should exist", srcDir)
+ }
+
+ mainFile := filepath.Join(srcDir, shared.TestFileMainGo)
+ content, err := os.ReadFile(mainFile)
+ if err != nil {
+ t.Errorf("Failed to read %s: %v", shared.TestFileMainGo, err)
+ }
+ if string(content) != shared.LiteralPackageMain {
+ t.Errorf("%s content = %q, want %q", shared.TestFileMainGo, content, shared.LiteralPackageMain)
+ }
+
+ utilsFile := filepath.Join(srcDir, shared.TestFileSharedGo)
+ content, err = os.ReadFile(utilsFile)
+ if err != nil {
+ t.Errorf("Failed to read shared.go: %v", err)
+ }
+ if string(content) != shared.TestSharedGoContent {
+ t.Errorf("shared.go content = %q, want %q", content, shared.TestSharedGoContent)
+ }
+}
+
+// verifyMultipleDirectories verifies multiple directories with nested structure.
+func verifyMultipleDirectories(t *testing.T, rootDir string, _ []string) {
+ t.Helper()
+
+ expectedDirs := []string{
+ filepath.Join(rootDir, "src"),
+ filepath.Join(rootDir, "src", "handlers"),
+ filepath.Join(rootDir, "test"),
+ }
+ for _, dir := range expectedDirs {
+ if info, err := os.Stat(dir); err != nil {
+ t.Errorf(shared.TestFmtDirectoryShouldExist, dir, err)
+ } else if !info.IsDir() {
+ t.Errorf(shared.TestFmtPathShouldBeDirectory, dir)
+ }
+ }
+
+ handlerFile := filepath.Join(rootDir, "src", "handlers", "handler.go")
+ content, err := os.ReadFile(handlerFile)
+ if err != nil {
+ t.Errorf("Failed to read handler.go: %v", err)
+ }
+ if string(content) != shared.TestContentPackageHandlers {
+ t.Errorf("handler.go content = %q, want 'package handlers'", content)
+ }
+}
+
+// verifyEmptyDirectory verifies directory with no files.
+func verifyEmptyDirectory(t *testing.T, rootDir string, _ []string) {
+ t.Helper()
+
+ emptyDir := filepath.Join(rootDir, "empty")
+ if info, err := os.Stat(emptyDir); err != nil {
+ t.Errorf(shared.TestFmtDirectoryShouldExist, emptyDir, err)
+ } else if !info.IsDir() {
+ t.Errorf(shared.TestFmtPathShouldBeDirectory, emptyDir)
+ }
+}
+
+// verifySpecialCharacters verifies directories with special characters.
+func verifySpecialCharacters(t *testing.T, rootDir string, _ []string) {
+ t.Helper()
+
+ specialDir := filepath.Join(rootDir, "special-dir_123")
+ if _, err := os.Stat(specialDir); err != nil {
+ t.Errorf("Special directory should exist: %v", err)
+ }
+
+ spacedDir := filepath.Join(rootDir, "dir with spaces")
+ if _, err := os.Stat(spacedDir); err != nil {
+ t.Errorf("Spaced directory should exist: %v", err)
+ }
+
+ spacedFile := filepath.Join(spacedDir, "file with spaces.txt")
+ content, err := os.ReadFile(spacedFile)
+ if err != nil {
+ t.Errorf("Failed to read spaced file: %v", err)
+ }
+ if string(content) != "spaced content" {
+ t.Errorf("Spaced file content = %q, want 'spaced content'", content)
+ }
+}
+
+// runCreateDirectoryTest runs a single create directory structure test.
+func runCreateDirectoryTest(
+ t *testing.T,
+ dirSpecs []DirSpec,
+ wantPaths int,
+ verifyFunc func(t *testing.T, rootDir string, createdPaths []string),
+) {
+ t.Helper()
+
+ rootDir := t.TempDir()
+ createdPaths := CreateTestDirectoryStructure(t, rootDir, dirSpecs)
+
+ if len(createdPaths) != wantPaths {
+ t.Errorf("Created %d paths, want %d", len(createdPaths), wantPaths)
+ }
+
+ for _, path := range createdPaths {
+ if _, err := os.Stat(path); err != nil {
+ t.Errorf("Created path %s should exist: %v", path, err)
+ }
+ }
+
+ verifyFunc(t, rootDir, createdPaths)
+}
+
+// TestCreateTestDirectoryStructure tests the CreateTestDirectoryStructure function.
+func TestCreateTestDirectoryStructure(t *testing.T) {
+ tests := []struct {
+ name string
+ dirSpecs []DirSpec
+ wantPaths int
+ verifyFunc func(t *testing.T, rootDir string, createdPaths []string)
+ }{
+ {
+ name: "single directory with files",
+ dirSpecs: []DirSpec{
+ {
+ Path: "src",
+ Files: []FileSpec{
+ {Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain},
+ {Name: shared.TestFileSharedGo, Content: shared.TestSharedGoContent},
+ },
+ },
+ },
+ wantPaths: 3, // 1 directory + 2 files
+ verifyFunc: verifySingleDirectoryFiles,
+ },
+ {
+ name: "multiple directories with nested structure",
+ dirSpecs: []DirSpec{
+ {
+ Path: "src",
+ Files: []FileSpec{
+ {Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain},
+ },
+ },
+ {
+ Path: "src/handlers",
+ Files: []FileSpec{
+ {Name: "handler.go", Content: shared.TestContentPackageHandlers},
+ {Name: "middleware.go", Content: "package handlers\n\ntype Middleware struct {}"},
+ },
+ },
+ {
+ Path: "test",
+ Files: []FileSpec{
+ {Name: "main_test.go", Content: "package main\n\nimport \"testing\""},
+ },
+ },
+ },
+ wantPaths: 7, // 3 directories + 4 files
+ verifyFunc: verifyMultipleDirectories,
+ },
+ {
+ name: "directory with no files",
+ dirSpecs: []DirSpec{
+ {
+ Path: "empty",
+ Files: []FileSpec{},
+ },
+ },
+ wantPaths: 1, // 1 directory only
+ verifyFunc: verifyEmptyDirectory,
+ },
+ {
+ name: "empty directory specs",
+ dirSpecs: []DirSpec{},
+ wantPaths: 0,
+ verifyFunc: func(t *testing.T, _ string, _ []string) {
+ t.Helper()
+ // Nothing to verify for empty specs
+ },
+ },
+ {
+ name: "directories with special characters and edge cases",
+ dirSpecs: []DirSpec{
+ {
+ Path: "special-dir_123",
+ Files: []FileSpec{
+ {Name: "file-with-dashes.txt", Content: "content"},
+ {Name: "file_with_underscores.go", Content: "package main"},
+ },
+ },
+ {
+ Path: "dir with spaces",
+ Files: []FileSpec{
+ {Name: "file with spaces.txt", Content: "spaced content"},
+ },
+ },
+ },
+ wantPaths: 5, // 2 directories + 3 files
+ verifyFunc: verifySpecialCharacters,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ runCreateDirectoryTest(t, tt.dirSpecs, tt.wantPaths, tt.verifyFunc)
+ },
+ )
+ }
+}
+
+// verifyBasicDirectoryStructure verifies basic directory structure.
+func verifyBasicDirectoryStructure(t *testing.T, rootDir string) {
+ t.Helper()
+
+ if !strings.Contains(rootDir, os.TempDir()) {
+ t.Errorf("Root directory %s should be in temp directory", rootDir)
+ }
+
+ appDir := filepath.Join(rootDir, "app")
+ if info, err := os.Stat(appDir); err != nil {
+ t.Errorf("App directory should exist: %v", err)
+ } else if !info.IsDir() {
+ t.Error("App path should be a directory")
+ }
+
+ mainFile := filepath.Join(appDir, shared.TestFileMainGo)
+ content, err := os.ReadFile(mainFile)
+ if err != nil {
+ t.Errorf("Failed to read %s: %v", shared.TestFileMainGo, err)
+ }
+ expectedMain := "package main\n\nfunc main() {}"
+ if string(content) != expectedMain {
+ t.Errorf("%s content = %q, want %q", shared.TestFileMainGo, content, expectedMain)
+ }
+
+ configFile := filepath.Join(appDir, shared.TestFileConfigJSON)
+ content, err = os.ReadFile(configFile)
+ if err != nil {
+ t.Errorf("Failed to read %s: %v", shared.TestFileConfigJSON, err)
+ }
+ if string(content) != `{"debug": true}` {
+ t.Errorf("%s content = %q, want %q", shared.TestFileConfigJSON, content, `{"debug": true}`)
+ }
+
+ docsDir := filepath.Join(rootDir, "docs")
+ if info, err := os.Stat(docsDir); err != nil {
+ t.Errorf("Docs directory should exist: %v", err)
+ } else if !info.IsDir() {
+ t.Error("Docs path should be a directory")
+ }
+
+ readmeFile := filepath.Join(docsDir, shared.TestFileReadmeMD)
+ content, err = os.ReadFile(readmeFile)
+ if err != nil {
+ t.Errorf("Failed to read %s: %v", shared.TestFileReadmeMD, err)
+ }
+ if string(content) != shared.TestContentDocumentation {
+ t.Errorf("%s content = %q, want '# Documentation'", shared.TestFileReadmeMD, content)
+ }
+}
+
+// verifyEmptyDirectorySpecs verifies empty directory specs.
+func verifyEmptyDirectorySpecs(t *testing.T, rootDir string) {
+ t.Helper()
+
+ if info, err := os.Stat(rootDir); err != nil {
+ t.Errorf("Root directory should exist: %v", err)
+ } else if !info.IsDir() {
+ t.Error("Root path should be a directory")
+ }
+
+ entries, err := os.ReadDir(rootDir)
+ if err != nil {
+ t.Errorf("Failed to read root directory: %v", err)
+ }
+ if len(entries) != 0 {
+ t.Errorf("Root directory should be empty, but has %d entries", len(entries))
+ }
+}
+
+// verifyComplexNestedStructure verifies complex nested structure.
+func verifyComplexNestedStructure(t *testing.T, rootDir string) {
+ t.Helper()
+
+ deepPath := filepath.Join(rootDir, "project", "internal", "handlers", "auth.go")
+ content, err := os.ReadFile(deepPath)
+ if err != nil {
+ t.Errorf("Failed to read deep nested file: %v", err)
+ }
+ expectedContent := "package handlers\n\ntype AuthHandler struct{}"
+ if string(content) != expectedContent {
+ t.Errorf("Deep nested file content = %q, want %q", content, expectedContent)
+ }
+
+ expectedDirs := []string{
+ "project",
+ "project/cmd",
+ "project/cmd/server",
+ "project/internal",
+ "project/internal/handlers",
+ "project/test",
+ "project/test/integration",
+ }
+ for _, dir := range expectedDirs {
+ fullPath := filepath.Join(rootDir, dir)
+ if info, err := os.Stat(fullPath); err != nil {
+ t.Errorf(shared.TestFmtDirectoryShouldExist, fullPath, err)
+ } else if !info.IsDir() {
+ t.Errorf(shared.TestFmtPathShouldBeDirectory, fullPath)
+ }
+ }
+}
+
+// runSetupTempDirTest runs a single setup temp dir test.
+func runSetupTempDirTest(t *testing.T, dirSpecs []DirSpec, verifyFunc func(t *testing.T, rootDir string)) {
+ t.Helper()
+
+ rootDir := SetupTempDirWithStructure(t, dirSpecs)
+
+ if info, err := os.Stat(rootDir); err != nil {
+ t.Fatalf("Root directory should exist: %v", err)
+ } else if !info.IsDir() {
+ t.Fatal("Root path should be a directory")
+ }
+
+ verifyFunc(t, rootDir)
+}
+
+// TestSetupTempDirWithStructure tests the SetupTempDirWithStructure function.
+func TestSetupTempDirWithStructure(t *testing.T) {
+ tests := []struct {
+ name string
+ dirSpecs []DirSpec
+ verifyFunc func(t *testing.T, rootDir string)
+ }{
+ {
+ name: "basic directory structure",
+ dirSpecs: []DirSpec{
+ {
+ Path: "app",
+ Files: []FileSpec{
+ {Name: shared.TestFileMainGo, Content: "package main\n\nfunc main() {}"},
+ {Name: shared.TestFileConfigJSON, Content: `{"debug": true}`},
+ },
+ },
+ {
+ Path: "docs",
+ Files: []FileSpec{
+ {Name: shared.TestFileReadmeMD, Content: shared.TestContentDocumentation},
+ },
+ },
+ },
+ verifyFunc: verifyBasicDirectoryStructure,
+ },
+ {
+ name: "empty directory specs",
+ dirSpecs: []DirSpec{},
+ verifyFunc: verifyEmptyDirectorySpecs,
+ },
+ {
+ name: "complex nested structure",
+ dirSpecs: []DirSpec{
+ {
+ Path: "project",
+ Files: []FileSpec{
+ {Name: "go.mod", Content: "module test\n\ngo 1.21"},
+ },
+ },
+ {
+ Path: "project/cmd/server",
+ Files: []FileSpec{
+ {Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain},
+ },
+ },
+ {
+ Path: "project/internal/handlers",
+ Files: []FileSpec{
+ {Name: "health.go", Content: shared.TestContentPackageHandlers},
+ {Name: "auth.go", Content: "package handlers\n\ntype AuthHandler struct{}"},
+ },
+ },
+ {
+ Path: "project/test/integration",
+ Files: []FileSpec{
+ {Name: "server_test.go", Content: "package integration\n\nimport \"testing\""},
+ },
+ },
+ },
+ verifyFunc: verifyComplexNestedStructure,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ runSetupTempDirTest(t, tt.dirSpecs, tt.verifyFunc)
+ },
+ )
+ }
+}
+
+// benchmarkDirectoryStructure benchmarks creation of a single directory structure.
+func benchmarkDirectoryStructure(b *testing.B, dirSpecs []DirSpec) {
+ b.Helper()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ rootDir := b.TempDir()
+ b.StartTimer()
+
+ for _, dirSpec := range dirSpecs {
+ dirPath := filepath.Join(rootDir, dirSpec.Path)
+ if err := os.MkdirAll(dirPath, shared.TestDirPermission); err != nil {
+ b.Fatalf("Failed to create directory: %v", err)
+ }
+
+ for _, fileSpec := range dirSpec.Files {
+ filePath := filepath.Join(dirPath, fileSpec.Name)
+ if err := os.WriteFile(filePath, []byte(fileSpec.Content), shared.TestFilePermission); err != nil {
+ b.Fatalf("Failed to create file: %v", err)
+ }
+ }
+ }
+ }
+}
+
+// BenchmarkDirectoryCreation benchmarks directory structure creation with different specs.
+func BenchmarkDirectoryCreation(b *testing.B) {
+ testCases := []struct {
+ name string
+ dirSpecs []DirSpec
+ }{
+ {
+ name: "simple_source_structure",
+ dirSpecs: []DirSpec{
+ {
+ Path: "src",
+ Files: []FileSpec{
+ {Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain},
+ {Name: shared.TestFileSharedGo, Content: shared.TestSharedGoContent},
+ },
+ },
+ {
+ Path: "test",
+ Files: []FileSpec{
+ {Name: "main_test.go", Content: "package main\n\nimport \"testing\""},
+ },
+ },
+ },
+ },
+ {
+ name: "application_structure",
+ dirSpecs: []DirSpec{
+ {
+ Path: "app",
+ Files: []FileSpec{
+ {Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain},
+ {Name: shared.TestFileConfigJSON, Content: `{"debug": true}`},
+ },
+ },
+ {
+ Path: "docs",
+ Files: []FileSpec{
+ {Name: shared.TestFileReadmeMD, Content: shared.TestContentDocumentation},
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ b.Run(
+ tc.name, func(b *testing.B) {
+ benchmarkDirectoryStructure(b, tc.dirSpecs)
+ },
+ )
+ }
+}
diff --git a/testutil/error_scenarios_test.go b/testutil/error_scenarios_test.go
new file mode 100644
index 0000000..23e09d4
--- /dev/null
+++ b/testutil/error_scenarios_test.go
@@ -0,0 +1,314 @@
+package testutil
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/ivuorinen/gibidify/shared"
+)
+
+// testResetViperConfigVariations tests ResetViperConfig with different paths.
+func testResetViperConfigVariations(t *testing.T) {
+ t.Helper()
+
+ testCases := []string{
+ "", // Empty path
+ "/nonexistent/path", // Non-existent path
+ t.TempDir(), // Valid temporary directory
+ }
+
+ for _, configPath := range testCases {
+ t.Run(
+ "path_"+strings.ReplaceAll(configPath, "/", "_"), func(t *testing.T) {
+ ResetViperConfig(t, configPath)
+ },
+ )
+ }
+}
+
+// testGetBaseNameEdgeCases tests GetBaseName with various edge cases.
+func testGetBaseNameEdgeCases(t *testing.T) {
+ t.Helper()
+
+ edgeCases := []struct {
+ input string
+ expected string
+ }{
+ {"", "."},
+ {".", "."},
+ {"..", ".."},
+ {"/", "/"},
+ {"//", "/"},
+ {"///", "/"},
+ {"file", "file"},
+ {"./file", "file"},
+ {"../file", "file"},
+ {"/a", "a"},
+ {"/a/", "a"},
+ {"/a//", "a"},
+ {"a/b/c", "c"},
+ {"a/b/c/", "c"},
+ }
+
+ for _, tc := range edgeCases {
+ result := BaseName(tc.input)
+ expected := filepath.Base(tc.input)
+ if result != expected {
+ t.Errorf("BaseName(%q) = %q, want %q", tc.input, result, expected)
+ }
+ }
+}
+
+// testVerifyContentContainsScenarios tests VerifyContentContains scenarios.
+func testVerifyContentContainsScenarios(t *testing.T) {
+ t.Helper()
+
+ scenarios := []struct {
+ name string
+ content string
+ expected []string
+ }{
+ {
+ "all_substrings_found",
+ "This is a comprehensive test with multiple search terms",
+ []string{"comprehensive", "test", "multiple", "search"},
+ },
+ {"empty_expected_list", "Any content here", []string{}},
+ {"single_character_matches", "abcdefg", []string{"a", "c", "g"}},
+ {"repeated_substrings", "test test test", []string{"test", "test", "test"}},
+ {"case_sensitive_matching", "Test TEST tEsT", []string{"Test", "TEST"}},
+ }
+
+ for _, scenario := range scenarios {
+ t.Run(
+ scenario.name, func(t *testing.T) {
+ VerifyContentContains(t, scenario.content, scenario.expected)
+ },
+ )
+ }
+}
+
+// testMustSucceedCases tests MustSucceed with various operations.
+func testMustSucceedCases(t *testing.T) {
+ t.Helper()
+
+ operations := []string{
+ "simple operation",
+ "",
+ "operation with special chars: !@#$%",
+ "very " + strings.Repeat("long ", 50) + "operation name",
+ }
+
+ for i, op := range operations {
+ t.Run(
+ "operation_"+string(rune(i+'a')), func(t *testing.T) {
+ MustSucceed(t, nil, op)
+ },
+ )
+ }
+}
+
+// testCloseFileScenarios tests CloseFile with different file scenarios.
+func testCloseFileScenarios(t *testing.T) {
+ t.Helper()
+
+ t.Run(
+ "close_regular_file", func(t *testing.T) {
+ file, err := os.CreateTemp(t.TempDir(), "test")
+ if err != nil {
+ t.Fatalf("Failed to create temp file: %v", err)
+ }
+
+ if _, err = file.WriteString("test content"); err != nil {
+ t.Fatalf("Failed to write to file: %v", err)
+ }
+
+ CloseFile(t, file)
+
+ if _, writeErr := file.Write([]byte("should fail")); writeErr == nil {
+ t.Error("Expected write to fail after close")
+ }
+ },
+ )
+
+ t.Run(
+ "close_empty_file", func(t *testing.T) {
+ file, err := os.CreateTemp(t.TempDir(), "empty")
+ if err != nil {
+ t.Fatalf("Failed to create temp file: %v", err)
+ }
+ CloseFile(t, file)
+ },
+ )
+}
+
+// TestCoverageImprovements focuses on improving coverage for existing functions.
+func TestCoverageImprovements(t *testing.T) {
+ t.Run("ResetViperConfig_variations", testResetViperConfigVariations)
+ t.Run("GetBaseName_comprehensive", testGetBaseNameEdgeCases)
+ t.Run("VerifyContentContains_comprehensive", testVerifyContentContainsScenarios)
+ t.Run("MustSucceed_success_cases", testMustSucceedCases)
+ t.Run("CloseFile_success_cases", testCloseFileScenarios)
+}
+
+// attemptFileCreation attempts to create a file with error handling.
+func attemptFileCreation(t *testing.T, tempDir, specName string) {
+ t.Helper()
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("File creation panicked (expected for some edge cases): %v", r)
+ }
+ }()
+
+ if _, err := os.Create(filepath.Join(tempDir, specName)); err != nil {
+ t.Logf("File creation failed (expected for some edge cases): %v", err)
+ }
+}
+
+// createDirectoryIfNeeded creates directory if file path contains separators.
+func createDirectoryIfNeeded(t *testing.T, tempDir, specName string) {
+ t.Helper()
+
+ if strings.Contains(specName, "/") || strings.Contains(specName, "\\") {
+ dirPath := filepath.Dir(filepath.Join(tempDir, specName))
+ if err := os.MkdirAll(dirPath, shared.TestDirPermission); err != nil {
+ t.Skipf("Cannot create directory %s: %v", dirPath, err)
+ }
+ }
+}
+
+// testFileSpecVariations tests FileSpec with various edge cases.
+func testFileSpecVariations(t *testing.T) {
+ t.Helper()
+
+ specs := []FileSpec{
+ {Name: "", Content: ""},
+ {Name: "simple.txt", Content: "simple content"},
+ {Name: "with spaces.txt", Content: "content with spaces"},
+ {Name: "unicode-file-αβγ.txt", Content: "unicode content: αβγδε"},
+ {Name: "very-long-filename-" + strings.Repeat("x", 100) + ".txt", Content: "long filename test"},
+ {Name: "file.with.many.dots.txt", Content: "dotted filename"},
+ {Name: "special/chars\\file<>:\"|?*.txt", Content: "special characters"},
+ }
+
+ tempDir := t.TempDir()
+
+ for i, spec := range specs {
+ t.Run(
+ "spec_"+string(rune(i+'a')), func(t *testing.T) {
+ createDirectoryIfNeeded(t, tempDir, spec.Name)
+ attemptFileCreation(t, tempDir, spec.Name)
+ },
+ )
+ }
+}
+
+// testDirSpecVariations tests DirSpec with various configurations.
+func testDirSpecVariations(t *testing.T) {
+ t.Helper()
+
+ specs := []DirSpec{
+ {Path: "empty-dir", Files: []FileSpec{}},
+ {Path: "single-file-dir", Files: []FileSpec{{Name: "single.txt", Content: "single file"}}},
+ {
+ Path: "multi-file-dir", Files: []FileSpec{
+ {Name: "file1.txt", Content: "content1"},
+ {Name: "file2.txt", Content: "content2"},
+ {Name: "file3.txt", Content: "content3"},
+ },
+ },
+ {Path: "nested/deep/structure", Files: []FileSpec{{Name: "deep.txt", Content: "deep content"}}},
+ {Path: "unicode-αβγ", Files: []FileSpec{{Name: "unicode-file.txt", Content: "unicode directory content"}}},
+ }
+
+ tempDir := t.TempDir()
+ createdPaths := CreateTestDirectoryStructure(t, tempDir, specs)
+
+ if len(createdPaths) == 0 && len(specs) > 0 {
+ t.Error("Expected some paths to be created")
+ }
+
+ for _, path := range createdPaths {
+ if _, err := os.Stat(path); err != nil {
+ t.Errorf("Created path should exist: %s, error: %v", path, err)
+ }
+ }
+}
+
+// TestStructOperations tests operations with FileSpec and DirSpec.
+func TestStructOperations(t *testing.T) {
+ t.Run("FileSpec_comprehensive", testFileSpecVariations)
+ t.Run("DirSpec_comprehensive", testDirSpecVariations)
+}
+
+// BenchmarkUtilityFunctions provides comprehensive benchmarks.
+func BenchmarkUtilityFunctions(b *testing.B) {
+ b.Run(
+ "GetBaseName_variations", func(b *testing.B) {
+ paths := []string{
+ "",
+ "simple.txt",
+ "/path/to/file.go",
+ "/very/deep/nested/path/to/file.json",
+ "relative/path/file.txt",
+ ".",
+ "..",
+ "/",
+ strings.Repeat("/very/long/path", 10) + "/file.txt",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ path := paths[i%len(paths)]
+ _ = BaseName(path)
+ }
+ },
+ )
+
+ b.Run(
+ "StringOperations", func(b *testing.B) {
+ content := strings.Repeat("benchmark content with search terms ", 100)
+ searchTerms := []string{"benchmark", "content", "search", "terms", "not found"}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ term := searchTerms[i%len(searchTerms)]
+ _ = strings.Contains(content, term)
+ }
+ },
+ )
+
+ b.Run(
+ "FileSpec_creation", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ spec := FileSpec{
+ Name: "benchmark-file-" + string(rune(i%26+'a')) + ".txt",
+ Content: "benchmark content for iteration " + string(rune(i%10+'0')),
+ }
+ _ = len(spec.Name)
+ _ = len(spec.Content)
+ }
+ },
+ )
+
+ b.Run(
+ "DirSpec_creation", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ spec := DirSpec{
+ Path: "benchmark-dir-" + string(rune(i%26+'a')),
+ Files: []FileSpec{
+ {Name: "file1.txt", Content: "content1"},
+ {Name: "file2.txt", Content: "content2"},
+ },
+ }
+ _ = len(spec.Path)
+ _ = len(spec.Files)
+ }
+ },
+ )
+}
diff --git a/testutil/file_creation_test.go b/testutil/file_creation_test.go
index 8669f67..00ed5a4 100644
--- a/testutil/file_creation_test.go
+++ b/testutil/file_creation_test.go
@@ -5,10 +5,31 @@ import (
"path/filepath"
"strings"
"testing"
+
+ "github.com/ivuorinen/gibidify/shared"
)
func TestCreateTestFile(t *testing.T) {
- tests := []struct {
+ tests := createTestFileTestCases()
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ runCreateTestFileTest(t, tt.dir, tt.filename, tt.content)
+ },
+ )
+ }
+}
+
+// createTestFileTestCases creates test cases for TestCreateTestFile.
+func createTestFileTestCases() []struct {
+ name string
+ dir string
+ filename string
+ content []byte
+ wantErr bool
+} {
+ return []struct {
name string
dir string
filename string
@@ -42,55 +63,88 @@ func TestCreateTestFile(t *testing.T) {
{
name: "create file with special characters",
filename: "special-file_123.go",
- content: []byte("package main"),
+ content: []byte(shared.LiteralPackageMain),
wantErr: false,
},
}
+}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- // Use a temporary directory for each test
- tempDir := t.TempDir()
- if tt.dir == "" {
- tt.dir = tempDir
- }
+// runCreateTestFileTest runs a single test case for CreateTestFile.
+func runCreateTestFileTest(t *testing.T, dir, filename string, content []byte) {
+ t.Helper()
- // Create subdirectory if needed
- if strings.Contains(tt.filename, "/") {
- subdir := filepath.Join(tt.dir, filepath.Dir(tt.filename))
- if err := os.MkdirAll(subdir, DirPermission); err != nil {
- t.Fatalf("Failed to create subdirectory: %v", err)
- }
- }
+ tempDir := t.TempDir()
+ if dir == "" {
+ dir = tempDir
+ }
- // Test CreateTestFile
- filePath := CreateTestFile(t, tt.dir, tt.filename, tt.content)
+ createSubdirectoryIfNeeded(t, dir, filename)
+ filePath := CreateTestFile(t, dir, filename, content)
+ verifyCreatedFile(t, filePath, content)
+}
- // Verify file exists
- info, err := os.Stat(filePath)
- if err != nil {
- t.Fatalf("Created file does not exist: %v", err)
- }
+// createSubdirectoryIfNeeded creates subdirectory if the filename contains a path separator.
+func createSubdirectoryIfNeeded(t *testing.T, dir, filename string) {
+ t.Helper()
- // Verify it's a regular file
- if !info.Mode().IsRegular() {
- t.Errorf("Created path is not a regular file")
- }
+ if strings.ContainsRune(filename, filepath.Separator) {
+ subdir := filepath.Join(dir, filepath.Dir(filename))
+ if err := os.MkdirAll(subdir, shared.TestDirPermission); err != nil {
+ t.Fatalf("Failed to create subdirectory: %v", err)
+ }
+ }
+}
- // Verify permissions
- if info.Mode().Perm() != FilePermission {
- t.Errorf("File permissions = %v, want %v", info.Mode().Perm(), FilePermission)
- }
+// verifyCreatedFile verifies that the created file has correct properties.
+func verifyCreatedFile(t *testing.T, filePath string, expectedContent []byte) {
+ t.Helper()
- // Verify content
- readContent, err := os.ReadFile(filePath) // #nosec G304 - test file path is controlled
- if err != nil {
- t.Fatalf("Failed to read created file: %v", err)
- }
- if string(readContent) != string(tt.content) {
- t.Errorf("File content = %q, want %q", readContent, tt.content)
- }
- })
+ info := verifyFileExists(t, filePath)
+ verifyFileType(t, info)
+ verifyFilePermissions(t, info)
+ verifyFileContent(t, filePath, expectedContent)
+}
+
+// verifyFileExists verifies that the file exists and returns its info.
+func verifyFileExists(t *testing.T, filePath string) os.FileInfo {
+ t.Helper()
+
+ info, err := os.Stat(filePath)
+ if err != nil {
+ t.Fatalf("Created file does not exist: %v", err)
+ }
+
+ return info
+}
+
+// verifyFileType verifies that the file is a regular file.
+func verifyFileType(t *testing.T, info os.FileInfo) {
+ t.Helper()
+
+ if !info.Mode().IsRegular() {
+ t.Error("Created path is not a regular file")
+ }
+}
+
+// verifyFilePermissions verifies that the file has correct permissions.
+func verifyFilePermissions(t *testing.T, info os.FileInfo) {
+ t.Helper()
+
+ if info.Mode().Perm() != shared.TestFilePermission {
+ t.Errorf("File permissions = %v, want %v", info.Mode().Perm(), shared.TestFilePermission)
+ }
+}
+
+// verifyFileContent verifies that the file has the expected content.
+func verifyFileContent(t *testing.T, filePath string, expectedContent []byte) {
+ t.Helper()
+
+ readContent, err := os.ReadFile(filePath)
+ if err != nil {
+ t.Fatalf("Failed to read created file: %v", err)
+ }
+ if string(readContent) != string(expectedContent) {
+ t.Errorf("File content = %q, want %q", readContent, expectedContent)
}
}
@@ -118,37 +172,56 @@ func TestCreateTempOutputFile(t *testing.T) {
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- file, path := CreateTempOutputFile(t, tt.pattern)
- defer CloseFile(t, file)
+ t.Run(
+ tt.name, func(t *testing.T) {
+ file, path := CreateTempOutputFile(t, tt.pattern)
+ defer CloseFile(t, file)
- // Verify file exists
- info, err := os.Stat(path)
- if err != nil {
- t.Fatalf("Temp file does not exist: %v", err)
- }
+ // Verify file exists
+ info, err := os.Stat(path)
+ if err != nil {
+ t.Fatalf("Temp file does not exist: %v", err)
+ }
- // Verify it's a regular file
- if !info.Mode().IsRegular() {
- t.Errorf("Created path is not a regular file")
- }
+ // Verify it's a regular file
+ if !info.Mode().IsRegular() {
+ t.Error("Created path is not a regular file")
+ }
- // Verify we can write to it
- testContent := []byte("test content")
- if _, err := file.Write(testContent); err != nil {
- t.Errorf("Failed to write to temp file: %v", err)
- }
+ // Verify we can write to it
+ testContent := []byte("test content")
+ if _, err := file.Write(testContent); err != nil {
+ t.Errorf("Failed to write to temp file: %v", err)
+ }
- // Verify the path is in a temp directory (any temp directory)
- if !strings.Contains(path, os.TempDir()) {
- t.Errorf("Temp file not in temp directory: %s", path)
- }
- })
+ // Verify the path is in a temp directory (any temp directory)
+ if !strings.Contains(path, os.TempDir()) {
+ t.Errorf("Temp file not in temp directory: %s", path)
+ }
+ },
+ )
}
}
func TestCreateTestDirectory(t *testing.T) {
- tests := []struct {
+ tests := createTestDirectoryTestCases()
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ runCreateTestDirectoryTest(t, tt.parent, tt.dir)
+ },
+ )
+ }
+}
+
+// createTestDirectoryTestCases creates test cases for TestCreateTestDirectory.
+func createTestDirectoryTestCases() []struct {
+ name string
+ parent string
+ dir string
+} {
+ return []struct {
name string
parent string
dir string
@@ -166,53 +239,107 @@ func TestCreateTestDirectory(t *testing.T) {
dir: "nested/dir",
},
}
+}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- tempDir := t.TempDir()
- if tt.parent == "" {
- tt.parent = tempDir
- }
+// runCreateTestDirectoryTest runs a single test case for CreateTestDirectory.
+func runCreateTestDirectoryTest(t *testing.T, parent, dir string) {
+ t.Helper()
- // For nested directories, create parent first
- if strings.Contains(tt.dir, "/") {
- parentPath := filepath.Join(tt.parent, filepath.Dir(tt.dir))
- if err := os.MkdirAll(parentPath, DirPermission); err != nil {
- t.Fatalf("Failed to create parent directory: %v", err)
- }
- tt.dir = filepath.Base(tt.dir)
- tt.parent = parentPath
- }
+ tempDir := t.TempDir()
+ if parent == "" {
+ parent = tempDir
+ }
- dirPath := CreateTestDirectory(t, tt.parent, tt.dir)
+ parent, dir = prepareNestedDirectoryPath(t, parent, dir)
+ dirPath := CreateTestDirectory(t, parent, dir)
+ verifyCreatedDirectory(t, dirPath)
+}
- // Verify directory exists
- info, err := os.Stat(dirPath)
- if err != nil {
- t.Fatalf("Created directory does not exist: %v", err)
- }
+// prepareNestedDirectoryPath prepares parent and directory paths for nested directories.
+func prepareNestedDirectoryPath(t *testing.T, parent, dir string) (parentPath, fullPath string) {
+ t.Helper()
- // Verify it's a directory
- if !info.IsDir() {
- t.Errorf("Created path is not a directory")
- }
+ if strings.Contains(dir, "/") {
+ parentPath := filepath.Join(parent, filepath.Dir(dir))
+ if err := os.MkdirAll(parentPath, shared.TestDirPermission); err != nil {
+ t.Fatalf("Failed to create parent directory: %v", err)
+ }
- // Verify permissions
- if info.Mode().Perm() != DirPermission {
- t.Errorf("Directory permissions = %v, want %v", info.Mode().Perm(), DirPermission)
- }
+ return parentPath, filepath.Base(dir)
+ }
- // Verify we can create files in it
- testFile := filepath.Join(dirPath, "test.txt")
- if err := os.WriteFile(testFile, []byte("test"), FilePermission); err != nil {
- t.Errorf("Cannot create file in directory: %v", err)
- }
- })
+ return parent, dir
+}
+
+// verifyCreatedDirectory verifies that the created directory has correct properties.
+func verifyCreatedDirectory(t *testing.T, dirPath string) {
+ t.Helper()
+
+ info := verifyDirectoryExists(t, dirPath)
+ verifyIsDirectory(t, info)
+ verifyDirectoryPermissions(t, info)
+ verifyDirectoryUsability(t, dirPath)
+}
+
+// verifyDirectoryExists verifies that the directory exists and returns its info.
+func verifyDirectoryExists(t *testing.T, dirPath string) os.FileInfo {
+ t.Helper()
+
+ info, err := os.Stat(dirPath)
+ if err != nil {
+ t.Fatalf("Created directory does not exist: %v", err)
+ }
+
+ return info
+}
+
+// verifyIsDirectory verifies that the path is a directory.
+func verifyIsDirectory(t *testing.T, info os.FileInfo) {
+ t.Helper()
+
+ if !info.IsDir() {
+ t.Error("Created path is not a directory")
+ }
+}
+
+// verifyDirectoryPermissions verifies that the directory has correct permissions.
+func verifyDirectoryPermissions(t *testing.T, info os.FileInfo) {
+ t.Helper()
+
+ if info.Mode().Perm() != shared.TestDirPermission {
+ t.Errorf("Directory permissions = %v, want %v", info.Mode().Perm(), shared.TestDirPermission)
+ }
+}
+
+// verifyDirectoryUsability verifies that files can be created in the directory.
+func verifyDirectoryUsability(t *testing.T, dirPath string) {
+ t.Helper()
+
+ testFile := filepath.Join(dirPath, "test.txt")
+ if err := os.WriteFile(testFile, []byte("test"), shared.TestFilePermission); err != nil {
+ t.Errorf("Cannot create file in directory: %v", err)
}
}
func TestCreateTestFiles(t *testing.T) {
- tests := []struct {
+ tests := createTestFilesTestCases()
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ runTestFilesTest(t, tt.fileSpecs, tt.wantCount)
+ },
+ )
+ }
+}
+
+// createTestFilesTestCases creates test cases for TestCreateTestFiles.
+func createTestFilesTestCases() []struct {
+ name string
+ fileSpecs []FileSpec
+ wantCount int
+} {
+ return []struct {
name string
fileSpecs []FileSpec
wantCount int
@@ -221,7 +348,7 @@ func TestCreateTestFiles(t *testing.T) {
name: "create multiple files",
fileSpecs: []FileSpec{
{Name: "file1.txt", Content: "content1"},
- {Name: "file2.go", Content: "package main"},
+ {Name: "file2.go", Content: shared.LiteralPackageMain},
{Name: "file3.json", Content: `{"key": "value"}`},
},
wantCount: 3,
@@ -229,7 +356,7 @@ func TestCreateTestFiles(t *testing.T) {
{
name: "create files with subdirectories",
fileSpecs: []FileSpec{
- {Name: "src/main.go", Content: "package main"},
+ {Name: "src/main.go", Content: shared.LiteralPackageMain},
{Name: "test/test.go", Content: "package test"},
},
wantCount: 2,
@@ -248,39 +375,56 @@ func TestCreateTestFiles(t *testing.T) {
wantCount: 2,
},
}
+}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- rootDir := t.TempDir()
+// runTestFilesTest runs a single test case for CreateTestFiles.
+func runTestFilesTest(t *testing.T, fileSpecs []FileSpec, wantCount int) {
+ t.Helper()
- // Create necessary subdirectories
- for _, spec := range tt.fileSpecs {
- if strings.Contains(spec.Name, "/") {
- subdir := filepath.Join(rootDir, filepath.Dir(spec.Name))
- if err := os.MkdirAll(subdir, DirPermission); err != nil {
- t.Fatalf("Failed to create subdirectory: %v", err)
- }
- }
+ rootDir := t.TempDir()
+
+ createNecessarySubdirectories(t, rootDir, fileSpecs)
+ createdFiles := CreateTestFiles(t, rootDir, fileSpecs)
+ verifyCreatedFilesCount(t, createdFiles, wantCount)
+ verifyCreatedFilesContent(t, createdFiles, fileSpecs)
+}
+
+// createNecessarySubdirectories creates subdirectories for file specs that need them.
+func createNecessarySubdirectories(t *testing.T, rootDir string, fileSpecs []FileSpec) {
+ t.Helper()
+
+ for _, spec := range fileSpecs {
+ if strings.Contains(spec.Name, "/") {
+ subdir := filepath.Join(rootDir, filepath.Dir(spec.Name))
+ if err := os.MkdirAll(subdir, shared.TestDirPermission); err != nil {
+ t.Fatalf("Failed to create subdirectory: %v", err)
}
-
- createdFiles := CreateTestFiles(t, rootDir, tt.fileSpecs)
-
- // Verify count
- if len(createdFiles) != tt.wantCount {
- t.Errorf("Created %d files, want %d", len(createdFiles), tt.wantCount)
- }
-
- // Verify each file
- for i, filePath := range createdFiles {
- content, err := os.ReadFile(filePath) // #nosec G304 - test file path is controlled
- if err != nil {
- t.Errorf("Failed to read file %s: %v", filePath, err)
- continue
- }
- if string(content) != tt.fileSpecs[i].Content {
- t.Errorf("File %s content = %q, want %q", filePath, content, tt.fileSpecs[i].Content)
- }
- }
- })
+ }
+ }
+}
+
+// verifyCreatedFilesCount verifies the count of created files.
+func verifyCreatedFilesCount(t *testing.T, createdFiles []string, wantCount int) {
+ t.Helper()
+
+ if len(createdFiles) != wantCount {
+ t.Errorf("Created %d files, want %d", len(createdFiles), wantCount)
+ }
+}
+
+// verifyCreatedFilesContent verifies the content of created files.
+func verifyCreatedFilesContent(t *testing.T, createdFiles []string, fileSpecs []FileSpec) {
+ t.Helper()
+
+ for i, filePath := range createdFiles {
+ content, err := os.ReadFile(filePath)
+ if err != nil {
+ t.Errorf("Failed to read file %s: %v", filePath, err)
+
+ continue
+ }
+ if string(content) != fileSpecs[i].Content {
+ t.Errorf("File %s content = %q, want %q", filePath, content, fileSpecs[i].Content)
+ }
}
}
diff --git a/testutil/testutil.go b/testutil/testutil.go
index ecc43c4..2c17067 100644
--- a/testutil/testutil.go
+++ b/testutil/testutil.go
@@ -1,7 +1,34 @@
// Package testutil provides common testing utilities and helper functions.
+//
+// Testing Patterns and Conventions:
+//
+// File Setup:
+// - Use CreateTestFile() for individual files
+// - Use CreateTestFiles() for multiple files from FileSpec
+// - Use CreateTestDirectoryStructure() for complex directory trees
+// - Use SetupTempDirWithStructure() for complete test environments
+//
+// Error Assertions:
+// - Use AssertError() for conditional error checking
+// - Use AssertNoError() when expecting success
+// - Use AssertExpectedError() when expecting failure
+// - Use AssertErrorContains() for substring validation
+//
+// Configuration:
+// - Use ResetViperConfig() to reset between tests
+// - Remember to call config.LoadConfig() after ResetViperConfig()
+//
+// Best Practices:
+// - Always use t.Helper() in test helper functions
+// - Use descriptive operation names in assertions
+// - Prefer table-driven tests for multiple scenarios
+// - Use testutil.ErrTestError for standard test errors
package testutil
import (
+ "bytes"
+ "errors"
+ "io"
"os"
"path/filepath"
"strconv"
@@ -11,22 +38,143 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
+ "github.com/ivuorinen/gibidify/shared"
)
-const (
- // FilePermission is the default file permission for test files.
- FilePermission = 0o644
- // DirPermission is the default directory permission for test directories.
- DirPermission = 0o755
-)
+// SuppressLogs suppresses logger output during testing to keep test output clean.
+// Returns a function that should be called to restore the original log output.
+func SuppressLogs(t *testing.T) func() {
+ t.Helper()
+ logger := shared.GetLogger()
+
+ // Capture original output by temporarily setting it to discard
+ logger.SetOutput(io.Discard)
+
+ // Return function to restore original settings (stderr)
+ return func() {
+ logger.SetOutput(os.Stderr)
+ }
+}
+
+// OutputRestoreFunc represents a function that restores output after suppression.
+type OutputRestoreFunc func()
+
+// SuppressAllOutput suppresses both stdout and stderr during testing.
+// This captures all output including UI messages, progress bars, and direct prints.
+// Returns a function that should be called to restore original output.
+func SuppressAllOutput(t *testing.T) OutputRestoreFunc {
+ t.Helper()
+
+ // Save original stdout and stderr
+ originalStdout := os.Stdout
+ originalStderr := os.Stderr
+
+ // Suppress logger output as well
+ logger := shared.GetLogger()
+ logger.SetOutput(io.Discard)
+
+ // Open /dev/null for safe redirection
+ devNull, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
+ if err != nil {
+ t.Fatalf("Failed to open devnull: %v", err)
+ }
+
+ // Redirect both stdout and stderr to /dev/null
+ os.Stdout = devNull
+ os.Stderr = devNull
+
+ // Return restore function
+ return func() {
+ // Close devNull first
+ if devNull != nil {
+ _ = devNull.Close() // Ignore close errors in cleanup
+ }
+
+ // Restore original outputs
+ os.Stdout = originalStdout
+ os.Stderr = originalStderr
+ logger.SetOutput(originalStderr)
+ }
+}
+
+// CaptureOutput captures both stdout and stderr during test execution.
+// Returns the captured output as strings and a restore function.
+func CaptureOutput(t *testing.T) (getStdout func() string, getStderr func() string, restore OutputRestoreFunc) {
+ t.Helper()
+
+ // Save original outputs
+ originalStdout := os.Stdout
+ originalStderr := os.Stderr
+
+ // Create pipes for stdout
+ stdoutReader, stdoutWriter, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("Failed to create stdout pipe: %v", err)
+ }
+
+ // Create pipes for stderr
+ stderrReader, stderrWriter, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("Failed to create stderr pipe: %v", err)
+ }
+
+ // Redirect outputs
+ os.Stdout = stdoutWriter
+ os.Stderr = stderrWriter
+
+ // Suppress logger output to stderr
+ logger := shared.GetLogger()
+ logger.SetOutput(stderrWriter)
+
+ // Buffers to collect output
+ var stdoutBuf, stderrBuf bytes.Buffer
+
+ // Start goroutines to read from pipes
+ stdoutDone := make(chan struct{})
+ stderrDone := make(chan struct{})
+
+ go func() {
+ defer close(stdoutDone)
+ _, _ = io.Copy(&stdoutBuf, stdoutReader) //nolint:errcheck // Ignore errors during test output capture shutdown
+ }()
+
+ go func() {
+ defer close(stderrDone)
+ _, _ = io.Copy(&stderrBuf, stderrReader) //nolint:errcheck // Ignore errors during test output capture shutdown
+ }()
+
+ return func() string {
+ return stdoutBuf.String()
+ }, func() string {
+ return stderrBuf.String()
+ }, func() {
+ // Close writers first to signal EOF
+ _ = stdoutWriter.Close() // Ignore close errors in cleanup
+ _ = stderrWriter.Close() // Ignore close errors in cleanup
+
+ // Wait for readers to finish
+ <-stdoutDone
+ <-stderrDone
+
+ // Close readers
+ _ = stdoutReader.Close() // Ignore close errors in cleanup
+ _ = stderrReader.Close() // Ignore close errors in cleanup
+
+ // Restore original outputs
+ os.Stdout = originalStdout
+ os.Stderr = originalStderr
+ logger.SetOutput(originalStderr)
+ }
+}
// CreateTestFile creates a test file with the given content and returns its path.
func CreateTestFile(t *testing.T, dir, filename string, content []byte) string {
t.Helper()
filePath := filepath.Join(dir, filename)
- if err := os.WriteFile(filePath, content, FilePermission); err != nil {
+ if err := os.WriteFile(filePath, content, shared.TestFilePermission); err != nil {
t.Fatalf("Failed to write file %s: %v", filePath, err)
}
+
return filePath
}
@@ -38,6 +186,7 @@ func CreateTempOutputFile(t *testing.T, pattern string) (file *os.File, path str
t.Fatalf("Failed to create temp output file: %v", err)
}
path = outFile.Name()
+
return outFile, path
}
@@ -45,9 +194,10 @@ func CreateTempOutputFile(t *testing.T, pattern string) (file *os.File, path str
func CreateTestDirectory(t *testing.T, parent, name string) string {
t.Helper()
dirPath := filepath.Join(parent, name)
- if err := os.Mkdir(dirPath, DirPermission); err != nil {
+ if err := os.Mkdir(dirPath, shared.TestDirPermission); err != nil {
t.Fatalf("Failed to create directory %s: %v", dirPath, err)
}
+
return dirPath
}
@@ -65,6 +215,7 @@ func CreateTestFiles(t *testing.T, rootDir string, fileSpecs []FileSpec) []strin
filePath := CreateTestFile(t, rootDir, spec.Name, []byte(spec.Content))
createdFiles = append(createdFiles, filePath)
}
+
return createdFiles
}
@@ -78,6 +229,23 @@ func ResetViperConfig(t *testing.T, configPath string) {
config.LoadConfig()
}
+// SetViperKeys sets specific configuration keys for testing.
+func SetViperKeys(t *testing.T, keyValues map[string]any) {
+ t.Helper()
+ viper.Reset()
+ for key, value := range keyValues {
+ viper.Set(key, value)
+ }
+ config.LoadConfig()
+}
+
+// ApplyBackpressureOverrides applies backpressure configuration overrides for testing.
+// This is a convenience wrapper around SetViperKeys specifically for backpressure tests.
+func ApplyBackpressureOverrides(t *testing.T, overrides map[string]any) {
+ t.Helper()
+ SetViperKeys(t, overrides)
+}
+
// SetupCLIArgs configures os.Args for CLI testing.
func SetupCLIArgs(srcDir, outFilePath, prefix, suffix string, concurrency int) {
os.Args = []string{
@@ -87,6 +255,7 @@ func SetupCLIArgs(srcDir, outFilePath, prefix, suffix string, concurrency int) {
"-prefix", prefix,
"-suffix", suffix,
"-concurrency", strconv.Itoa(concurrency),
+ "-no-ui", // Suppress UI output during tests
}
}
@@ -104,7 +273,7 @@ func VerifyContentContains(t *testing.T, content string, expectedSubstrings []st
func MustSucceed(t *testing.T, err error, operation string) {
t.Helper()
if err != nil {
- t.Fatalf("Operation %s failed: %v", operation, err)
+ t.Fatalf(shared.TestMsgOperationFailed, operation, err)
}
}
@@ -115,3 +284,130 @@ func CloseFile(t *testing.T, file *os.File) {
t.Errorf("Failed to close file: %v", err)
}
}
+
+// BaseName returns the base name of a file path (filename without directory).
+func BaseName(path string) string {
+ return filepath.Base(path)
+}
+
+// Advanced directory setup patterns.
+
+// DirSpec represents a directory specification for creating test directory structures.
+type DirSpec struct {
+ Path string
+ Files []FileSpec
+}
+
+// CreateTestDirectoryStructure creates multiple directories with files.
+func CreateTestDirectoryStructure(t *testing.T, rootDir string, dirSpecs []DirSpec) []string {
+ t.Helper()
+ createdPaths := make([]string, 0)
+
+ for _, dirSpec := range dirSpecs {
+ dirPath := filepath.Join(rootDir, dirSpec.Path)
+ if err := os.MkdirAll(dirPath, shared.TestDirPermission); err != nil {
+ t.Fatalf("Failed to create directory structure %s: %v", dirPath, err)
+ }
+ createdPaths = append(createdPaths, dirPath)
+
+ // Create files in the directory
+ for _, fileSpec := range dirSpec.Files {
+ filePath := CreateTestFile(t, dirPath, fileSpec.Name, []byte(fileSpec.Content))
+ createdPaths = append(createdPaths, filePath)
+ }
+ }
+
+ return createdPaths
+}
+
+// SetupTempDirWithStructure creates a temp directory with a structured layout.
+func SetupTempDirWithStructure(t *testing.T, dirSpecs []DirSpec) string {
+ t.Helper()
+ rootDir := t.TempDir()
+ CreateTestDirectoryStructure(t, rootDir, dirSpecs)
+
+ return rootDir
+}
+
+// Error assertion helpers - safe to use across packages.
+
+// AssertError checks if an error matches the expected state.
+// If wantErr is true, expects err to be non-nil.
+// If wantErr is false, expects err to be nil and fails if it's not.
+func AssertError(t *testing.T, err error, wantErr bool, operation string) {
+ t.Helper()
+ if (err != nil) != wantErr {
+ if wantErr {
+ t.Errorf(shared.TestMsgOperationNoError, operation)
+ } else {
+ t.Errorf("Operation %s unexpected error: %v", operation, err)
+ }
+ }
+}
+
+// AssertNoError fails the test if err is not nil.
+func AssertNoError(t *testing.T, err error, operation string) {
+ t.Helper()
+ if err != nil {
+ t.Errorf(shared.TestMsgOperationFailed, operation, err)
+ }
+}
+
+// AssertExpectedError fails the test if err is nil when an error is expected.
+func AssertExpectedError(t *testing.T, err error, operation string) {
+ t.Helper()
+ if err == nil {
+ t.Errorf(shared.TestMsgOperationNoError, operation)
+ }
+}
+
+// AssertErrorContains checks that error contains the expected substring.
+func AssertErrorContains(t *testing.T, err error, expectedSubstring, operation string) {
+ t.Helper()
+ if err == nil {
+ t.Errorf("Operation %s expected error containing %q but got none", operation, expectedSubstring)
+
+ return
+ }
+ if !strings.Contains(err.Error(), expectedSubstring) {
+ t.Errorf("Operation %s error %q should contain %q", operation, err.Error(), expectedSubstring)
+ }
+}
+
+// ValidateErrorCase checks error expectations and optionally validates error message content.
+// This is a comprehensive helper that combines error checking with substring matching.
+func ValidateErrorCase(t *testing.T, err error, wantErr bool, errContains string, operation string) {
+ t.Helper()
+ if wantErr {
+ if err == nil {
+ t.Errorf("%s: expected error but got none", operation)
+
+ return
+ }
+ if errContains != "" && !strings.Contains(err.Error(), errContains) {
+ t.Errorf("%s: expected error containing %q, got: %v", operation, errContains, err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("%s: unexpected error: %v", operation, err)
+ }
+ }
+}
+
+// VerifyStructuredError validates StructuredError properties.
+// This helper ensures structured errors have the expected Type and Code values.
+func VerifyStructuredError(t *testing.T, err error, expectedType shared.ErrorType, expectedCode string) {
+ t.Helper()
+ var structErr *shared.StructuredError
+ if !errors.As(err, &structErr) {
+ t.Errorf("expected StructuredError, got: %T", err)
+
+ return
+ }
+ if structErr.Type != expectedType {
+ t.Errorf("expected Type %v, got %v", expectedType, structErr.Type)
+ }
+ if structErr.Code != expectedCode {
+ t.Errorf("expected Code %q, got %q", expectedCode, structErr.Code)
+ }
+}
diff --git a/testutil/utility_test.go b/testutil/utility_test.go
new file mode 100644
index 0000000..a6f8c48
--- /dev/null
+++ b/testutil/utility_test.go
@@ -0,0 +1,119 @@
+package testutil
+
+import (
+ "path/filepath"
+ "testing"
+)
+
+// TestGetBaseName tests the GetBaseName utility function.
+func TestBaseName(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ expected string
+ }{
+ {
+ name: "simple filename",
+ path: "test.txt",
+ expected: "test.txt",
+ },
+ {
+ name: "absolute path",
+ path: "/path/to/file.go",
+ expected: "file.go",
+ },
+ {
+ name: "relative path",
+ path: "src/main.go",
+ expected: "main.go",
+ },
+ {
+ name: "nested path",
+ path: "/deep/nested/path/to/file.json",
+ expected: "file.json",
+ },
+ {
+ name: "path with trailing slash",
+ path: "/path/to/dir/",
+ expected: "dir",
+ },
+ {
+ name: "empty path",
+ path: "",
+ expected: ".",
+ },
+ {
+ name: "root path",
+ path: "/",
+ expected: "/",
+ },
+ {
+ name: "current directory",
+ path: ".",
+ expected: ".",
+ },
+ {
+ name: "parent directory",
+ path: "..",
+ expected: "..",
+ },
+ {
+ name: "hidden file",
+ path: "/path/to/.hidden",
+ expected: ".hidden",
+ },
+ {
+ name: "file with multiple dots",
+ path: "/path/file.test.go",
+ expected: "file.test.go",
+ },
+ {
+ name: "windows-style path",
+ path: "C:\\Windows\\System32\\file.dll",
+ expected: filepath.Base("C:\\Windows\\System32\\file.dll"), // Platform-specific result
+ },
+ {
+ name: "mixed path separators",
+ path: "/path\\to/file.txt",
+ expected: "file.txt",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(
+ tt.name, func(t *testing.T) {
+ result := BaseName(tt.path)
+ if result != tt.expected {
+ t.Errorf("BaseName(%q) = %q, want %q", tt.path, result, tt.expected)
+ }
+
+ // Also verify against Go's filepath.Base for consistency
+ expected := filepath.Base(tt.path)
+ if result != expected {
+ t.Errorf(
+ "BaseName(%q) = %q, filepath.Base = %q, should be consistent",
+ tt.path, result, expected,
+ )
+ }
+ },
+ )
+ }
+}
+
+// BenchmarkGetBaseName benchmarks the GetBaseName function.
+func BenchmarkBaseName(b *testing.B) {
+ testPaths := []string{
+ "simple.txt",
+ "/path/to/file.go",
+ "/very/deep/nested/path/to/some/file.json",
+ "../relative/path.txt",
+ "",
+ "/",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ path := testPaths[i%len(testPaths)]
+ _ = BaseName(path)
+ }
+}
diff --git a/testutil/verification_test.go b/testutil/verification_test.go
index 6357e3f..894565a 100644
--- a/testutil/verification_test.go
+++ b/testutil/verification_test.go
@@ -8,100 +8,118 @@ import (
func TestVerifyContentContains(t *testing.T) {
// Test successful verification
- t.Run("all substrings present", func(t *testing.T) {
- content := "This is a test file with multiple lines"
- VerifyContentContains(t, content, []string{"test file", "multiple lines"})
- // If we get here, the test passed
- })
+ t.Run(
+ "all substrings present", func(t *testing.T) {
+ content := "This is a test file with multiple lines"
+ VerifyContentContains(t, content, []string{"test file", "multiple lines"})
+ // If we get here, the test passed
+ },
+ )
// Test empty expected substrings
- t.Run("empty expected substrings", func(t *testing.T) {
- content := "Any content"
- VerifyContentContains(t, content, []string{})
- // Should pass with no expected strings
- })
+ t.Run(
+ "empty expected substrings", func(t *testing.T) {
+ content := "Any content"
+ VerifyContentContains(t, content, []string{})
+ // Should pass with no expected strings
+ },
+ )
// For failure cases, we'll test indirectly by verifying behavior
- t.Run("verify error reporting", func(t *testing.T) {
- // We can't easily test the failure case directly since it calls t.Errorf
- // But we can at least verify the function doesn't panic
- defer func() {
- if r := recover(); r != nil {
- t.Errorf("VerifyContentContains panicked: %v", r)
- }
- }()
+ t.Run(
+ "verify error reporting", func(t *testing.T) {
+ // We can't easily test the failure case directly since it calls t.Errorf
+ // But we can at least verify the function doesn't panic
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("VerifyContentContains panicked: %v", r)
+ }
+ }()
- // This would normally fail, but we're just checking it doesn't panic
- content := "test"
- expected := []string{"not found"}
- // Create a subtest that we expect to fail
- t.Run("expected_failure", func(t *testing.T) {
- t.Skip("Skipping actual failure test")
- VerifyContentContains(t, content, expected)
- })
- })
+ // This would normally fail, but we're just checking it doesn't panic
+ content := "test"
+ expected := []string{"not found"}
+ // Create a subtest that we expect to fail
+ t.Run(
+ "expected_failure", func(t *testing.T) {
+ t.Skip("Skipping actual failure test")
+ VerifyContentContains(t, content, expected)
+ },
+ )
+ },
+ )
}
func TestMustSucceed(t *testing.T) {
// Test with nil error (should succeed)
- t.Run("nil error", func(t *testing.T) {
- MustSucceed(t, nil, "successful operation")
- // If we get here, the test passed
- })
+ t.Run(
+ "nil error", func(t *testing.T) {
+ MustSucceed(t, nil, "successful operation")
+ // If we get here, the test passed
+ },
+ )
// Test error behavior without causing test failure
- t.Run("verify error handling", func(t *testing.T) {
- // We can't test the failure case directly since it calls t.Fatalf
- // But we can verify the function exists and is callable
- defer func() {
- if r := recover(); r != nil {
- t.Errorf("MustSucceed panicked: %v", r)
- }
- }()
+ t.Run(
+ "verify error handling", func(t *testing.T) {
+ // We can't test the failure case directly since it calls t.Fatalf
+ // But we can verify the function exists and is callable
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("MustSucceed panicked: %v", r)
+ }
+ }()
- // Create a subtest that we expect to fail
- t.Run("expected_failure", func(t *testing.T) {
- t.Skip("Skipping actual failure test")
- MustSucceed(t, errors.New("test error"), "failed operation")
- })
- })
+ // Create a subtest that we expect to fail
+ t.Run(
+ "expected_failure", func(t *testing.T) {
+ t.Skip("Skipping actual failure test")
+ MustSucceed(t, errors.New("test error"), "failed operation")
+ },
+ )
+ },
+ )
}
func TestCloseFile(t *testing.T) {
// Test closing a normal file
- t.Run("close normal file", func(t *testing.T) {
- file, err := os.CreateTemp(t.TempDir(), "test")
- if err != nil {
- t.Fatalf("Failed to create test file: %v", err)
- }
+ t.Run(
+ "close normal file", func(t *testing.T) {
+ file, err := os.CreateTemp(t.TempDir(), "test")
+ if err != nil {
+ t.Fatalf("Failed to create test file: %v", err)
+ }
- CloseFile(t, file)
+ CloseFile(t, file)
- // Verify file is closed by trying to write to it
- _, writeErr := file.Write([]byte("test"))
- if writeErr == nil {
- t.Error("Expected write to fail on closed file")
- }
- })
+ // Verify file is closed by trying to write to it
+ _, writeErr := file.Write([]byte("test"))
+ if writeErr == nil {
+ t.Error("Expected write to fail on closed file")
+ }
+ },
+ )
// Test that CloseFile doesn't panic on already closed files
// Note: We can't easily test the error case without causing test failure
// since CloseFile calls t.Errorf, which is the expected behavior
- t.Run("verify CloseFile function exists and is callable", func(t *testing.T) {
- // This test just verifies the function signature and basic functionality
- // The error case is tested in integration tests where failures are expected
- file, err := os.CreateTemp(t.TempDir(), "test")
- if err != nil {
- t.Fatalf("Failed to create test file: %v", err)
- }
+ t.Run(
+ "verify CloseFile function exists and is callable", func(t *testing.T) {
+ // This test just verifies the function signature and basic functionality
+ // The error case is tested in integration tests where failures are expected
+ file, err := os.CreateTemp(t.TempDir(), "test")
+ if err != nil {
+ t.Fatalf("Failed to create test file: %v", err)
+ }
- // Test normal case - file should close successfully
- CloseFile(t, file)
+ // Test normal case - file should close successfully
+ CloseFile(t, file)
- // Verify file is closed
- _, writeErr := file.Write([]byte("test"))
- if writeErr == nil {
- t.Error("Expected write to fail on closed file")
- }
- })
+ // Verify file is closed
+ _, writeErr := file.Write([]byte("test"))
+ if writeErr == nil {
+ t.Error("Expected write to fail on closed file")
+ }
+ },
+ )
}