1 Commits

Author SHA1 Message Date
db63505fa7 fix(ci): update security.yml 2025-08-13 10:16:24 +03:00
146 changed files with 3977 additions and 23690 deletions

View File

@@ -1,38 +0,0 @@
# Git
.git
.github
.gitignore
# Build artifacts
gibidify
gibidify-*
dist/
coverage.out
coverage.html
test-results.json
*.sarif
# Documentation
*.md
docs/
# Config and tooling
.checkmake
.editorconfig
.golangci.yml
.yamllint
revive.toml
# Scripts
scripts/
# IDE
.vscode
.idea
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db

View File

@@ -7,12 +7,14 @@ trim_trailing_whitespace = true
indent_size = 2
indent_style = tab
tab_width = 2
charset = utf-8
[*.go]
max_line_length = 120
[*.yml]
indent_style = space
[*.{yml,yaml,json,example}]
[*.md]
trim_trailing_whitespace = false
[*.{yml,yaml,json}]
indent_style = space
max_line_length = 250
@@ -20,18 +22,5 @@ max_line_length = 250
max_line_length = 80
indent_size = 0
indent_style = space
trim_trailing_whitespace = true
[*.{sh,md,txt}]
indent_style = space
[.yamllint]
indent_style = space
[Makefile]
indent_style = tab
indent_size = 0
max_line_length = 999
tab_width = 4
[*.md]
trim_trailing_whitespace = false

View File

@@ -1,14 +0,0 @@
{
"Exclude": [".git", "vendor", "node_modules", "README\\.md"],
"AllowedContentTypes": [],
"PassedFiles": [],
"Disable": {
"IndentSize": false,
"EndOfLine": false,
"InsertFinalNewline": false,
"TrimTrailingWhitespace": false,
"MaxLineLength": false
},
"SpacesAfterTabs": false,
"NoColor": false
}

View File

@@ -1,16 +0,0 @@
---
name: "Setup Go with Runner Hardening"
description: "Reusable action to set up Go"
inputs:
token:
description: "GitHub token for checkout (optional)"
required: false
default: ""
runs:
using: "composite"
steps:
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: ".go-version"
cache: true

View File

@@ -1,5 +1,4 @@
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
---
name: Build, Test, Coverage, and Publish
on:
@@ -10,7 +9,8 @@ on:
release:
types: [created]
permissions: {}
permissions:
contents: read
jobs:
test:
@@ -25,60 +25,51 @@ jobs:
statuses: write
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Go
uses: ./.github/actions/setup
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
token: ${{ github.token }}
egress-policy: audit
- name: Download dependencies
shell: bash
run: go mod download
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run tests with coverage
shell: bash
run: |
go test -race -covermode=atomic -json -coverprofile=coverage.out ./... | tee test-results.json
- name: Set up Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version-file: "./go.mod"
cache: true
- name: Install dependencies
run: go mod tidy
- name: Run tests
run: go test -json ./... > test-results.json
- name: Generate coverage report
run: go test -coverprofile=coverage.out ./...
- name: Check coverage
id: coverage
if: always()
shell: bash
run: |
if [[ ! -f coverage.out ]]; then
echo "coverage.out is missing; tests likely failed before producing coverage"
exit 1
fi
coverage="$(go tool cover -func=coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}')"
echo "total_coverage=$coverage" >> "$GITHUB_ENV"
echo "Coverage: $coverage%"
- name: Upload test results
if: always()
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: test-results
path: test-results.json
- name: Cleanup
if: always()
shell: bash
run: rm -f coverage.out test-results.json
run: rm coverage.out
- name: Fail if coverage is below threshold
if: always()
shell: bash
run: |
if [[ -z "${total_coverage:-}" ]]; then
echo "total_coverage is unset; previous step likely failed"
if (( $(echo "$total_coverage < 50" | bc -l) )); then
echo "Coverage ($total_coverage%) is below the threshold (50%)"
exit 1
fi
awk -v cov="$total_coverage" 'BEGIN{ if (cov < 60) exit 1; else exit 0 }' || {
echo "Coverage ($total_coverage%) is below the threshold (60%)"
exit 1
}
build:
name: Build Binaries
@@ -96,15 +87,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: ./.github/actions/setup
- name: Set up Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
token: ${{ github.token }}
go-version-file: "./go.mod"
- name: Download dependencies
run: go mod download
- name: Run go mod tidy
run: go mod tidy
- name: Build binary for ${{ matrix.goos }}-${{ matrix.goarch }}
run: |
@@ -122,7 +113,7 @@ jobs:
done
- name: Upload artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: gibidify-${{ matrix.goos }}-${{ matrix.goarch }}
path: dist/*
@@ -139,26 +130,26 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: ./.github/actions/setup
- name: Download Linux binaries
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
token: ${{ github.token }}
name: gibidify-linux-amd64
path: .
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Log in to GitHub Container Registry
run: |
echo "${{ github.token }}" | docker login ghcr.io \
-u "$(echo "${{ github.actor }}" | tr '[:upper:]' '[:lower:]')" \
--password-stdin
run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Build and push Docker image
- name: Build and push multi-arch Docker image
run: |
repo="$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')"
docker buildx build --platform linux/amd64 \
--tag "ghcr.io/${repo}/gibidify:${{ github.ref_name }}" \
--tag "ghcr.io/${repo}/gibidify:latest" \
--push .
chmod +x gibidify-linux-amd64
mv gibidify-linux-amd64 gibidify
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
--tag ghcr.io/${{ github.repository }}/gibidify:${{ github.ref_name }} \
--tag ghcr.io/${{ github.repository }}/gibidify:latest \
--push \
--squash .

View File

@@ -1,40 +0,0 @@
---
name: CodeQL Analysis
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
permissions: {}
jobs:
analyze:
name: Analyze Code
runs-on: ubuntu-latest
permissions:
security-events: write
contents: read
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Go
uses: ./.github/actions/setup
with:
token: ${{ github.token }}
- name: Initialize CodeQL
uses: github/codeql-action/init@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0

View File

@@ -9,7 +9,7 @@ on:
pull_request:
branches: [master, main]
permissions: {}
permissions: read-all
jobs:
Linter:
@@ -21,12 +21,7 @@ jobs:
pull-requests: write
statuses: write
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Go
uses: ./.github/actions/setup
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
token: ${{ github.token }}
- uses: ivuorinen/actions/pr-lint@f98ae7cd7d0feb1f9d6b01de0addbb11414cfc73 # v2026.01.21
token: ${{ secrets.GITHUB_TOKEN }}
- uses: ivuorinen/actions/pr-lint@fa0232d3c4ba16d087b606296435354a69c01756 # 25.8.11

View File

@@ -1,4 +1,3 @@
---
name: Security Scan
on:
@@ -8,9 +7,9 @@ on:
branches: [main, develop]
schedule:
# Run security scan weekly on Sundays at 00:00 UTC
- cron: "0 0 * * 0"
- cron: '0 0 * * 0'
permissions: {}
permissions: read-all
jobs:
security:
@@ -23,22 +22,32 @@ jobs:
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Go
uses: ./.github/actions/setup
uses: actions/setup-go@v5
with:
token: ${{ github.token }}
go-version-file: 'go.mod'
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
# Security Scanning with gosec
- name: Run gosec Security Scanner
uses: securego/gosec@424fc4cd9c82ea0fd6bee9cd49c2db2c3cc0c93f # v2.22.11
uses: securego/gosec@v2
with:
args: "-fmt sarif -out gosec-results.sarif ./..."
args: '-fmt sarif -out gosec-results.sarif ./...'
- name: Upload gosec results to GitHub Security tab
uses: github/codeql-action/upload-sarif@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0
uses: github/codeql-action/upload-sarif@v3
if: always()
with:
sarif_file: gosec-results.sarif
@@ -53,18 +62,24 @@ jobs:
run: |
if [ -s govulncheck-results.json ]; then
echo "::warning::Vulnerability check completed. Check govulncheck-results.json for details."
if grep -i -q '"finding"' govulncheck-results.json; then
if grep -q '"finding"' govulncheck-results.json; then
echo "::error::Vulnerabilities found in dependencies!"
cat govulncheck-results.json
exit 1
fi
fi
# Additional Security Linting
- name: Run security-focused golangci-lint
uses: golangci/golangci-lint-action@v6.0.1
with:
args: "--enable=gosec,gocritic,bodyclose,rowserrcheck,misspell,unconvert,unparam,unused --timeout=5m"
# Makefile Linting
- name: Run checkmake on Makefile
run: |
go install github.com/checkmake/checkmake/cmd/checkmake@latest
checkmake --config=.checkmake Makefile
uses: Uno-Takashi/checkmake-action@v2
with:
config: .checkmake
# Shell Script Formatting Check
- name: Check shell script formatting
@@ -72,11 +87,27 @@ jobs:
go install mvdan.cc/sh/v3/cmd/shfmt@latest
shfmt -d .
# YAML Linting
- name: Run YAML linting
uses: ibiqlik/action-yamllint@2576378a8e339169678f9939646ee3ee325e845c # v3.1.1
with:
file_or_dir: .
strict: true
run: |
go install github.com/excilsploft/yamllint@latest
yamllint -c .yamllint .
# Secrets Detection (basic patterns)
- name: Run secrets detection
run: |
echo "Scanning for potential secrets..."
# Look for common secret patterns
git log --all --full-history -- . | grep -i -E "(password|secret|key|token|api_key)" || true
find . -type f -name "*.go" -exec grep -H -i -E "(password|secret|key|token|api_key)\s*[:=]" {} \; || true
# Check for hardcoded IPs and URLs
- name: Check for hardcoded network addresses
run: |
echo "Scanning for hardcoded network addresses..."
find . -type f -name "*.go" -exec grep -H -E "([0-9]{1,3}\.){3}[0-9]{1,3}" {} \; || true
find . -type f -name "*.go" -exec grep -H -E "https?://[^/\s]+" {} \; | \
grep -v "example.com|localhost|127.0.0.1" || true
# Docker Security (if Dockerfile exists)
- name: Run Docker security scan
@@ -85,9 +116,24 @@ jobs:
docker run --rm -v "$PWD":/workspace \
aquasec/trivy:latest fs --security-checks vuln,config /workspace/Dockerfile || true
# SAST with CodeQL (if available)
- name: Initialize CodeQL
if: github.event_name != 'schedule'
uses: github/codeql-action/init@v3
with:
languages: go
- name: Autobuild
if: github.event_name != 'schedule'
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
if: github.event_name != 'schedule'
uses: github/codeql-action/analyze@v3
# Upload artifacts for review
- name: Upload security scan results
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
if: always()
with:
name: security-scan-results

View File

@@ -22,4 +22,4 @@ jobs:
issues: write
runs-on: ubuntu-latest
steps:
- uses: ivuorinen/actions/sync-labels@f98ae7cd7d0feb1f9d6b01de0addbb11414cfc73 # v2026.01.21
- uses: ivuorinen/actions/sync-labels@fa0232d3c4ba16d087b606296435354a69c01756 # 25.8.11

17
.gitignore vendored
View File

@@ -1,21 +1,14 @@
*.out
.DS_Store
.idea
.serena/
coverage.*
gibidify
gibidify-benchmark
gibidify.json
gibidify.txt
gibidify.yaml
megalinter-reports/*
output.json
output.txt
output.yaml
gosec-report.json
govulncheck-report.json
gitleaks-report.json
security-report.json
security-report.md
gosec*.log
pr.txt
coverage.out
megalinter-reports/*
coverage.*
*.out
gibidify-benchmark

View File

@@ -1,15 +0,0 @@
# gitleaks configuration
# https://github.com/gitleaks/gitleaks
#
# Extends the built-in ruleset. Only allowlist overrides are defined here.
[allowlist]
description = "Global allowlist for generated and report files"
paths = [
'''gosec-report\.json$''',
'''govulncheck-report\.json$''',
'''security-report\.json$''',
'''security-report\.md$''',
'''output\.json$''',
'''gibidify\.json$''',
]

View File

@@ -1 +1 @@
1.25.6
1.23.0

256
.golangci.yml Normal file
View File

@@ -0,0 +1,256 @@
run:
timeout: 5m
tests: true
go: "1.24"
build-tags:
- test
# golangci-lint configuration version
version: 2
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
path-prefix: ""
sort-results: true
linters:
enable-all: true
disable:
- depguard # Too strict for general use
- exhaustruct # Too many false positives
- ireturn # Too restrictive on interfaces
- varnamelen # Too opinionated on name length
- wrapcheck # Too many false positives
- testpackage # Tests in same package are fine
- paralleltest # Not always necessary
- tparallel # Not always necessary
- nlreturn # Too opinionated on newlines
- wsl # Too opinionated on whitespace
- nonamedreturns # Conflicts with gocritic unnamedResult
linters-settings:
errcheck:
check-type-assertions: true
check-blank: true
exclude-functions:
- io.Copy
- fmt.Print
- fmt.Printf
- fmt.Println
govet:
enable-all: true
gocyclo:
min-complexity: 15
gocognit:
min-complexity: 20
goconst:
min-len: 3
min-occurrences: 3
gofmt:
simplify: true
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'
goimports:
local-prefixes: github.com/ivuorinen/gibidify
golint:
min-confidence: 0.8
lll:
line-length: 120
tab-width: 2 # EditorConfig: tab_width = 2
misspell:
locale: US
nakedret:
max-func-lines: 30
prealloc:
simple: true
range-loops: true
for-loops: true
revive:
enable-all-rules: true
rules:
- name: package-comments
disabled: true
- name: file-header
disabled: true
- name: max-public-structs
disabled: true
- name: line-length-limit
arguments: [120]
- name: function-length
arguments: [50, 100]
- name: cognitive-complexity
arguments: [20]
- name: cyclomatic
arguments: [15]
- name: add-constant
arguments:
- maxLitCount: "3"
allowStrs: "\"error\",\"\""
allowInts: "0,1,2"
- name: argument-limit
arguments: [6]
- name: banned-characters
disabled: true
- name: function-result-limit
arguments: [3]
gosec:
excludes:
- G104 # Handled by errcheck
severity: medium
confidence: medium
exclude-generated: true
config:
G301: "0750"
G302: "0640"
G306: "0640"
dupl:
threshold: 150
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- whyNoLint
- paramTypeCombine
gofumpt:
extra-rules: true
# EditorConfig compliance settings
# These settings enforce .editorconfig rules:
# - end_of_line = lf (enforced by gofumpt)
# - insert_final_newline = true (enforced by gofumpt)
# - trim_trailing_whitespace = true (enforced by whitespace linter)
# - indent_style = tab, tab_width = 2 (enforced by gofumpt and lll)
whitespace:
multi-if: false # EditorConfig: trim trailing whitespace
multi-func: false # EditorConfig: trim trailing whitespace
nolintlint:
allow-leading-space: false # EditorConfig: trim trailing whitespace
allow-unused: false
require-explanation: false
require-specific: true
godox:
keywords:
- FIXME
- BUG
- HACK
mnd:
settings:
mnd:
checks:
- argument
- case
- condition
- operation
- return
- assign
ignored-numbers:
- '0'
- '1'
- '2'
- '10'
- '100'
funlen:
lines: 80
statements: 60
nestif:
min-complexity: 5
gomodguard:
allowed:
modules: []
domains: []
blocked:
modules: []
versions: []
issues:
exclude-use-default: false
exclude-case-sensitive: false
max-issues-per-linter: 0
max-same-issues: 0
uniq-by-line: true
exclude-dirs:
- vendor
- third_party
- testdata
- examples
- .git
exclude-files:
- ".*\\.pb\\.go$"
- ".*\\.gen\\.go$"
exclude-rules:
- path: _test\.go
linters:
- dupl
- gosec
- goconst
- funlen
- gocognit
- gocyclo
- errcheck
- lll
- nestif
- path: main\.go
linters:
- gochecknoglobals
- gochecknoinits
- path: fileproc/filetypes\.go
linters:
- gochecknoglobals # Allow globals for singleton registry pattern
- text: "Using the variable on range scope"
linters:
- scopelint
- text: "should have comment or be unexported"
linters:
- golint
- revive
- text: "don't use ALL_CAPS in Go names"
linters:
- golint
- stylecheck
exclude:
- "Error return value of .* is not checked"
- "exported (type|method|function) .* should have comment"
- "ST1000: at least one file in a package should have a package comment"
severity:
default-severity: error
case-sensitive: false

View File

@@ -18,8 +18,3 @@ SHOW_SKIPPED_LINTERS: false # Show skipped linters in MegaLinter log
DISABLE_LINTERS:
- REPOSITORY_DEVSKIM
- REPOSITORY_TRIVY
- GO_GOLANGCI_LINT
- YAML_PRETTIER
# By default megalinter uses list_of_files, which is wrong.
GO_REVIVE_CLI_LINT_MODE: project

View File

@@ -1,27 +1,15 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/pre-commit-config.json
# For more hooks, see https://pre-commit.com/hooks.html
repos:
- repo: https://github.com/editorconfig-checker/editorconfig-checker.python
rev: 3.6.0
- repo: https://github.com/golangci/golangci-lint
rev: v1.57.2
hooks:
- id: editorconfig-checker
alias: ec
- id: golangci-lint
args: ["--timeout=5m"]
- repo: https://github.com/tekwizely/pre-commit-golang
rev: v1.0.0-rc.2
rev: v1.0.0-rc.1
hooks:
- id: go-build-mod
alias: build
- id: go-mod-tidy
alias: tidy
- id: go-revive
alias: revive
- id: go-vet-mod
alias: vet
- id: go-staticcheck-mod
alias: static
- id: go-fmt
alias: fmt
- id: go-sec-mod
alias: sec

1
.serena/.gitignore vendored
View File

@@ -1 +0,0 @@
/cache

View File

@@ -1,74 +0,0 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
---
language: go
# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []
# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location
# (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given
# name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active
# and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks,
# e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation
# (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on
# track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []
# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""
project_name: "gibidify"

View File

@@ -1,18 +0,0 @@
---
doublestar: true
gitignore_excludes: true
formatter:
type: basic
include_document_start: true
retain_line_breaks_single: true
scan_folded_as_literal: false
max_line_length: 0
trim_trailing_whitespace: true
array_indent: 2
force_array_style: block
include:
- ./**/*.yml
- ./**/*.yaml
- .github/**/*.yml
- .github/**/*.yaml
# exclude:

View File

@@ -1,4 +1,3 @@
---
# yamllint configuration
# See: https://yamllint.readthedocs.io/en/stable/configuration.html
@@ -36,3 +35,6 @@ rules:
# Relax comments formatting
comments:
min-spaces-from-content: 1
# Allow document start marker to be optional
document-start: disable

View File

@@ -1,15 +1,12 @@
# CLAUDE.md
Go CLI aggregating code files into LLM-optimized output.
Supports markdown/JSON/YAML with concurrent processing.
Go CLI aggregating code files into LLM-optimized output. Supports markdown/JSON/YAML with concurrent processing.
## Architecture
## Architecture (42 files, 8.2K lines)
**Core**: `main.go`, `cli/`, `fileproc/`, `config/`, `utils/`, `testutil/`, `cmd/`
**Core**: `main.go` (37), `cli/` (4), `fileproc/` (27), `config/` (3), `utils/` (4), `testutil/` (2)
**Advanced**: `metrics/`, `templates/`, `benchmark/`
**Modules**: Collection, processing, writers, registry (~63ns cache), resource limits, metrics, templating
**Modules**: Collection, processing, writers, registry (~63ns cache), resource limits
**Patterns**: Producer-consumer, thread-safe registry, streaming, modular (50-200 lines)
@@ -18,7 +15,6 @@ Supports markdown/JSON/YAML with concurrent processing.
```bash
make lint-fix && make lint && make test
./gibidify -source <dir> -format markdown --verbose
./gibidify -source <dir> -format json --log-level debug --verbose
```
## Config
@@ -26,51 +22,26 @@ make lint-fix && make lint && make test
`~/.config/gibidify/config.yaml`
Size limit 5MB, ignore dirs, custom types, 100MB memory limit
## Linting Standards (MANDATORY)
## Quality
**Linter**: revive (comprehensive rule set migrated from golangci-lint)
**Command**: `revive -config revive.toml ./...`
**Complexity**: cognitive-complexity ≤15, cyclomatic ≤15, max-control-nesting ≤5
**Security**: unhandled errors, secure coding patterns, credential detection
**Performance**: optimize-operands-order, string-format, range optimizations
**Format**: line-length ≤120 chars, EditorConfig (LF, tabs), gofmt/goimports
**Testing**: error handling best practices, 0 tolerance policy
**CRITICAL**: All rules non-negotiable. `make lint-fix && make lint` must show 0 issues.
**CRITICAL**: `make lint-fix && make lint` (0 issues), 120 chars, EditorConfig, 30+ linters
## Testing
**Coverage**: 77.9% overall (utils 90.0%, cli 83.8%, config 77.0%, testutil 73.7%, fileproc 74.5%, metrics 96.0%, templates 87.3%)
**Patterns**: Table-driven tests, shared testutil helpers, mock objects, error assertions
**Race detection**, benchmarks, comprehensive integration tests
## Development Patterns
**Logging**: Use `utils.Logger()` for all logging (replaces logrus). Default WARN level, set via `--log-level` flag
**Error Handling**: Use `utils.WrapError` family for structured errors with context
**Streaming**: Use `utils.StreamContent/StreamLines` for consistent file processing
**Context**: Use `utils.CheckContextCancellation` for standardized cancellation
**Testing**: Use `testutil.*` helpers for directory setup, error assertions
**Validation**: Centralized in `config/validation.go` with structured error collection
**Coverage**: 84%+ (utils 90.9%, fileproc 83.8%), race detection, benchmarks
## Standards
EditorConfig (LF, tabs), semantic commits, testing required, error wrapping
## revive.toml Restrictions
**AGENTS DO NOT HAVE PERMISSION** to modify `revive.toml` configuration unless user explicitly requests it.
The linting configuration is carefully tuned and should not be altered during normal development.
EditorConfig (LF, tabs), semantic commits, testing required
## Status
**Health: 9/10** - Production-ready with systematic deduplication complete
**Health: 10/10** - Production-ready, 84%+ coverage, modular, memory-optimized
**Done**: Deduplication, errors, benchmarks, config, optimization, testing (77.9%), modularization, linting (0 issues), metrics system, templating
**Done**: Errors, benchmarks, config, optimization, modularization, CLI (progress/colors), security (path validation, resource limits, scanning)
**Next**: Documentation, output customization
## Workflow
1. `make lint-fix` first
2. >80% coverage
3. Follow patterns
4. Update docs
1. `make lint-fix` first 2. >80% coverage 3. Follow patterns 4. Update docs

View File

@@ -1,5 +1,5 @@
# Use a minimal base image
FROM alpine:3.23.3
FROM alpine:3.21.2
# Add user
RUN useradd -ms /bin/bash gibidify

100
Makefile
View File

@@ -1,10 +1,10 @@
.PHONY: all help install-tools lint lint-fix test coverage build clean all build-benchmark benchmark benchmark-go benchmark-go-cli benchmark-go-fileproc benchmark-go-metrics benchmark-go-shared benchmark-all benchmark-collection benchmark-processing benchmark-concurrency benchmark-format security security-full vuln-check update-deps check-all dev-setup
.PHONY: help install-tools lint lint-fix lint-verbose test coverage build clean all build-benchmark benchmark benchmark-collection benchmark-processing benchmark-concurrency benchmark-format security security-full vuln-check check-all dev-setup
# Default target shows help
.DEFAULT_GOAL := help
# All target runs full workflow
all: lint lint-fix test build
all: lint test build
# Help target
help:
@@ -12,7 +12,25 @@ help:
# Install required tools
install-tools:
@./scripts/install-tools.sh
@echo "Installing golangci-lint..."
@go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
@echo "Installing gofumpt..."
@go install mvdan.cc/gofumpt@latest
@echo "Installing goimports..."
@go install golang.org/x/tools/cmd/goimports@latest
@echo "Installing staticcheck..."
@go install honnef.co/go/tools/cmd/staticcheck@latest
@echo "Installing gosec..."
@go install github.com/securego/gosec/v2/cmd/gosec@latest
@echo "Installing gocyclo..."
@go install github.com/fzipp/gocyclo/cmd/gocyclo@latest
@echo "Installing checkmake..."
@go install github.com/mrtazz/checkmake/cmd/checkmake@latest
@echo "Installing shfmt..."
@go install mvdan.cc/sh/v3/cmd/shfmt@latest
@echo "Installing yamllint (Go-based)..."
@go install github.com/excilsploft/yamllint@latest
@echo "All tools installed successfully!"
# Run linters
lint:
@@ -20,7 +38,35 @@ lint:
# Run linters with auto-fix
lint-fix:
@./scripts/lint-fix.sh
@echo "Running gofumpt..."
@gofumpt -l -w .
@echo "Running goimports..."
@goimports -w -local github.com/ivuorinen/gibidify .
@echo "Running go fmt..."
@go fmt ./...
@echo "Running go mod tidy..."
@go mod tidy
@echo "Running shfmt formatting..."
@shfmt -w -i 2 -ci .
@echo "Running golangci-lint with --fix..."
@golangci-lint run --fix ./...
@echo "Auto-fix completed. Running final lint check..."
@golangci-lint run ./...
@echo "Running checkmake..."
@checkmake --config=.checkmake Makefile
@echo "Running yamllint..."
@yamllint -c .yamllint .
# Run linters with verbose output
lint-verbose:
@echo "Running golangci-lint (verbose)..."
@golangci-lint run -v ./...
@echo "Running checkmake (verbose)..."
@checkmake --config=.checkmake --format="{{.Line}}:{{.Rule}}:{{.Violation}}" Makefile
@echo "Running shfmt check (verbose)..."
@shfmt -d .
@echo "Running yamllint (verbose)..."
@yamllint -c .yamllint -f parsable .
# Run tests
test:
@@ -43,14 +89,15 @@ build:
# Clean build artifacts
clean:
@echo "Cleaning build artifacts..."
@rm -f gibidify gibidify-benchmark coverage.out coverage.html *.out
@rm -f gibidify gibidify-benchmark
@rm -f coverage.out coverage.html
@echo "Clean complete"
# CI-specific targets
.PHONY: ci-lint ci-test
ci-lint:
@revive -config revive.toml -formatter friendly -set_exit_status ./...
@golangci-lint run --out-format=github-actions ./...
ci-test:
@go test -race -coverprofile=coverage.out -json ./... > test-results.json
@@ -61,36 +108,11 @@ build-benchmark:
@go build -ldflags="-s -w" -o gibidify-benchmark ./cmd/benchmark
@echo "Build complete: ./gibidify-benchmark"
# Run custom benchmark binary
# Run benchmarks
benchmark: build-benchmark
@echo "Running custom benchmarks..."
@echo "Running all benchmarks..."
@./gibidify-benchmark -type=all
# Run all Go test benchmarks
benchmark-go:
@echo "Running all Go test benchmarks..."
@go test -bench=. -benchtime=100ms -run=^$$ ./...
# Run Go test benchmarks for specific packages
benchmark-go-cli:
@echo "Running CLI benchmarks..."
@go test -bench=. -benchtime=100ms -run=^$$ ./cli/...
benchmark-go-fileproc:
@echo "Running fileproc benchmarks..."
@go test -bench=. -benchtime=100ms -run=^$$ ./fileproc/...
benchmark-go-metrics:
@echo "Running metrics benchmarks..."
@go test -bench=. -benchtime=100ms -run=^$$ ./metrics/...
benchmark-go-shared:
@echo "Running shared benchmarks..."
@go test -bench=. -benchtime=100ms -run=^$$ ./shared/...
# Run all benchmarks (custom + Go test)
benchmark-all: benchmark benchmark-go
# Run specific benchmark types
benchmark-collection: build-benchmark
@echo "Running file collection benchmarks..."
@@ -113,19 +135,13 @@ security:
@echo "Running comprehensive security scan..."
@./scripts/security-scan.sh
security-full: install-tools
security-full:
@echo "Running full security analysis..."
@./scripts/security-scan.sh
@echo "Running additional security checks..."
@gosec -fmt=json -out=security-report.json ./...
@staticcheck -checks=all ./...
@golangci-lint run --enable-all --disable=depguard,exhaustruct,ireturn,varnamelen,wrapcheck --timeout=10m
vuln-check:
@echo "Checking for dependency vulnerabilities..."
@go install golang.org/x/vuln/cmd/govulncheck@v1.1.4
@go install golang.org/x/vuln/cmd/govulncheck@latest
@govulncheck ./...
# Update dependencies
update-deps:
@echo "Updating Go dependencies..."
@./scripts/update-deps.sh

View File

@@ -14,11 +14,9 @@ file sections with separators, and a suffix.
- **Concurrent processing** with configurable worker pools
- **Comprehensive configuration** via YAML with validation
- **Production-ready** with structured error handling and benchmarking
- **Modular architecture** - clean, focused codebase (92 files, ~21.5K lines) with ~63ns registry lookups
- **Modular architecture** - clean, focused codebase with ~63ns registry lookups
- **Enhanced CLI experience** - progress bars, colored output, helpful error messages
- **Cross-platform** with Docker support
- **Advanced template system** - 4 built-in templates (default, minimal, detailed, compact) with custom template support, variable substitution, and YAML-based configuration
- **Comprehensive metrics and profiling** - real-time processing statistics, performance analysis, memory usage tracking, and automated recommendations
## Installation
@@ -42,8 +40,7 @@ go build -o gibidify .
--suffix="..." \
--no-colors \
--no-progress \
--verbose \
--log-level debug
--verbose
```
Flags:
@@ -56,7 +53,6 @@ Flags:
- `--no-colors`: disable colored terminal output.
- `--no-progress`: disable progress bars.
- `--verbose`: enable verbose output and detailed logging.
- `--log-level`: set log level (default: warn; accepted values: debug, info, warn, error).
## Docker
@@ -127,33 +123,6 @@ backpressure:
maxPendingWrites: 100 # Max writes in write channel buffer
maxMemoryUsage: 104857600 # 100MB max memory usage
memoryCheckInterval: 1000 # Check memory every 1000 files
# Output and template customization
output:
# Template selection: default, minimal, detailed, compact, or custom
# Templates control output structure and formatting
template: "default"
# Metadata options
metadata:
includeStats: true
includeTimestamp: true
includeFileCount: true
includeSourcePath: true
includeMetrics: true
# Markdown-specific options
markdown:
useCodeBlocks: true
includeLanguage: true
headerLevel: 2
tableOfContents: false
useCollapsible: false
syntaxHighlighting: true
lineNumbers: false
# Custom template variables
variables:
project_name: "My Project"
author: "Developer Name"
version: "1.0.0"
```
See `config.example.yaml` for a comprehensive configuration example.

134
TODO.md
View File

@@ -4,127 +4,43 @@ Prioritized improvements by impact/effort.
## ✅ Completed
**Core**: Config validation, structured errors, benchmarking, linting (revive: 0 issues)
**Architecture**: Modularization (92 files, ~21.5K lines), CLI (progress/colors), security (path validation, resource limits, scanning) ✅
**Core**: Testing (84%+), config validation, structured errors, benchmarking ✅
**Architecture**: Modularization (50-200 lines), CLI (progress/colors), security (path validation, resource limits, scanning) ✅
## 🚀 Critical Priorities
## 🚀 Current Priorities
### Testing Coverage (URGENT)
- [x] **CLI module testing** (0% → 83.8%) - COMPLETED ✅
- [x] cli/flags_test.go - Flag parsing and validation ✅
- [x] cli/errors_test.go - Error formatting and structured errors ✅
- [x] cli/ui_test.go - UI components, colors, progress bars ✅
- [x] cli/processor_test.go - Processing workflow integration ✅
- [x] **Utils module testing** (7.4% → 90.0%) - COMPLETED ✅
- [x] utils/writers_test.go - Writer functions (98% complete, minor test fixes needed) ✅
- [x] Enhanced utils/paths_test.go - Security and edge cases ✅
- [x] Enhanced utils/errors_test.go - StructuredError system ✅
- [x] **Testutil module testing** (45.1% → 73.7%) - COMPLETED ✅
- [x] testutil/utility_test.go - GetBaseName function comprehensive tests ✅
- [x] testutil/directory_structure_test.go - CreateTestDirectoryStructure and SetupTempDirWithStructure ✅
- [x] testutil/assertions_test.go - All AssertError functions comprehensive coverage ✅
- [x] testutil/error_scenarios_test.go - Edge cases and performance benchmarks ✅
- [x] **Main module testing** (41% → 50.0%) - COMPLETED ✅
- [x] **Fileproc module improvement** (66% → 74.5%) - COMPLETED ✅
### Metrics & Profiling
- [ ] Processing stats, timing
### ✅ Metrics & Profiling - COMPLETED
- [x] **Comprehensive metrics collection system** with processing statistics ✅
- [x] File processing metrics (processed, skipped, errors) ✅
- [x] Size metrics (total, average, largest, smallest file sizes) ✅
- [x] Performance metrics (files/sec, bytes/sec, processing time) ✅
- [x] Memory and resource tracking (peak memory, current memory, goroutine count) ✅
- [x] Format-specific metrics and error breakdown ✅
- [x] Phase timing (collection, processing, writing, finalize) ✅
- [x] Concurrency tracking and recommendations ✅
- [x] **Performance measurements and reporting**
- [x] Real-time progress reporting in CLI ✅
- [x] Verbose mode with detailed statistics ✅
- [x] Final comprehensive profiling reports ✅
- [x] Performance recommendations based on metrics ✅
- [x] **Structured logging integration** with centralized logging service ✅
- [x] Configurable log levels (debug, info, warn, error) ✅
- [x] Context-aware logging with structured data ✅
- [x] Metrics data integration in log output ✅
### ✅ Output Customization - COMPLETED
- [x] **Template system for output formatting**
- [x] Builtin templates: default, minimal, detailed, compact ✅
- [x] Custom template support with variables ✅
- [x] Template functions for formatting (formatSize, basename, etc.) ✅
- [x] Header/footer and file header/footer customization ✅
- [x] **Configurable markdown options**
- [x] Code block controls (syntax highlighting, line numbers) ✅
- [x] Header levels and table of contents ✅
- [x] Collapsible sections for space efficiency ✅
- [x] Line length limits and long file folding ✅
- [x] Custom CSS support ✅
- [x] **Metadata integration in outputs**
- [x] Configurable metadata inclusion (stats, timestamp, file counts) ✅
- [x] Processing metrics in output (performance, memory usage) ✅
- [x] File type breakdown and error summaries ✅
- [x] Source path and processing time information ✅
- [x] **Enhanced configuration system**
- [x] Template selection and customization options ✅
- [x] Metadata control flags ✅
- [x] Markdown formatting preferences ✅
- [x] Custom template variables support ✅
### Output Customization
- [ ] Templates, markdown config, metadata
### Documentation
- [ ] API docs, user guides
## 🌟 Future
**Plugins**: Custom handlers, formats
**Git**: Commit filtering, blame
**Rich output**: HTML, PDF, web UI
**Monitoring**: Prometheus, structured logging
## Guidelines
**Before**: `make lint-fix && make lint` (0 issues), >80% coverage
**Priorities**: Testing → Security → UX → Extensions
**Before**: `make lint-fix && make lint`, >80% coverage
**Priorities**: Security → UX → Extensions
## Status (2025-08-23 - Phase 3 Feature Implementation Complete)
## Status (2025-07-19)
**Health: 10/10** - Advanced metrics & profiling system and comprehensive output customization implemented
**Health: 10/10** - Production-ready, 42 files (8.2K lines), 84%+ coverage
**Stats**: 92 files (~21.5K lines), 77.9% overall coverage achieved
- CLI: 83.8% ✅, Utils: 90.0% ✅, Config: 77.0% ✅, Testutil: 73.7% ✅, Fileproc: 74.5% ✅, Main: 50.0% ✅, Metrics: 96.0% ✅, Templates: 87.3% ✅, Benchmark: 64.7% ✅
**Done**: Testing, config, errors, performance, modularization, CLI, security
**Next**: Documentation → Output customization
**Completed Today**:
-**Phase 1**: Consolidated duplicate code patterns
- Writer closeReader → utils.SafeCloseReader
- Custom yamlQuoteString → utils.EscapeForYAML
- Streaming patterns → utils.StreamContent/StreamLines
-**Phase 2**: Enhanced test infrastructure
- **Phase 2A**: Main module (41% → 50.0%) - Complete integration testing
- **Phase 2B**: Fileproc module (66% → 74.5%) - Streaming and backpressure testing
- **Phase 2C**: Testutil module (45.1% → 73.7%) - Utility and assertion testing
- Shared test helpers (directory structure, error assertions)
- Advanced testutil patterns (avoided import cycles)
-**Phase 3**: Standardized error/context handling
- Error creation using utils.WrapError family
- Centralized context cancellation patterns
-**Phase 4**: Documentation updates
### Token Usage
**Impact**: Eliminated code duplication, enhanced maintainability, achieved comprehensive test coverage across all major modules
- TODO.md: 171 words (~228 tokens) - 35% reduction ✅
- CLAUDE.md: 160 words (~213 tokens) - 25% reduction ✅
- Total: 331 words (~441 tokens) - 30% reduction ✅
**Completed This Session**:
-**Phase 3A**: Advanced Metrics & Profiling System
- Comprehensive processing statistics collection (files, sizes, performance)
- Real-time progress reporting with detailed metrics
- Phase timing tracking (collection, processing, writing, finalize)
- Memory and resource usage monitoring
- Format-specific metrics and error breakdown
- Performance recommendations engine
- Structured logging integration
-**Phase 3B**: Output Customization Features
- Template system with 4 builtin templates (default, minimal, detailed, compact)
- Custom template support with variable substitution
- Configurable markdown options (code blocks, TOC, collapsible sections)
- Metadata integration with selective inclusion controls
- Enhanced configuration system for all customization options
-**Phase 3C**: Comprehensive Testing & Integration
- Full test coverage for metrics and templates packages
- Integration with existing CLI processor workflow
- Deadlock-free concurrent metrics collection
- Configuration system extensions
**Impact**: Added powerful analytics and customization capabilities while maintaining high code quality and test coverage
**Next Session**:
- Phase 4: Enhanced documentation and user guides
- Optional: Advanced features (watch mode, incremental processing, etc.)
*Optimized from 474 → 331 words while preserving critical information*

View File

@@ -12,11 +12,11 @@ import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// Result represents the results of a benchmark run.
type Result struct {
// BenchmarkResult represents the results of a benchmark run.
type BenchmarkResult struct {
Name string
Duration time.Duration
FilesProcessed int
@@ -42,54 +42,14 @@ type CPUStats struct {
Goroutines int
}
// Suite represents a collection of benchmarks.
type Suite struct {
// BenchmarkSuite represents a collection of benchmarks.
type BenchmarkSuite struct {
Name string
Results []Result
}
// buildBenchmarkResult constructs a Result with all metrics calculated.
// This eliminates code duplication across benchmark functions.
func buildBenchmarkResult(
name string,
files []string,
totalBytes int64,
duration time.Duration,
memBefore, memAfter runtime.MemStats,
) *Result {
result := &Result{
Name: name,
Duration: duration,
FilesProcessed: len(files),
BytesProcessed: totalBytes,
}
// Calculate rates with zero-division guard
secs := duration.Seconds()
if secs == 0 {
result.FilesPerSecond = 0
result.BytesPerSecond = 0
} else {
result.FilesPerSecond = float64(len(files)) / secs
result.BytesPerSecond = float64(totalBytes) / secs
}
result.MemoryUsage = MemoryStats{
AllocMB: shared.SafeMemoryDiffMB(memAfter.Alloc, memBefore.Alloc),
SysMB: shared.SafeMemoryDiffMB(memAfter.Sys, memBefore.Sys),
NumGC: memAfter.NumGC - memBefore.NumGC,
PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
}
result.CPUUsage = CPUStats{
Goroutines: runtime.NumGoroutine(),
}
return result
Results []BenchmarkResult
}
// FileCollectionBenchmark benchmarks file collection operations.
func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
func FileCollectionBenchmark(sourceDir string, numFiles int) (*BenchmarkResult, error) {
// Load configuration to ensure proper file filtering
config.LoadConfig()
@@ -98,15 +58,9 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
if sourceDir == "" {
tempDir, cleanupFunc, err := createBenchmarkFiles(numFiles)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
shared.BenchmarkMsgFailedToCreateFiles,
)
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create benchmark files")
}
cleanup = cleanupFunc
//nolint:errcheck // Benchmark output, errors don't affect results
defer cleanup()
sourceDir = tempDir
}
@@ -120,12 +74,7 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
// Run the file collection benchmark
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgCollectionFailed,
)
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "benchmark file collection failed")
}
duration := time.Since(startTime)
@@ -142,29 +91,40 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
}
}
result := buildBenchmarkResult("FileCollection", files, totalBytes, duration, memBefore, memAfter)
result := &BenchmarkResult{
Name: "FileCollection",
Duration: duration,
FilesProcessed: len(files),
BytesProcessed: totalBytes,
FilesPerSecond: float64(len(files)) / duration.Seconds(),
BytesPerSecond: float64(totalBytes) / duration.Seconds(),
MemoryUsage: MemoryStats{
AllocMB: float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024,
SysMB: float64(memAfter.Sys-memBefore.Sys) / 1024 / 1024,
NumGC: memAfter.NumGC - memBefore.NumGC,
PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
},
CPUUsage: CPUStats{
Goroutines: runtime.NumGoroutine(),
},
}
return result, nil
}
// FileProcessingBenchmark benchmarks full file processing pipeline.
func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (*Result, error) {
func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (*BenchmarkResult, error) {
// Load configuration to ensure proper file filtering
config.LoadConfig()
var cleanup func()
if sourceDir == "" {
// Create temporary directory with test files
tempDir, cleanupFunc, err := createBenchmarkFiles(shared.BenchmarkDefaultFileCount)
tempDir, cleanupFunc, err := createBenchmarkFiles(100)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
shared.BenchmarkMsgFailedToCreateFiles,
)
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create benchmark files")
}
cleanup = cleanupFunc
//nolint:errcheck // Benchmark output, errors don't affect results
defer cleanup()
sourceDir = tempDir
}
@@ -172,21 +132,16 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Create temporary output file
outputFile, err := os.CreateTemp("", "benchmark_output_*."+format)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOFileCreate,
"failed to create benchmark output file",
)
return nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileCreate, "failed to create benchmark output file")
}
defer func() {
if err := outputFile.Close(); err != nil {
//nolint:errcheck // Warning message in defer, failure doesn't affect benchmark
_, _ = fmt.Printf("Warning: failed to close benchmark output file: %v\n", err)
// Log error but don't fail the benchmark
fmt.Printf("Warning: failed to close benchmark output file: %v\n", err)
}
if err := os.Remove(outputFile.Name()); err != nil {
//nolint:errcheck // Warning message in defer, failure doesn't affect benchmark
_, _ = fmt.Printf("Warning: failed to remove benchmark output file: %v\n", err)
// Log error but don't fail the benchmark
fmt.Printf("Warning: failed to remove benchmark output file: %v\n", err)
}
}()
@@ -199,23 +154,13 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Run the full processing pipeline
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgCollectionFailed,
)
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "benchmark file collection failed")
}
// Process files with concurrency
err = runProcessingPipeline(context.Background(), files, outputFile, format, concurrency, sourceDir)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingFileRead,
"benchmark processing pipeline failed",
)
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingFileRead, "benchmark processing pipeline failed")
}
duration := time.Since(startTime)
@@ -232,28 +177,38 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
}
}
benchmarkName := fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency)
result := buildBenchmarkResult(benchmarkName, files, totalBytes, duration, memBefore, memAfter)
result := &BenchmarkResult{
Name: fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency),
Duration: duration,
FilesProcessed: len(files),
BytesProcessed: totalBytes,
FilesPerSecond: float64(len(files)) / duration.Seconds(),
BytesPerSecond: float64(totalBytes) / duration.Seconds(),
MemoryUsage: MemoryStats{
AllocMB: float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024,
SysMB: float64(memAfter.Sys-memBefore.Sys) / 1024 / 1024,
NumGC: memAfter.NumGC - memBefore.NumGC,
PauseTotalNs: memAfter.PauseTotalNs - memBefore.PauseTotalNs,
},
CPUUsage: CPUStats{
Goroutines: runtime.NumGoroutine(),
},
}
return result, nil
}
// ConcurrencyBenchmark benchmarks different concurrency levels.
func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []int) (*Suite, error) {
suite := &Suite{
func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []int) (*BenchmarkSuite, error) {
suite := &BenchmarkSuite{
Name: "ConcurrencyBenchmark",
Results: make([]Result, 0, len(concurrencyLevels)),
Results: make([]BenchmarkResult, 0, len(concurrencyLevels)),
}
for _, concurrency := range concurrencyLevels {
result, err := FileProcessingBenchmark(sourceDir, format, concurrency)
if err != nil {
return nil, shared.WrapErrorf(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
"concurrency benchmark failed for level %d",
concurrency,
)
return nil, utils.WrapErrorf(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "concurrency benchmark failed for level %d", concurrency)
}
suite.Results = append(suite.Results, *result)
}
@@ -262,22 +217,16 @@ func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []i
}
// FormatBenchmark benchmarks different output formats.
func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
suite := &Suite{
func FormatBenchmark(sourceDir string, formats []string) (*BenchmarkSuite, error) {
suite := &BenchmarkSuite{
Name: "FormatBenchmark",
Results: make([]Result, 0, len(formats)),
Results: make([]BenchmarkResult, 0, len(formats)),
}
for _, format := range formats {
result, err := FileProcessingBenchmark(sourceDir, format, runtime.NumCPU())
if err != nil {
return nil, shared.WrapErrorf(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
"format benchmark failed for format %s",
format,
)
return nil, utils.WrapErrorf(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "format benchmark failed for format %s", format)
}
suite.Results = append(suite.Results, *result)
}
@@ -289,18 +238,13 @@ func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
func createBenchmarkFiles(numFiles int) (string, func(), error) {
tempDir, err := os.MkdirTemp("", "gibidify_benchmark_*")
if err != nil {
return "", nil, shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
"failed to create temp directory",
)
return "", nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create temp directory")
}
cleanup := func() {
if err := os.RemoveAll(tempDir); err != nil {
//nolint:errcheck // Warning message in cleanup, failure doesn't affect benchmark
_, _ = fmt.Printf("Warning: failed to remove benchmark temp directory: %v\n", err)
// Log error but don't fail the benchmark
fmt.Printf("Warning: failed to remove benchmark temp directory: %v\n", err)
}
}
@@ -312,16 +256,8 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
{".go", "package main\n\nfunc main() {\n\tprintln(\"Hello, World!\")\n}"},
{".js", "console.log('Hello, World!');"},
{".py", "print('Hello, World!')"},
{
".java",
"public class Hello {\n\tpublic static void main(String[] args) {\n\t" +
"\tSystem.out.println(\"Hello, World!\");\n\t}\n}",
},
{
".cpp",
"#include <iostream>\n\n" +
"int main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}",
},
{".java", "public class Hello {\n\tpublic static void main(String[] args) {\n\t\tSystem.out.println(\"Hello, World!\");\n\t}\n}"},
{".cpp", "#include <iostream>\n\nint main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}"},
{".rs", "fn main() {\n\tprintln!(\"Hello, World!\");\n}"},
{".rb", "puts 'Hello, World!'"},
{".php", "<?php\necho 'Hello, World!';\n?>"},
@@ -336,15 +272,9 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
// Create subdirectories for some files
if i%10 == 0 {
subdir := filepath.Join(tempDir, fmt.Sprintf("subdir_%d", i/10))
if err := os.MkdirAll(subdir, 0o750); err != nil {
if err := os.MkdirAll(subdir, 0o755); err != nil {
cleanup()
return "", nil, shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
"failed to create subdirectory",
)
return "", nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create subdirectory")
}
filename = filepath.Join(subdir, filename)
} else {
@@ -357,12 +287,9 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
content += fmt.Sprintf("// Line %d\n%s\n", j, fileType.content)
}
if err := os.WriteFile(filename, []byte(content), 0o600); err != nil {
if err := os.WriteFile(filename, []byte(content), 0o644); err != nil {
cleanup()
return "", nil, shared.WrapError(
err, shared.ErrorTypeIO, shared.CodeIOFileWrite, "failed to write benchmark file",
)
return "", nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileWrite, "failed to write benchmark file")
}
}
@@ -370,19 +297,7 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
}
// runProcessingPipeline runs the processing pipeline similar to main.go.
func runProcessingPipeline(
ctx context.Context,
files []string,
outputFile *os.File,
format string,
concurrency int,
sourceDir string,
) error {
// Guard against invalid concurrency to prevent deadlocks
if concurrency < 1 {
concurrency = 1
}
func runProcessingPipeline(ctx context.Context, files []string, outputFile *os.File, format string, concurrency int, sourceDir string) error {
fileCh := make(chan string, concurrency)
writeCh := make(chan fileproc.WriteRequest, concurrency)
writerDone := make(chan struct{})
@@ -391,14 +306,9 @@ func runProcessingPipeline(
go fileproc.StartWriter(outputFile, writeCh, writerDone, format, "", "")
// Get absolute path once
absRoot, err := shared.AbsolutePath(sourceDir)
absRoot, err := utils.GetAbsolutePath(sourceDir)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSPathResolution,
"failed to get absolute path for source directory",
)
return utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSPathResolution, "failed to get absolute path for source directory")
}
// Start workers with proper synchronization
@@ -421,8 +331,7 @@ func runProcessingPipeline(
workersDone.Wait() // Wait for workers to finish
close(writeCh)
<-writerDone
return fmt.Errorf("context canceled: %w", ctx.Err())
return ctx.Err()
case fileCh <- file:
}
}
@@ -438,98 +347,59 @@ func runProcessingPipeline(
return nil
}
// PrintResult prints a formatted benchmark result.
func PrintResult(result *Result) {
printBenchmarkLine := func(format string, args ...any) {
if _, err := fmt.Printf(format, args...); err != nil {
// Stdout write errors are rare (broken pipe, etc.) - log but continue
shared.LogError("failed to write benchmark output", err)
}
}
printBenchmarkLine(shared.BenchmarkFmtSectionHeader, result.Name)
printBenchmarkLine("Duration: %v\n", result.Duration)
printBenchmarkLine("Files Processed: %d\n", result.FilesProcessed)
printBenchmarkLine("Bytes Processed: %d (%.2f MB)\n", result.BytesProcessed,
float64(result.BytesProcessed)/float64(shared.BytesPerMB))
printBenchmarkLine("Files/sec: %.2f\n", result.FilesPerSecond)
printBenchmarkLine("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/float64(shared.BytesPerMB))
printBenchmarkLine(
"Memory Usage: +%.2f MB (Sys: +%.2f MB)\n",
result.MemoryUsage.AllocMB,
result.MemoryUsage.SysMB,
)
//nolint:errcheck // Overflow unlikely for pause duration, result output only
pauseDuration, _ := shared.SafeUint64ToInt64(result.MemoryUsage.PauseTotalNs)
printBenchmarkLine("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, time.Duration(pauseDuration))
printBenchmarkLine("Goroutines: %d\n", result.CPUUsage.Goroutines)
printBenchmarkLine("\n")
// PrintBenchmarkResult prints a formatted benchmark result.
func PrintBenchmarkResult(result *BenchmarkResult) {
fmt.Printf("=== %s ===\n", result.Name)
fmt.Printf("Duration: %v\n", result.Duration)
fmt.Printf("Files Processed: %d\n", result.FilesProcessed)
fmt.Printf("Bytes Processed: %d (%.2f MB)\n", result.BytesProcessed, float64(result.BytesProcessed)/1024/1024)
fmt.Printf("Files/sec: %.2f\n", result.FilesPerSecond)
fmt.Printf("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/1024/1024)
fmt.Printf("Memory Usage: +%.2f MB (Sys: +%.2f MB)\n", result.MemoryUsage.AllocMB, result.MemoryUsage.SysMB)
fmt.Printf("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, time.Duration(result.MemoryUsage.PauseTotalNs))
fmt.Printf("Goroutines: %d\n", result.CPUUsage.Goroutines)
fmt.Println()
}
// PrintSuite prints all results in a benchmark suite.
func PrintSuite(suite *Suite) {
if _, err := fmt.Printf(shared.BenchmarkFmtSectionHeader, suite.Name); err != nil {
shared.LogError("failed to write benchmark suite header", err)
}
// Iterate by index to avoid taking address of range variable
for i := range suite.Results {
PrintResult(&suite.Results[i])
// PrintBenchmarkSuite prints all results in a benchmark suite.
func PrintBenchmarkSuite(suite *BenchmarkSuite) {
fmt.Printf("=== %s ===\n", suite.Name)
for _, result := range suite.Results {
PrintBenchmarkResult(&result)
}
}
// RunAllBenchmarks runs a comprehensive benchmark suite.
func RunAllBenchmarks(sourceDir string) error {
printBenchmark := func(msg string) {
if _, err := fmt.Println(msg); err != nil {
shared.LogError("failed to write benchmark message", err)
}
}
printBenchmark("Running gibidify benchmark suite...")
fmt.Println("Running gibidify benchmark suite...")
// Load configuration
config.LoadConfig()
// File collection benchmark
printBenchmark(shared.BenchmarkMsgRunningCollection)
result, err := FileCollectionBenchmark(sourceDir, shared.BenchmarkDefaultFileCount)
fmt.Println("Running file collection benchmark...")
result, err := FileCollectionBenchmark(sourceDir, 1000)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgFileCollectionFailed,
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "file collection benchmark failed")
}
PrintResult(result)
PrintBenchmarkResult(result)
// Format benchmarks
printBenchmark("Running format benchmarks...")
formats := []string{shared.FormatJSON, shared.FormatYAML, shared.FormatMarkdown}
formatSuite, err := FormatBenchmark(sourceDir, formats)
fmt.Println("Running format benchmarks...")
formatSuite, err := FormatBenchmark(sourceDir, []string{"json", "yaml", "markdown"})
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgFormatFailed,
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "format benchmark failed")
}
PrintSuite(formatSuite)
PrintBenchmarkSuite(formatSuite)
// Concurrency benchmarks
printBenchmark("Running concurrency benchmarks...")
fmt.Println("Running concurrency benchmarks...")
concurrencyLevels := []int{1, 2, 4, 8, runtime.NumCPU()}
concurrencySuite, err := ConcurrencyBenchmark(sourceDir, shared.FormatJSON, concurrencyLevels)
concurrencySuite, err := ConcurrencyBenchmark(sourceDir, "json", concurrencyLevels)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgConcurrencyFailed,
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "concurrency benchmark failed")
}
PrintSuite(concurrencySuite)
PrintBenchmarkSuite(concurrencySuite)
return nil
}

View File

@@ -1,54 +1,10 @@
package benchmark
import (
"bytes"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/ivuorinen/gibidify/shared"
)
// capturedOutput captures stdout output from a function call.
func capturedOutput(t *testing.T, fn func()) string {
t.Helper()
original := os.Stdout
r, w, err := os.Pipe()
if err != nil {
t.Fatalf(shared.TestMsgFailedToCreatePipe, err)
}
defer r.Close()
defer func() { os.Stdout = original }()
os.Stdout = w
fn()
if err := w.Close(); err != nil {
t.Logf(shared.TestMsgFailedToClose, err)
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
t.Fatalf(shared.TestMsgFailedToReadOutput, err)
}
return buf.String()
}
// verifyOutputContains checks if output contains all expected strings.
func verifyOutputContains(t *testing.T, testName, output string, expected []string) {
t.Helper()
for _, check := range expected {
if !strings.Contains(output, check) {
t.Errorf("Test %s: output missing expected content: %q\nFull output:\n%s", testName, check, output)
}
}
}
// TestFileCollectionBenchmark tests the file collection benchmark.
func TestFileCollectionBenchmark(t *testing.T) {
result, err := FileCollectionBenchmark("", 10)
@@ -66,7 +22,7 @@ func TestFileCollectionBenchmark(t *testing.T) {
t.Logf("Bytes processed: %d", result.BytesProcessed)
if result.FilesProcessed <= 0 {
t.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
t.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
}
if result.Duration <= 0 {
@@ -82,7 +38,7 @@ func TestFileProcessingBenchmark(t *testing.T) {
}
if result.FilesProcessed <= 0 {
t.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
t.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
}
if result.Duration <= 0 {
@@ -103,12 +59,12 @@ func TestConcurrencyBenchmark(t *testing.T) {
}
if len(suite.Results) != len(concurrencyLevels) {
t.Errorf(shared.TestFmtExpectedResults, len(concurrencyLevels), len(suite.Results))
t.Errorf("Expected %d results, got %d", len(concurrencyLevels), len(suite.Results))
}
for i, result := range suite.Results {
if result.FilesProcessed <= 0 {
t.Errorf("Result %d: "+shared.TestFmtExpectedFilesProcessed, i, result.FilesProcessed)
t.Errorf("Result %d: Expected files processed > 0, got %d", i, result.FilesProcessed)
}
}
}
@@ -126,12 +82,12 @@ func TestFormatBenchmark(t *testing.T) {
}
if len(suite.Results) != len(formats) {
t.Errorf(shared.TestFmtExpectedResults, len(formats), len(suite.Results))
t.Errorf("Expected %d results, got %d", len(formats), len(suite.Results))
}
for i, result := range suite.Results {
if result.FilesProcessed <= 0 {
t.Errorf("Result %d: "+shared.TestFmtExpectedFilesProcessed, i, result.FilesProcessed)
t.Errorf("Result %d: Expected files processed > 0, got %d", i, result.FilesProcessed)
}
}
}
@@ -160,7 +116,7 @@ func BenchmarkFileCollection(b *testing.B) {
b.Fatalf("FileCollectionBenchmark failed: %v", err)
}
if result.FilesProcessed <= 0 {
b.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
b.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
}
}
}
@@ -173,7 +129,7 @@ func BenchmarkFileProcessing(b *testing.B) {
b.Fatalf("FileProcessingBenchmark failed: %v", err)
}
if result.FilesProcessed <= 0 {
b.Errorf(shared.TestFmtExpectedFilesProcessed, result.FilesProcessed)
b.Errorf("Expected files processed > 0, got %d", result.FilesProcessed)
}
}
}
@@ -188,7 +144,7 @@ func BenchmarkConcurrency(b *testing.B) {
b.Fatalf("ConcurrencyBenchmark failed: %v", err)
}
if len(suite.Results) != len(concurrencyLevels) {
b.Errorf(shared.TestFmtExpectedResults, len(concurrencyLevels), len(suite.Results))
b.Errorf("Expected %d results, got %d", len(concurrencyLevels), len(suite.Results))
}
}
}
@@ -203,315 +159,7 @@ func BenchmarkFormats(b *testing.B) {
b.Fatalf("FormatBenchmark failed: %v", err)
}
if len(suite.Results) != len(formats) {
b.Errorf(shared.TestFmtExpectedResults, len(formats), len(suite.Results))
b.Errorf("Expected %d results, got %d", len(formats), len(suite.Results))
}
}
}
// TestPrintResult tests the PrintResult function.
//
// It prints a Result with round numbers so the MB conversion is easy to
// verify (2048000 bytes / 1024 / 1024 ≈ 1.95 MB) and checks the rendered
// output for the expected header and field lines.
func TestPrintResult(t *testing.T) {
	// Create a test result.
	result := &Result{
		Name:           "Test Benchmark",
		Duration:       1 * time.Second,
		FilesProcessed: 100,
		BytesProcessed: 2048000, // ~2MB for easy calculation
	}

	// Reuse the shared stdout-capture helper instead of duplicating the
	// os.Pipe plumbing inline; this keeps the test consistent with
	// TestPrintResultEdgeCases, which already uses the helper.
	output := capturedOutput(t, func() { PrintResult(result) })

	// Verify expected content.
	verifyOutputContains(t, "TestPrintResult", output, []string{
		"=== Test Benchmark ===",
		"Duration: 1s",
		"Files Processed: 100",
		"Bytes Processed: 2048000",
		"1.95 MB", // 2048000 / 1024 / 1024 ≈ 1.95
	})
}
// TestPrintSuite tests the PrintSuite function.
//
// It prints a Suite containing two results and verifies that the suite
// header and each result's fields appear in the output, and that each
// result header is printed exactly once.
func TestPrintSuite(t *testing.T) {
	// Create a test suite with multiple results.
	suite := &Suite{
		Name: "Test Suite",
		Results: []Result{
			{
				Name:           "Benchmark 1",
				Duration:       500 * time.Millisecond,
				FilesProcessed: 50,
				BytesProcessed: 1024000, // 1MB
			},
			{
				Name:           "Benchmark 2",
				Duration:       750 * time.Millisecond,
				FilesProcessed: 75,
				BytesProcessed: 1536000, // 1.5MB
			},
		},
	}

	// Reuse the shared stdout-capture helper instead of duplicating the
	// os.Pipe plumbing inline (consistent with TestPrintSuiteEdgeCases).
	output := capturedOutput(t, func() { PrintSuite(suite) })

	// Verify expected content from the suite header and both results.
	verifyOutputContains(t, "TestPrintSuite", output, []string{
		"=== Test Suite ===",
		"=== Benchmark 1 ===",
		"Duration: 500ms",
		"Files Processed: 50",
		"=== Benchmark 2 ===",
		"Duration: 750ms",
		"Files Processed: 75",
	})

	// Verify both results are printed exactly once.
	if n := strings.Count(output, "=== Benchmark 1 ==="); n != 1 {
		t.Errorf("Expected exactly 1 occurrence of 'Benchmark 1', got %d", n)
	}
	if n := strings.Count(output, "=== Benchmark 2 ==="); n != 1 {
		t.Errorf("Expected exactly 1 occurrence of 'Benchmark 2', got %d", n)
	}
}
// TestPrintResultEdgeCases tests edge cases for PrintResult.
//
// Covered cases: an all-zero Result, very large values (1h duration,
// 1 GiB of bytes), and an empty benchmark name.
func TestPrintResultEdgeCases(t *testing.T) {
	cases := []struct {
		name   string
		result *Result
		want   []string
	}{
		{
			name: "zero values",
			result: &Result{
				Name:           "Zero Benchmark",
				Duration:       0,
				FilesProcessed: 0,
				BytesProcessed: 0,
			},
			want: []string{
				"=== Zero Benchmark ===",
				"Duration: 0s",
				"Files Processed: 0",
				"Bytes Processed: 0",
				"0.00 MB",
			},
		},
		{
			name: "large values",
			result: &Result{
				Name:           "Large Benchmark",
				Duration:       time.Hour,
				FilesProcessed: 1000000,
				BytesProcessed: 1073741824, // 1 GiB
			},
			want: []string{
				"=== Large Benchmark ===",
				"Duration: 1h0m0s",
				"Files Processed: 1000000",
				"Bytes Processed: 1073741824",
				"1024.00 MB",
			},
		},
		{
			name: "empty name",
			result: &Result{
				Name:           "",
				Duration:       100 * time.Millisecond,
				FilesProcessed: 10,
				BytesProcessed: 1024,
			},
			want: []string{
				"=== ===", // empty name between the === markers
				"Duration: 100ms",
				"Files Processed: 10",
			},
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			out := capturedOutput(t, func() { PrintResult(tc.result) })
			verifyOutputContains(t, tc.name, out, tc.want)
		})
	}
}
// TestPrintSuiteEdgeCases tests edge cases for PrintSuite.
//
// Covered cases: a suite with no results, and a suite whose name is empty
// but still contains one result.
func TestPrintSuiteEdgeCases(t *testing.T) {
	cases := []struct {
		name  string
		suite *Suite
		want  []string
	}{
		{
			name: "empty suite",
			suite: &Suite{
				Name:    "Empty Suite",
				Results: []Result{},
			},
			want: []string{
				"=== Empty Suite ===",
			},
		},
		{
			name: "suite with empty name",
			suite: &Suite{
				Name: "",
				Results: []Result{
					{
						Name:           "Single Benchmark",
						Duration:       200 * time.Millisecond,
						FilesProcessed: 20,
						BytesProcessed: 2048,
					},
				},
			},
			want: []string{
				"=== ===", // empty suite name between the === markers
				"=== Single Benchmark ===",
			},
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			out := capturedOutput(t, func() { PrintSuite(tc.suite) })
			verifyOutputContains(t, tc.name, out, tc.want)
		})
	}
}
// TestRunAllBenchmarks tests the RunAllBenchmarks function.
//
// It seeds a temp directory with a few representative files, runs the full
// benchmark suite against it, and verifies both the returned error and the
// progress messages written to stdout.
func TestRunAllBenchmarks(t *testing.T) {
	// Create a temporary directory with some test files.
	srcDir := t.TempDir()
	testFiles := []struct {
		name    string
		content string
	}{
		{shared.TestFileMainGo, "package main\nfunc main() {}"},
		{shared.TestFile2Name, "Hello World"},
		{shared.TestFile3Name, "# Test Markdown"},
	}
	for _, file := range testFiles {
		filePath := filepath.Join(srcDir, file.name)
		if err := os.WriteFile(filePath, []byte(file.content), 0o644); err != nil {
			t.Fatalf("Failed to create test file %s: %v", file.name, err)
		}
	}

	// Reuse the shared stdout-capture helper instead of duplicating the
	// os.Pipe plumbing inline; the benchmark error is recorded through the
	// closure so it can be checked after capture completes.
	var runErr error
	output := capturedOutput(t, func() { runErr = RunAllBenchmarks(srcDir) })

	// Check for error.
	if runErr != nil {
		t.Errorf("RunAllBenchmarks failed: %v", runErr)
	}

	// Verify expected output content.
	verifyOutputContains(t, "TestRunAllBenchmarks", output, []string{
		"Running gibidify benchmark suite...",
		"Running file collection benchmark...",
		"Running format benchmarks...",
		"Running concurrency benchmarks...",
	})

	// The function should not panic and should complete successfully.
	t.Log("RunAllBenchmarks completed successfully with output captured")
}

View File

@@ -1,4 +1,3 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
@@ -7,11 +6,10 @@ import (
"path/filepath"
"strings"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// ErrorFormatter handles CLI-friendly error formatting with suggestions.
// This is not an error type itself; it formats existing errors for display.
type ErrorFormatter struct {
ui *UIManager
}
@@ -28,10 +26,8 @@ func (ef *ErrorFormatter) FormatError(err error) {
}
// Handle structured errors
structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
if structErr, ok := err.(*utils.StructuredError); ok {
ef.formatStructuredError(structErr)
return
}
@@ -40,12 +36,12 @@ func (ef *ErrorFormatter) FormatError(err error) {
}
// formatStructuredError formats a structured error with context and suggestions.
func (ef *ErrorFormatter) formatStructuredError(err *shared.StructuredError) {
func (ef *ErrorFormatter) formatStructuredError(err *utils.StructuredError) {
// Print main error
ef.ui.PrintError(shared.CLIMsgErrorFormat, err.Message)
ef.ui.PrintError("Error: %s", err.Message)
// Print error type and code
if err.Type != shared.ErrorTypeUnknown || err.Code != "" {
if err.Type != utils.ErrorTypeUnknown || err.Code != "" {
ef.ui.PrintInfo("Type: %s, Code: %s", err.Type.String(), err.Code)
}
@@ -68,20 +64,20 @@ func (ef *ErrorFormatter) formatStructuredError(err *shared.StructuredError) {
// formatGenericError formats a generic error.
func (ef *ErrorFormatter) formatGenericError(err error) {
ef.ui.PrintError(shared.CLIMsgErrorFormat, err.Error())
ef.ui.PrintError("Error: %s", err.Error())
ef.provideGenericSuggestions(err)
}
// provideSuggestions provides helpful suggestions based on the error.
func (ef *ErrorFormatter) provideSuggestions(err *shared.StructuredError) {
func (ef *ErrorFormatter) provideSuggestions(err *utils.StructuredError) {
switch err.Type {
case shared.ErrorTypeFileSystem:
case utils.ErrorTypeFileSystem:
ef.provideFileSystemSuggestions(err)
case shared.ErrorTypeValidation:
case utils.ErrorTypeValidation:
ef.provideValidationSuggestions(err)
case shared.ErrorTypeProcessing:
case utils.ErrorTypeProcessing:
ef.provideProcessingSuggestions(err)
case shared.ErrorTypeIO:
case utils.ErrorTypeIO:
ef.provideIOSuggestions(err)
default:
ef.provideDefaultSuggestions()
@@ -89,17 +85,17 @@ func (ef *ErrorFormatter) provideSuggestions(err *shared.StructuredError) {
}
// provideFileSystemSuggestions provides suggestions for file system errors.
func (ef *ErrorFormatter) provideFileSystemSuggestions(err *shared.StructuredError) {
func (ef *ErrorFormatter) provideFileSystemSuggestions(err *utils.StructuredError) {
filePath := err.FilePath
ef.ui.PrintWarning(shared.CLIMsgSuggestions)
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case shared.CodeFSAccess:
case utils.CodeFSAccess:
ef.suggestFileAccess(filePath)
case shared.CodeFSPathResolution:
case utils.CodeFSPathResolution:
ef.suggestPathResolution(filePath)
case shared.CodeFSNotFound:
case utils.CodeFSNotFound:
ef.suggestFileNotFound(filePath)
default:
ef.suggestFileSystemGeneral(filePath)
@@ -107,31 +103,31 @@ func (ef *ErrorFormatter) provideFileSystemSuggestions(err *shared.StructuredErr
}
// provideValidationSuggestions provides suggestions for validation errors.
func (ef *ErrorFormatter) provideValidationSuggestions(err *shared.StructuredError) {
ef.ui.PrintWarning(shared.CLIMsgSuggestions)
func (ef *ErrorFormatter) provideValidationSuggestions(err *utils.StructuredError) {
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case shared.CodeValidationFormat:
case utils.CodeValidationFormat:
ef.ui.printf(" • Use a supported format: markdown, json, yaml\n")
ef.ui.printf(" • Example: -format markdown\n")
case shared.CodeValidationSize:
case utils.CodeValidationSize:
ef.ui.printf(" • Increase file size limit in config.yaml\n")
ef.ui.printf(" • Use smaller files or exclude large files\n")
default:
ef.ui.printf(shared.CLIMsgCheckCommandLineArgs)
ef.ui.printf(shared.CLIMsgRunWithHelp)
ef.ui.printf(" • Check your command line arguments\n")
ef.ui.printf(" • Run with --help for usage information\n")
}
}
// provideProcessingSuggestions provides suggestions for processing errors.
func (ef *ErrorFormatter) provideProcessingSuggestions(err *shared.StructuredError) {
ef.ui.PrintWarning(shared.CLIMsgSuggestions)
func (ef *ErrorFormatter) provideProcessingSuggestions(err *utils.StructuredError) {
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case shared.CodeProcessingCollection:
case utils.CodeProcessingCollection:
ef.ui.printf(" • Check if the source directory exists and is readable\n")
ef.ui.printf(" • Verify directory permissions\n")
case shared.CodeProcessingFileRead:
case utils.CodeProcessingFileRead:
ef.ui.printf(" • Check file permissions\n")
ef.ui.printf(" • Verify the file is not corrupted\n")
default:
@@ -141,24 +137,24 @@ func (ef *ErrorFormatter) provideProcessingSuggestions(err *shared.StructuredErr
}
// provideIOSuggestions provides suggestions for I/O errors.
func (ef *ErrorFormatter) provideIOSuggestions(err *shared.StructuredError) {
ef.ui.PrintWarning(shared.CLIMsgSuggestions)
func (ef *ErrorFormatter) provideIOSuggestions(err *utils.StructuredError) {
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case shared.CodeIOFileCreate:
case utils.CodeIOFileCreate:
ef.ui.printf(" • Check if the destination directory exists\n")
ef.ui.printf(" • Verify write permissions for the output file\n")
ef.ui.printf(" • Ensure sufficient disk space\n")
case shared.CodeIOWrite:
case utils.CodeIOWrite:
ef.ui.printf(" • Check available disk space\n")
ef.ui.printf(" • Verify write permissions\n")
default:
ef.ui.printf(shared.CLIMsgCheckFilePermissions)
ef.ui.printf(" • Check file/directory permissions\n")
ef.ui.printf(" • Verify available disk space\n")
}
}
// Helper methods for specific suggestions.
// Helper methods for specific suggestions
func (ef *ErrorFormatter) suggestFileAccess(filePath string) {
ef.ui.printf(" • Check if the path exists: %s\n", filePath)
ef.ui.printf(" • Verify read permissions\n")
@@ -181,16 +177,9 @@ func (ef *ErrorFormatter) suggestPathResolution(filePath string) {
func (ef *ErrorFormatter) suggestFileNotFound(filePath string) {
ef.ui.printf(" • Check if the file/directory exists: %s\n", filePath)
if filePath == "" {
return
}
if filePath != "" {
dir := filepath.Dir(filePath)
entries, err := os.ReadDir(dir)
if err != nil {
return
}
if entries, err := os.ReadDir(dir); err == nil {
ef.ui.printf(" • Similar files in %s:\n", dir)
count := 0
for _, entry := range entries {
@@ -202,10 +191,12 @@ func (ef *ErrorFormatter) suggestFileNotFound(filePath string) {
count++
}
}
}
}
}
func (ef *ErrorFormatter) suggestFileSystemGeneral(filePath string) {
ef.ui.printf(shared.CLIMsgCheckFilePermissions)
ef.ui.printf(" • Check file/directory permissions\n")
ef.ui.printf(" • Verify the path is correct\n")
if filePath != "" {
ef.ui.printf(" • Path: %s\n", filePath)
@@ -214,8 +205,8 @@ func (ef *ErrorFormatter) suggestFileSystemGeneral(filePath string) {
// provideDefaultSuggestions provides general suggestions.
func (ef *ErrorFormatter) provideDefaultSuggestions() {
ef.ui.printf(shared.CLIMsgCheckCommandLineArgs)
ef.ui.printf(shared.CLIMsgRunWithHelp)
ef.ui.printf(" • Check your command line arguments\n")
ef.ui.printf(" • Run with --help for usage information\n")
ef.ui.printf(" • Try with -concurrency 1 to reduce resource usage\n")
}
@@ -223,12 +214,12 @@ func (ef *ErrorFormatter) provideDefaultSuggestions() {
func (ef *ErrorFormatter) provideGenericSuggestions(err error) {
errorMsg := err.Error()
ef.ui.PrintWarning(shared.CLIMsgSuggestions)
ef.ui.PrintWarning("Suggestions:")
// Pattern matching for common errors
switch {
case strings.Contains(errorMsg, "permission denied"):
ef.ui.printf(shared.CLIMsgCheckFilePermissions)
ef.ui.printf(" • Check file/directory permissions\n")
ef.ui.printf(" • Try running with appropriate privileges\n")
case strings.Contains(errorMsg, "no such file or directory"):
ef.ui.printf(" • Verify the file/directory path is correct\n")
@@ -243,16 +234,16 @@ func (ef *ErrorFormatter) provideGenericSuggestions(err error) {
// CLI-specific error types
// MissingSourceError represents a missing source directory error.
type MissingSourceError struct{}
// CLIMissingSourceError represents a missing source directory error.
type CLIMissingSourceError struct{}
func (e MissingSourceError) Error() string {
func (e CLIMissingSourceError) Error() string {
return "source directory is required"
}
// NewCLIMissingSourceError creates a new CLI missing source error with suggestions.
func NewCLIMissingSourceError() error {
return &MissingSourceError{}
return &CLIMissingSourceError{}
}
// IsUserError checks if an error is a user input error that should be handled gracefully.
@@ -262,17 +253,16 @@ func IsUserError(err error) bool {
}
// Check for specific user error types
var cliErr *MissingSourceError
var cliErr *CLIMissingSourceError
if errors.As(err, &cliErr) {
return true
}
// Check for structured errors that are user-facing
structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
return structErr.Type == shared.ErrorTypeValidation ||
structErr.Code == shared.CodeValidationFormat ||
structErr.Code == shared.CodeValidationSize
if structErr, ok := err.(*utils.StructuredError); ok {
return structErr.Type == utils.ErrorTypeValidation ||
structErr.Code == utils.CodeValidationFormat ||
structErr.Code == utils.CodeValidationSize
}
// Check error message patterns

View File

@@ -1,744 +0,0 @@
package cli
import (
"bytes"
"errors"
"os"
"path/filepath"
"strings"
"testing"
"github.com/ivuorinen/gibidify/shared"
)
func TestNewErrorFormatter(t *testing.T) {
ui := NewUIManager()
formatter := NewErrorFormatter(ui)
if formatter == nil {
t.Error("NewErrorFormatter() returned nil")
return
}
if formatter.ui != ui {
t.Error("NewErrorFormatter() did not set ui manager correctly")
}
}
func TestErrorFormatterFormatError(t *testing.T) {
tests := []struct {
name string
err error
expectedOutput []string // Substrings that should be present in output
}{
{
name: "nil error",
err: nil,
expectedOutput: []string{}, // Should produce no output
},
{
name: "structured error with context",
err: &shared.StructuredError{
Type: shared.ErrorTypeFileSystem,
Code: shared.CodeFSAccess,
Message: shared.TestErrCannotAccessFile,
FilePath: shared.TestPathBase,
Context: map[string]any{
"permission": "0000",
"owner": "root",
},
},
expectedOutput: []string{
"✗ Error: " + shared.TestErrCannotAccessFile,
"Type: FileSystem, Code: ACCESS_DENIED",
"File: " + shared.TestPathBase,
"Context:",
"permission: 0000",
"owner: root",
shared.TestSuggestionsWarning,
"Check if the path exists",
},
},
{
name: "validation error",
err: &shared.StructuredError{
Type: shared.ErrorTypeValidation,
Code: shared.CodeValidationFormat,
Message: "invalid output format",
},
expectedOutput: []string{
"✗ Error: invalid output format",
"Type: Validation, Code: FORMAT",
shared.TestSuggestionsWarning,
"Use a supported format: markdown, json, yaml",
},
},
{
name: "processing error",
err: &shared.StructuredError{
Type: shared.ErrorTypeProcessing,
Code: shared.CodeProcessingCollection,
Message: "failed to collect files",
},
expectedOutput: []string{
"✗ Error: failed to collect files",
"Type: Processing, Code: COLLECTION",
shared.TestSuggestionsWarning,
"Check if the source directory exists",
},
},
{
name: "I/O error",
err: &shared.StructuredError{
Type: shared.ErrorTypeIO,
Code: shared.CodeIOFileCreate,
Message: "cannot create output file",
},
expectedOutput: []string{
"✗ Error: cannot create output file",
"Type: IO, Code: FILE_CREATE",
shared.TestSuggestionsWarning,
"Check if the destination directory exists",
},
},
{
name: "generic error with permission denied",
err: errors.New("permission denied: access to /secret/file"),
expectedOutput: []string{
"✗ Error: permission denied: access to /secret/file",
shared.TestSuggestionsWarning,
shared.TestSuggestCheckPermissions,
"Try running with appropriate privileges",
},
},
{
name: "generic error with file not found",
err: errors.New("no such file or directory"),
expectedOutput: []string{
"✗ Error: no such file or directory",
shared.TestSuggestionsWarning,
"Verify the file/directory path is correct",
"Check if the file exists",
},
},
{
name: "generic error with flag redefined",
err: errors.New("flag provided but not defined: -invalid"),
expectedOutput: []string{
"✗ Error: flag provided but not defined: -invalid",
shared.TestSuggestionsWarning,
shared.TestSuggestCheckArguments,
"Run with --help for usage information",
},
},
{
name: "unknown generic error",
err: errors.New("some unknown error"),
expectedOutput: []string{
"✗ Error: some unknown error",
shared.TestSuggestionsWarning,
shared.TestSuggestCheckArguments,
"Run with --help for usage information",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Capture output
ui, output := createTestUI()
formatter := NewErrorFormatter(ui)
formatter.FormatError(tt.err)
outputStr := output.String()
// For nil error, output should be empty
if tt.err == nil {
if outputStr != "" {
t.Errorf("Expected no output for nil error, got: %s", outputStr)
}
return
}
// Check that all expected substrings are present
for _, expected := range tt.expectedOutput {
if !strings.Contains(outputStr, expected) {
t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
}
}
})
}
}
func TestErrorFormatterSuggestFileAccess(t *testing.T) {
ui, output := createTestUI()
formatter := NewErrorFormatter(ui)
// Create a temporary file to test with existing file
tempDir := t.TempDir()
tempFile, err := os.Create(filepath.Join(tempDir, "testfile"))
if err != nil {
t.Fatalf("Failed to create temp file: %v", err)
}
if err := tempFile.Close(); err != nil {
t.Errorf("Failed to close temp file: %v", err)
}
tests := []struct {
name string
filePath string
expectedOutput []string
}{
{
name: shared.TestErrEmptyFilePath,
filePath: "",
expectedOutput: []string{
shared.TestSuggestCheckExists,
"Verify read permissions",
},
},
{
name: "existing file",
filePath: tempFile.Name(),
expectedOutput: []string{
shared.TestSuggestCheckExists,
"Path exists but may not be accessible",
"Mode:",
},
},
{
name: "nonexistent file",
filePath: "/nonexistent/file",
expectedOutput: []string{
shared.TestSuggestCheckExists,
"Verify read permissions",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
output.Reset()
formatter.suggestFileAccess(tt.filePath)
outputStr := output.String()
for _, expected := range tt.expectedOutput {
if !strings.Contains(outputStr, expected) {
t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
}
}
})
}
}
func TestErrorFormatterSuggestFileNotFound(t *testing.T) {
// Create a test directory with some files
tempDir := t.TempDir()
testFiles := []string{"similar-file.txt", "another-similar.go", "different.md"}
for _, filename := range testFiles {
file, err := os.Create(filepath.Join(tempDir, filename))
if err != nil {
t.Fatalf("Failed to create test file %s: %v", filename, err)
}
if err := file.Close(); err != nil {
t.Errorf("Failed to close test file %s: %v", filename, err)
}
}
ui, output := createTestUI()
formatter := NewErrorFormatter(ui)
tests := []struct {
name string
filePath string
expectedOutput []string
}{
{
name: shared.TestErrEmptyFilePath,
filePath: "",
expectedOutput: []string{
shared.TestSuggestCheckFileExists,
},
},
{
name: "file with similar matches",
filePath: tempDir + "/similar",
expectedOutput: []string{
shared.TestSuggestCheckFileExists,
"Similar files in",
"similar-file.txt",
},
},
{
name: "nonexistent directory",
filePath: "/nonexistent/dir/file.txt",
expectedOutput: []string{
shared.TestSuggestCheckFileExists,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
output.Reset()
formatter.suggestFileNotFound(tt.filePath)
outputStr := output.String()
for _, expected := range tt.expectedOutput {
if !strings.Contains(outputStr, expected) {
t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
}
}
})
}
}
func TestErrorFormatterProvideSuggestions(t *testing.T) {
ui, output := createTestUI()
formatter := NewErrorFormatter(ui)
tests := []struct {
name string
err *shared.StructuredError
expectSuggestions []string
}{
{
name: "filesystem error",
err: &shared.StructuredError{
Type: shared.ErrorTypeFileSystem,
Code: shared.CodeFSAccess,
},
expectSuggestions: []string{shared.TestSuggestionsPlain, "Check if the path exists"},
},
{
name: "validation error",
err: &shared.StructuredError{
Type: shared.ErrorTypeValidation,
Code: shared.CodeValidationFormat,
},
expectSuggestions: []string{shared.TestSuggestionsPlain, "Use a supported format"},
},
{
name: "processing error",
err: &shared.StructuredError{
Type: shared.ErrorTypeProcessing,
Code: shared.CodeProcessingCollection,
},
expectSuggestions: []string{shared.TestSuggestionsPlain, "Check if the source directory exists"},
},
{
name: "I/O error",
err: &shared.StructuredError{
Type: shared.ErrorTypeIO,
Code: shared.CodeIOWrite,
},
expectSuggestions: []string{shared.TestSuggestionsPlain, "Check available disk space"},
},
{
name: "unknown error type",
err: &shared.StructuredError{
Type: shared.ErrorTypeUnknown,
},
expectSuggestions: []string{"Check your command line arguments"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
output.Reset()
formatter.provideSuggestions(tt.err)
outputStr := output.String()
for _, expected := range tt.expectSuggestions {
if !strings.Contains(outputStr, expected) {
t.Errorf(shared.TestMsgOutputMissingSubstring, expected, outputStr)
}
}
})
}
}
func TestMissingSourceError(t *testing.T) {
err := NewCLIMissingSourceError()
if err == nil {
t.Error("NewCLIMissingSourceError() returned nil")
return
}
expectedMsg := "source directory is required"
if err.Error() != expectedMsg {
t.Errorf("MissingSourceError.Error() = %v, want %v", err.Error(), expectedMsg)
}
// Test type assertion
var cliErr *MissingSourceError
if !errors.As(err, &cliErr) {
t.Error("NewCLIMissingSourceError() did not return *MissingSourceError type")
}
}
func TestIsUserError(t *testing.T) {
tests := []struct {
name string
err error
expected bool
}{
{
name: "nil error",
err: nil,
expected: false,
},
{
name: "CLI missing source error",
err: NewCLIMissingSourceError(),
expected: true,
},
{
name: "validation structured error",
err: &shared.StructuredError{
Type: shared.ErrorTypeValidation,
},
expected: true,
},
{
name: "validation format structured error",
err: &shared.StructuredError{
Code: shared.CodeValidationFormat,
},
expected: true,
},
{
name: "validation size structured error",
err: &shared.StructuredError{
Code: shared.CodeValidationSize,
},
expected: true,
},
{
name: "non-validation structured error",
err: &shared.StructuredError{
Type: shared.ErrorTypeFileSystem,
},
expected: false,
},
{
name: "generic error with flag keyword",
err: errors.New("flag provided but not defined"),
expected: true,
},
{
name: "generic error with usage keyword",
err: errors.New("usage: command [options]"),
expected: true,
},
{
name: "generic error with invalid argument",
err: errors.New("invalid argument provided"),
expected: true,
},
{
name: "generic error with file not found",
err: errors.New("file not found"),
expected: true,
},
{
name: "generic error with permission denied",
err: errors.New("permission denied"),
expected: true,
},
{
name: "system error not user-facing",
err: errors.New("internal system error"),
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := IsUserError(tt.err)
if result != tt.expected {
t.Errorf("IsUserError(%v) = %v, want %v", tt.err, result, tt.expected)
}
})
}
}
// Helper functions for testing
// createTestUI creates a UIManager with captured output for testing.
func createTestUI() (*UIManager, *bytes.Buffer) {
output := &bytes.Buffer{}
ui := &UIManager{
enableColors: false, // Disable colors for consistent testing
enableProgress: false, // Disable progress for testing
output: output,
}
return ui, output
}
// TestErrorFormatterIntegration tests the complete error formatting workflow.
func TestErrorFormatterIntegration(t *testing.T) {
ui, output := createTestUI()
formatter := NewErrorFormatter(ui)
// Test a complete workflow with a complex structured error
structuredErr := &shared.StructuredError{
Type: shared.ErrorTypeFileSystem,
Code: shared.CodeFSNotFound,
Message: "source directory not found",
FilePath: "/missing/directory",
Context: map[string]any{
"attempted_path": "/missing/directory",
"current_dir": "/working/dir",
},
}
formatter.FormatError(structuredErr)
outputStr := output.String()
// Verify all components are present
expectedComponents := []string{
"✗ Error: source directory not found",
"Type: FileSystem, Code: NOT_FOUND",
"File: /missing/directory",
"Context:",
"attempted_path: /missing/directory",
"current_dir: /working/dir",
shared.TestSuggestionsWarning,
"Check if the file/directory exists",
}
for _, expected := range expectedComponents {
if !strings.Contains(outputStr, expected) {
t.Errorf("Integration test output missing expected component: %q\nFull output:\n%s", expected, outputStr)
}
}
}
// TestErrorFormatter_SuggestPathResolution tests the suggestPathResolution function.
func TestErrorFormatterSuggestPathResolution(t *testing.T) {
tests := []struct {
name string
filePath string
expectedOutput []string
}{
{
name: "with file path",
filePath: "relative/path/file.txt",
expectedOutput: []string{
shared.TestSuggestUseAbsolutePath,
"Try:",
},
},
{
name: shared.TestErrEmptyFilePath,
filePath: "",
expectedOutput: []string{
shared.TestSuggestUseAbsolutePath,
},
},
{
name: "current directory reference",
filePath: "./file.txt",
expectedOutput: []string{
shared.TestSuggestUseAbsolutePath,
"Try:",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ui, output := createTestUI()
formatter := NewErrorFormatter(ui)
// Call the method
formatter.suggestPathResolution(tt.filePath)
// Check output
outputStr := output.String()
for _, expected := range tt.expectedOutput {
if !strings.Contains(outputStr, expected) {
t.Errorf("suggestPathResolution output missing: %q\nFull output: %q", expected, outputStr)
}
}
})
}
}
// TestErrorFormatter_SuggestFileSystemGeneral tests the suggestFileSystemGeneral function.
func TestErrorFormatterSuggestFileSystemGeneral(t *testing.T) {
	cases := []struct {
		name           string
		filePath       string
		expectedOutput []string
	}{
		{
			name:     "with file path",
			filePath: "/path/to/file.txt",
			expectedOutput: []string{
				shared.TestSuggestCheckPermissions,
				shared.TestSuggestVerifyPath,
				"Path: /path/to/file.txt",
			},
		},
		{
			name:     shared.TestErrEmptyFilePath,
			filePath: "",
			expectedOutput: []string{
				shared.TestSuggestCheckPermissions,
				shared.TestSuggestVerifyPath,
			},
		},
		{
			name:     "relative path",
			filePath: "../parent/file.txt",
			expectedOutput: []string{
				shared.TestSuggestCheckPermissions,
				shared.TestSuggestVerifyPath,
				"Path: ../parent/file.txt",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ui, buf := createTestUI()
			formatter := NewErrorFormatter(ui)

			// Exercise the suggestion helper directly.
			formatter.suggestFileSystemGeneral(tc.filePath)

			got := buf.String()
			for _, want := range tc.expectedOutput {
				if !strings.Contains(got, want) {
					t.Errorf("suggestFileSystemGeneral output missing: %q\nFull output: %q", want, got)
				}
			}
			// Without a file path, no "Path:" line may be emitted at all.
			if tc.filePath == "" && strings.Contains(got, "Path:") {
				t.Error("suggestFileSystemGeneral should not include Path line when filePath is empty")
			}
		})
	}
}
// TestErrorFormatter_SuggestionFunctions_Integration tests the integration of suggestion functions.
func TestErrorFormatterSuggestionFunctionsIntegration(t *testing.T) {
	// Verify suggestion helpers fire as part of the full error-formatting workflow.
	cases := []struct {
		name                string
		err                 *shared.StructuredError
		expectedSuggestions []string
	}{
		{
			name: "filesystem path resolution error",
			err: &shared.StructuredError{
				Type:     shared.ErrorTypeFileSystem,
				Code:     shared.CodeFSPathResolution,
				Message:  "path resolution failed",
				FilePath: "relative/path",
			},
			expectedSuggestions: []string{
				shared.TestSuggestUseAbsolutePath,
				"Try:",
			},
		},
		{
			name: "filesystem unknown error",
			err: &shared.StructuredError{
				Type:     shared.ErrorTypeFileSystem,
				Code:     "UNKNOWN_FS_ERROR", // This will trigger default case
				Message:  "unknown filesystem error",
				FilePath: "/some/path",
			},
			expectedSuggestions: []string{
				shared.TestSuggestCheckPermissions,
				shared.TestSuggestVerifyPath,
				"Path: /some/path",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ui, buf := createTestUI()
			formatter := NewErrorFormatter(ui)

			// Formatting the error should emit the suggestions as a side effect.
			formatter.FormatError(tc.err)

			got := buf.String()
			for _, want := range tc.expectedSuggestions {
				if !strings.Contains(got, want) {
					t.Errorf("Integrated suggestion missing: %q\nFull output: %q", want, got)
				}
			}
		})
	}
}
// Benchmarks for error formatting performance
// BenchmarkErrorFormatterFormatError benchmarks the FormatError method.
func BenchmarkErrorFormatterFormatError(b *testing.B) {
	ui, _ := createTestUI()
	formatter := NewErrorFormatter(ui)
	err := &shared.StructuredError{
		Type:     shared.ErrorTypeFileSystem,
		Code:     shared.CodeFSAccess,
		Message:  shared.TestErrCannotAccessFile,
		FilePath: shared.TestPathBase,
	}
	// b.Loop starts the benchmark timer on its first call, so the setup
	// above is excluded automatically; the explicit b.ResetTimer() the
	// original carried was redundant and has been removed.
	for b.Loop() {
		formatter.FormatError(err)
	}
}
// BenchmarkErrorFormatterFormatErrorWithContext benchmarks error formatting with context.
func BenchmarkErrorFormatterFormatErrorWithContext(b *testing.B) {
	ui, _ := createTestUI()
	formatter := NewErrorFormatter(ui)
	err := &shared.StructuredError{
		Type:     shared.ErrorTypeValidation,
		Code:     shared.CodeValidationFormat,
		Message:  "validation failed",
		FilePath: shared.TestPathBase,
		Context: map[string]any{
			"field": "format",
			"value": "invalid",
		},
	}
	// b.Loop excludes the setup above from the timed region automatically,
	// making the explicit b.ResetTimer() redundant; it has been removed.
	for b.Loop() {
		formatter.FormatError(err)
	}
}
// BenchmarkErrorFormatterProvideSuggestions benchmarks suggestion generation.
func BenchmarkErrorFormatterProvideSuggestions(b *testing.B) {
	ui, _ := createTestUI()
	formatter := NewErrorFormatter(ui)
	err := &shared.StructuredError{
		Type:     shared.ErrorTypeFileSystem,
		Code:     shared.CodeFSAccess,
		Message:  shared.TestErrCannotAccessFile,
		FilePath: shared.TestPathBase,
	}
	// b.Loop excludes the setup above from the timed region automatically,
	// making the explicit b.ResetTimer() redundant; it has been removed.
	for b.Loop() {
		formatter.provideSuggestions(err)
	}
}

View File

@@ -1,14 +1,11 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"flag"
"fmt"
"os"
"runtime"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// Flags holds CLI flags values.
@@ -21,9 +18,7 @@ type Flags struct {
Format string
NoColors bool
NoProgress bool
NoUI bool
Verbose bool
LogLevel string
}
var (
@@ -31,15 +26,6 @@ var (
globalFlags *Flags
)
// ResetFlags resets the global flag parsing state for testing.
// This function should only be used in tests to ensure proper isolation.
func ResetFlags() {
flagsParsed = false
globalFlags = nil
// Reset default FlagSet to avoid duplicate flag registration across tests
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
}
// ParseFlags parses and validates CLI flags.
func ParseFlags() (*Flags, error) {
if flagsParsed {
@@ -48,20 +34,16 @@ func ParseFlags() (*Flags, error) {
flags := &Flags{}
flag.StringVar(&flags.SourceDir, shared.CLIArgSource, "", "Source directory to scan recursively")
flag.StringVar(&flags.SourceDir, "source", "", "Source directory to scan recursively")
flag.StringVar(&flags.Destination, "destination", "", "Output file to write aggregated code")
flag.StringVar(&flags.Prefix, "prefix", "", "Text to add at the beginning of the output file")
flag.StringVar(&flags.Suffix, "suffix", "", "Text to add at the end of the output file")
flag.StringVar(&flags.Format, shared.CLIArgFormat, shared.FormatJSON, "Output format (json, markdown, yaml)")
flag.IntVar(&flags.Concurrency, shared.CLIArgConcurrency, runtime.NumCPU(),
flag.StringVar(&flags.Format, "format", "markdown", "Output format (json, markdown, yaml)")
flag.IntVar(&flags.Concurrency, "concurrency", runtime.NumCPU(),
"Number of concurrent workers (default: number of CPU cores)")
flag.BoolVar(&flags.NoColors, "no-colors", false, "Disable colored output")
flag.BoolVar(&flags.NoProgress, "no-progress", false, "Disable progress bars")
flag.BoolVar(&flags.NoUI, "no-ui", false, "Disable all UI output (implies no-colors and no-progress)")
flag.BoolVar(&flags.Verbose, "verbose", false, "Enable verbose output")
flag.StringVar(
&flags.LogLevel, "log-level", string(shared.LogLevelWarn), "Set log level (debug, info, warn, error)",
)
flag.Parse()
@@ -75,7 +57,6 @@ func ParseFlags() (*Flags, error) {
flagsParsed = true
globalFlags = flags
return flags, nil
}
@@ -86,23 +67,18 @@ func (f *Flags) validate() error {
}
// Validate source path for security
if err := shared.ValidateSourcePath(f.SourceDir); err != nil {
return fmt.Errorf("validating source path: %w", err)
if err := utils.ValidateSourcePath(f.SourceDir); err != nil {
return err
}
// Validate output format
if err := config.ValidateOutputFormat(f.Format); err != nil {
return fmt.Errorf("validating output format: %w", err)
return err
}
// Validate concurrency
if err := config.ValidateConcurrency(f.Concurrency); err != nil {
return fmt.Errorf("validating concurrency: %w", err)
}
// Validate log level
if !shared.ValidateLogLevel(f.LogLevel) {
return fmt.Errorf("invalid log level: %s (must be: debug, info, warn, error)", f.LogLevel)
return err
}
return nil
@@ -111,17 +87,17 @@ func (f *Flags) validate() error {
// setDefaultDestination sets the default destination if not provided.
func (f *Flags) setDefaultDestination() error {
if f.Destination == "" {
absRoot, err := shared.AbsolutePath(f.SourceDir)
absRoot, err := utils.GetAbsolutePath(f.SourceDir)
if err != nil {
return fmt.Errorf("getting absolute path: %w", err)
return err
}
baseName := shared.BaseName(absRoot)
baseName := utils.GetBaseName(absRoot)
f.Destination = baseName + "." + f.Format
}
// Validate destination path for security
if err := shared.ValidateDestinationPath(f.Destination); err != nil {
return fmt.Errorf("validating destination path: %w", err)
if err := utils.ValidateDestinationPath(f.Destination); err != nil {
return err
}
return nil

View File

@@ -1,664 +0,0 @@
package cli
import (
	"flag"
	"os"
	"runtime"
	"slices"
	"strings"
	"testing"

	"github.com/ivuorinen/gibidify/shared"
	"github.com/ivuorinen/gibidify/testutil"
)
const testDirPlaceholder = "testdir"

// setupTestArgs prepares test arguments by replacing testdir with actual temp directory.
func setupTestArgs(t *testing.T, args []string, want *Flags) ([]string, *Flags) {
	t.Helper()
	// Without a -source flag there is nothing to rewrite.
	if !containsFlag(args, shared.TestCLIFlagSource) {
		return args, want
	}
	dir := t.TempDir()
	rewritten := replaceTestDirInArgs(args, dir)
	// Error test cases pass want == nil; keep it nil.
	if want == nil {
		return rewritten, nil
	}
	adjusted := updateWantFlags(*want, dir)
	return rewritten, &adjusted
}
// replaceTestDirInArgs replaces the testdir placeholder with the actual temp directory in args.
func replaceTestDirInArgs(args []string, tempDir string) []string {
	out := append([]string(nil), args...)
	// Only the first placeholder occurrence is rewritten, as before.
	for i, arg := range out {
		if arg == testDirPlaceholder {
			out[i] = tempDir
			break
		}
	}
	return out
}
// updateWantFlags updates the want flags with temp directory replacements.
func updateWantFlags(want Flags, tempDir string) Flags {
	// Only placeholder source directories need adjusting.
	if want.SourceDir != testDirPlaceholder {
		return want
	}
	result := want
	result.SourceDir = tempDir
	// A destination derived from the placeholder follows the temp directory's base name.
	if strings.HasPrefix(want.Destination, testDirPlaceholder+".") {
		result.Destination = testutil.BaseName(tempDir) + "." + want.Format
	}
	return result
}
// runParseFlagsTest runs a single parse flags test.
func runParseFlagsTest(t *testing.T, args []string, want *Flags, wantErr bool, errContains string) {
	t.Helper()
	// Capture and restore original os.Args
	savedArgs := os.Args
	defer func() { os.Args = savedArgs }()
	resetFlagsState()
	adjustedArgs, adjustedWant := setupTestArgs(t, args, want)
	setupCommandLineArgs(adjustedArgs)
	got, err := ParseFlags()
	switch {
	case wantErr && err == nil:
		t.Error("ParseFlags() expected error, got nil")
	case wantErr:
		if errContains != "" && !strings.Contains(err.Error(), errContains) {
			t.Errorf("ParseFlags() error = %v, want error containing %v", err, errContains)
		}
	case err != nil:
		t.Errorf("ParseFlags() unexpected error = %v", err)
	default:
		verifyFlags(t, got, adjustedWant)
	}
}
// TestParseFlags exercises ParseFlags end to end through runParseFlagsTest,
// covering successful parses (minimal and all flags set) and the main
// validation failures (missing source, bad format, bad concurrency).
func TestParseFlags(t *testing.T) {
	tests := []struct {
		name        string
		args        []string // CLI arguments; "testdir" is replaced by a temp dir in setupTestArgs
		want        *Flags   // expected parsed flags; nil for error cases
		wantErr     bool
		errContains string // substring the error must contain when wantErr is true
	}{
		{
			name: "valid basic flags",
			args: []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagFormat, "markdown"},
			want: &Flags{
				SourceDir:   "testdir",
				Format:      "markdown",
				Concurrency: runtime.NumCPU(),
				Destination: "testdir.markdown",
				LogLevel:    string(shared.LogLevelWarn),
			},
			wantErr: false,
		},
		{
			name: "valid with all flags",
			args: []string{
				shared.TestCLIFlagSource, "testdir",
				shared.TestCLIFlagDestination, shared.TestOutputMD,
				"-prefix", "# Header",
				"-suffix", "# Footer",
				shared.TestCLIFlagFormat, "json",
				shared.TestCLIFlagConcurrency, "4",
				"-verbose",
				"-no-colors",
				"-no-progress",
			},
			want: &Flags{
				SourceDir:   "testdir",
				Destination: shared.TestOutputMD,
				Prefix:      "# Header",
				Suffix:      "# Footer",
				Format:      "json",
				Concurrency: 4,
				Verbose:     true,
				NoColors:    true,
				NoProgress:  true,
				LogLevel:    string(shared.LogLevelWarn),
			},
			wantErr: false,
		},
		{
			name:        "missing source directory",
			args:        []string{shared.TestCLIFlagFormat, "markdown"},
			wantErr:     true,
			errContains: "source directory is required",
		},
		{
			name:        "invalid format",
			args:        []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagFormat, "invalid"},
			wantErr:     true,
			errContains: "validating output format",
		},
		{
			name:        "invalid concurrency zero",
			args:        []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagConcurrency, "0"},
			wantErr:     true,
			errContains: shared.TestOpValidatingConcurrency,
		},
		{
			name:        "negative concurrency",
			args:        []string{shared.TestCLIFlagSource, "testdir", shared.TestCLIFlagConcurrency, "-1"},
			wantErr:     true,
			errContains: shared.TestOpValidatingConcurrency,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				runParseFlagsTest(t, tt.args, tt.want, tt.wantErr, tt.errContains)
			},
		)
	}
}
// validateFlagsValidationResult validates flag validation test results.
func validateFlagsValidationResult(t *testing.T, err error, wantErr bool, errContains string) {
	t.Helper()
	switch {
	case wantErr && err == nil:
		t.Error("Flags.validate() expected error, got nil")
	case wantErr:
		// An error was expected and produced; optionally check its message.
		if errContains != "" && !strings.Contains(err.Error(), errContains) {
			t.Errorf("Flags.validate() error = %v, want error containing %v", err, errContains)
		}
	case err != nil:
		t.Errorf("Flags.validate() unexpected error = %v", err)
	}
}
// TestFlagsvalidate exercises Flags.validate directly, covering the valid
// case and each individual validation failure (missing source, bad format,
// bad concurrency, bad log level).
func TestFlagsvalidate(t *testing.T) {
	tempDir := t.TempDir()
	tests := []struct {
		name        string
		flags       *Flags
		wantErr     bool
		errContains string // substring the error must contain when wantErr is true
	}{
		{
			name: "valid flags",
			flags: &Flags{
				SourceDir:   tempDir,
				Format:      "markdown",
				Concurrency: 4,
				LogLevel:    "warn",
			},
			wantErr: false,
		},
		{
			name: "empty source directory",
			flags: &Flags{
				Format:      "markdown",
				Concurrency: 4,
				LogLevel:    "warn",
			},
			wantErr:     true,
			errContains: "source directory is required",
		},
		{
			name: "invalid format",
			flags: &Flags{
				SourceDir:   tempDir,
				Format:      "invalid",
				Concurrency: 4,
				LogLevel:    "warn",
			},
			wantErr:     true,
			errContains: "validating output format",
		},
		{
			name: "zero concurrency",
			flags: &Flags{
				SourceDir:   tempDir,
				Format:      "markdown",
				Concurrency: 0,
				LogLevel:    "warn",
			},
			wantErr:     true,
			errContains: shared.TestOpValidatingConcurrency,
		},
		{
			name: "negative concurrency",
			flags: &Flags{
				SourceDir:   tempDir,
				Format:      "json",
				Concurrency: -1,
				LogLevel:    "warn",
			},
			wantErr:     true,
			errContains: shared.TestOpValidatingConcurrency,
		},
		{
			name: "invalid log level",
			flags: &Flags{
				SourceDir:   tempDir,
				Format:      "json",
				Concurrency: 4,
				LogLevel:    "invalid",
			},
			wantErr:     true,
			errContains: "invalid log level",
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				err := tt.flags.validate()
				validateFlagsValidationResult(t, err, tt.wantErr, tt.errContains)
			},
		)
	}
}
// validateDefaultDestinationResult validates default destination test results.
func validateDefaultDestinationResult(
	t *testing.T,
	flags *Flags,
	err error,
	wantDestination string,
	wantErr bool,
	errContains string,
) {
	t.Helper()
	switch {
	case wantErr && err == nil:
		t.Error("Flags.setDefaultDestination() expected error, got nil")
	case wantErr:
		// An error was expected and produced; optionally check its message.
		if errContains != "" && !strings.Contains(err.Error(), errContains) {
			t.Errorf("Flags.setDefaultDestination() error = %v, want error containing %v", err, errContains)
		}
	case err != nil:
		t.Errorf("Flags.setDefaultDestination() unexpected error = %v", err)
	case flags.Destination != wantDestination:
		t.Errorf("Flags.Destination = %v, want %v", flags.Destination, wantDestination)
	}
}
// TestFlagssetDefaultDestination verifies that setDefaultDestination derives
// "<source base name>.<format>" when no destination is given and leaves an
// explicitly set destination untouched.
func TestFlagssetDefaultDestination(t *testing.T) {
	tempDir := t.TempDir()
	baseName := testutil.BaseName(tempDir)
	tests := []struct {
		name            string
		flags           *Flags
		wantDestination string
		wantErr         bool
		errContains     string
	}{
		{
			name: "set default destination markdown",
			flags: &Flags{
				SourceDir: tempDir,
				Format:    "markdown",
				LogLevel:  "warn",
			},
			wantDestination: baseName + ".markdown",
			wantErr:         false,
		},
		{
			name: "set default destination json",
			flags: &Flags{
				SourceDir: tempDir,
				Format:    "json",
				LogLevel:  "warn",
			},
			wantDestination: baseName + ".json",
			wantErr:         false,
		},
		{
			name: "set default destination yaml",
			flags: &Flags{
				SourceDir: tempDir,
				Format:    "yaml",
				LogLevel:  "warn",
			},
			wantDestination: baseName + ".yaml",
			wantErr:         false,
		},
		{
			name: "preserve existing destination",
			flags: &Flags{
				SourceDir:   tempDir,
				Format:      "yaml",
				Destination: "custom-output.yaml",
				LogLevel:    "warn",
			},
			wantDestination: "custom-output.yaml",
			wantErr:         false,
		},
		{
			name: "nonexistent source path still generates destination",
			flags: &Flags{
				SourceDir: "/nonexistent/path/that/should/not/exist",
				Format:    "markdown",
				LogLevel:  "warn",
			},
			wantDestination: "exist.markdown", // Based on filepath.Base of the path
			wantErr:         false,            // AbsolutePath doesn't validate existence, only converts to absolute
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				err := tt.flags.setDefaultDestination()
				validateDefaultDestinationResult(t, tt.flags, err, tt.wantDestination, tt.wantErr, tt.errContains)
			},
		)
	}
}
// TestParseFlagsSingleton verifies that repeated ParseFlags calls return the
// same Flags instance after the first successful parse.
func TestParseFlagsSingleton(t *testing.T) {
	// Capture and restore original os.Args
	savedArgs := os.Args
	defer func() { os.Args = savedArgs }()
	resetFlagsState()
	dir := t.TempDir()

	// First call parses the command line.
	setupCommandLineArgs([]string{shared.TestCLIFlagSource, dir, shared.TestCLIFlagFormat, "markdown"})
	first, err := ParseFlags()
	if err != nil {
		t.Fatalf("First ParseFlags() failed: %v", err)
	}

	// Second call must hand back the cached instance.
	second, err := ParseFlags()
	if err != nil {
		t.Fatalf("Second ParseFlags() failed: %v", err)
	}
	if first != second {
		t.Error("ParseFlags() should return singleton instance, got different pointers")
	}
}
// Helper functions
// resetFlagsState resets the global flags state for testing.
func resetFlagsState() {
flagsParsed = false
globalFlags = nil
// Reset the flag.CommandLine for clean testing (use ContinueOnError to match ResetFlags)
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
}
// setupCommandLineArgs sets up command line arguments for testing.
func setupCommandLineArgs(args []string) {
os.Args = append([]string{"gibidify"}, args...)
}
// containsFlag reports whether flagName is present in the arguments.
// The hand-rolled loop is replaced by the stdlib slices.Contains (the file
// already uses Go 1.21+ features), which is equivalent for a nil slice too.
func containsFlag(args []string, flagName string) bool {
	return slices.Contains(args, flagName)
}
// verifyFlags compares two Flags structs field by field for testing.
// The eleven copy-pasted if blocks of the original are collapsed into a
// comparison table; all compared fields are comparable (string, int, bool),
// so != on the boxed values behaves identically, and the failure messages
// keep the original "<Field> = %v, want %v" shape.
func verifyFlags(t *testing.T, got, want *Flags) {
	t.Helper()
	checks := []struct {
		name      string
		got, want any
	}{
		{"SourceDir", got.SourceDir, want.SourceDir},
		{"Destination", got.Destination, want.Destination},
		{"Prefix", got.Prefix, want.Prefix},
		{"Suffix", got.Suffix, want.Suffix},
		{"Format", got.Format, want.Format},
		{"Concurrency", got.Concurrency, want.Concurrency},
		{"NoColors", got.NoColors, want.NoColors},
		{"NoProgress", got.NoProgress, want.NoProgress},
		{"Verbose", got.Verbose, want.Verbose},
		{"LogLevel", got.LogLevel, want.LogLevel},
		{"NoUI", got.NoUI, want.NoUI},
	}
	for _, c := range checks {
		if c.got != c.want {
			t.Errorf("%s = %v, want %v", c.name, c.got, c.want)
		}
	}
}
// TestResetFlags tests the ResetFlags function.
// It verifies that ResetFlags clears the package-level parsing state
// (flagsParsed and globalFlags) under several scenarios; the original global
// state is saved up front and restored via defer so other tests are unaffected.
func TestResetFlags(t *testing.T) {
	// Save original state
	originalArgs := os.Args
	originalFlagsParsed := flagsParsed
	originalGlobalFlags := globalFlags
	originalCommandLine := flag.CommandLine
	defer func() {
		// Restore original state
		os.Args = originalArgs
		flagsParsed = originalFlagsParsed
		globalFlags = originalGlobalFlags
		flag.CommandLine = originalCommandLine
	}()
	// Simplified test cases to reduce complexity
	testCases := map[string]func(t *testing.T){
		"reset after flags have been parsed": func(t *testing.T) {
			srcDir := t.TempDir()
			testutil.CreateTestFile(t, srcDir, "test.txt", []byte("test"))
			os.Args = []string{"test", "-source", srcDir, "-destination", "out.json"}
			// Parse flags first
			if _, err := ParseFlags(); err != nil {
				t.Fatalf("Setup failed: %v", err)
			}
		},
		"reset with clean state": func(t *testing.T) {
			if flagsParsed {
				t.Log("Note: flagsParsed was already true at start")
			}
		},
		"multiple resets": func(t *testing.T) {
			srcDir := t.TempDir()
			testutil.CreateTestFile(t, srcDir, "test.txt", []byte("test"))
			os.Args = []string{"test", "-source", srcDir, "-destination", "out.json"}
			if _, err := ParseFlags(); err != nil {
				t.Fatalf("Setup failed: %v", err)
			}
		},
	}
	// NOTE(review): map iteration order is random, so the scenarios run in a
	// different order between runs; each must therefore be order-independent.
	for name, setup := range testCases {
		t.Run(name, func(t *testing.T) {
			// Setup test scenario
			setup(t)
			// Call ResetFlags
			ResetFlags()
			// Basic verification that reset worked
			if flagsParsed {
				t.Error("flagsParsed should be false after ResetFlags()")
			}
			if globalFlags != nil {
				t.Error("globalFlags should be nil after ResetFlags()")
			}
		})
	}
}
// TestResetFlags_Integration tests ResetFlags in integration scenarios.
func TestResetFlagsIntegration(t *testing.T) {
	// This test verifies that ResetFlags properly resets the internal state
	// to allow multiple calls to ParseFlags in test scenarios. The mechanics
	// are exercised in TestResetFlags; this test documents the integration
	// contract that ResetFlags offers to other tests.
	t.Run("state_reset_behavior", func(t *testing.T) {
		t.Log("ResetFlags integration behavior:")
		t.Log("1. Resets flagsParsed to false")
		t.Log("2. Sets globalFlags to nil")
		t.Log("3. Creates new flag.CommandLine FlagSet")
		t.Log("4. Allows subsequent ParseFlags calls")

		// Reset state (this should not panic)
		ResetFlags()

		// Each expectation is checked independently so every violation is reported.
		if flagsParsed {
			t.Error("flagsParsed should be false after ResetFlags")
		}
		if globalFlags != nil {
			t.Error("globalFlags should be nil after ResetFlags")
		}
		if flag.CommandLine == nil {
			t.Error("flag.CommandLine should not be nil after ResetFlags")
		}
	})
}
// Benchmarks for flag-related operations.
// While flag parsing is a one-time startup operation, these benchmarks
// document baseline performance and catch regressions if parsing logic becomes more complex.
//
// Note: ParseFlags benchmarks are omitted because resetFlagsState() interferes with
// Go's testing framework flags. The core operations (setDefaultDestination, validate)
// are benchmarked instead.

// BenchmarkSetDefaultDestination measures the setDefaultDestination operation.
func BenchmarkSetDefaultDestination(b *testing.B) {
	dir := b.TempDir()
	for b.Loop() {
		// A fresh Flags per iteration: setDefaultDestination mutates Destination.
		f := &Flags{
			SourceDir: dir,
			Format:    "markdown",
			LogLevel:  "warn",
		}
		_ = f.setDefaultDestination()
	}
}
// BenchmarkSetDefaultDestinationAllFormats measures setDefaultDestination across all formats.
func BenchmarkSetDefaultDestinationAllFormats(b *testing.B) {
	dir := b.TempDir()
	formats := [...]string{"markdown", "json", "yaml"}
	for b.Loop() {
		for _, format := range formats {
			// A fresh Flags per format: setDefaultDestination mutates Destination.
			f := &Flags{
				SourceDir: dir,
				Format:    format,
				LogLevel:  "warn",
			}
			_ = f.setDefaultDestination()
		}
	}
}
// BenchmarkFlagsValidate measures the validate operation.
func BenchmarkFlagsValidate(b *testing.B) {
	// validate does not mutate the struct, so one instance serves every iteration.
	f := &Flags{
		SourceDir:   b.TempDir(),
		Destination: "output.md",
		Format:      "markdown",
		LogLevel:    "warn",
	}
	for b.Loop() {
		_ = f.validate()
	}
}
// BenchmarkFlagsValidateAllFormats measures validate across all formats.
func BenchmarkFlagsValidateAllFormats(b *testing.B) {
	dir := b.TempDir()
	formats := [...]string{"markdown", "json", "yaml"}
	for b.Loop() {
		for _, format := range formats {
			f := &Flags{
				SourceDir:   dir,
				Destination: "output." + format,
				Format:      format,
				LogLevel:    "warn",
			}
			_ = f.validate()
		}
	}
}

View File

@@ -1,48 +1,41 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"fmt"
"os"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// collectFiles collects all files to be processed.
func (p *Processor) collectFiles() ([]string, error) {
files, err := fileproc.CollectFiles(p.flags.SourceDir)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
"error collecting files",
)
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "error collecting files")
}
logger := shared.GetLogger()
logger.Infof(shared.CLIMsgFoundFilesToProcess, len(files))
logrus.Infof("Found %d files to process", len(files))
return files, nil
}
// validateFileCollection validates the collected files against resource limits.
func (p *Processor) validateFileCollection(files []string) error {
if !config.ResourceLimitsEnabled() {
if !config.GetResourceLimitsEnabled() {
return nil
}
// Check file count limit
maxFiles := config.MaxFiles()
maxFiles := config.GetMaxFiles()
if len(files) > maxFiles {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitFiles,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitFiles,
fmt.Sprintf("file count (%d) exceeds maximum limit (%d)", len(files), maxFiles),
"",
map[string]any{
map[string]interface{}{
"file_count": len(files),
"max_files": maxFiles,
},
@@ -50,7 +43,7 @@ func (p *Processor) validateFileCollection(files []string) error {
}
// Check total size limit (estimate)
maxTotalSize := config.MaxTotalSize()
maxTotalSize := config.GetMaxTotalSize()
totalSize := int64(0)
oversizedFiles := 0
@@ -58,14 +51,12 @@ func (p *Processor) validateFileCollection(files []string) error {
if fileInfo, err := os.Stat(filePath); err == nil {
totalSize += fileInfo.Size()
if totalSize > maxTotalSize {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTotalSize,
fmt.Sprintf(
"total file size (%d bytes) would exceed maximum limit (%d bytes)", totalSize, maxTotalSize,
),
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitTotalSize,
fmt.Sprintf("total file size (%d bytes) would exceed maximum limit (%d bytes)", totalSize, maxTotalSize),
"",
map[string]any{
map[string]interface{}{
"total_size": totalSize,
"max_total_size": maxTotalSize,
"files_checked": len(files),
@@ -77,12 +68,10 @@ func (p *Processor) validateFileCollection(files []string) error {
}
}
logger := shared.GetLogger()
if oversizedFiles > 0 {
logger.Warnf("Could not stat %d files during pre-validation", oversizedFiles)
logrus.Warnf("Could not stat %d files during pre-validation", oversizedFiles)
}
logger.Infof("Pre-validation passed: %d files, %d MB total", len(files), totalSize/int64(shared.BytesPerMB))
logrus.Infof("Pre-validation passed: %d files, %d MB total", len(files), totalSize/1024/1024)
return nil
}

View File

@@ -1,14 +1,12 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"context"
"os"
"sync"
"time"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// Process executes the main file processing workflow.
@@ -31,32 +29,23 @@ func (p *Processor) Process(ctx context.Context) error {
p.resourceMonitor.LogResourceInfo()
p.backpressure.LogBackpressureInfo()
// Collect files with progress indication and timing
// Collect files with progress indication
p.ui.PrintInfo("📁 Collecting files...")
collectionStart := time.Now()
files, err := p.collectFiles()
collectionTime := time.Since(collectionStart)
p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseCollection, collectionTime)
if err != nil {
return err
}
// Show collection results
p.ui.PrintSuccess(shared.CLIMsgFoundFilesToProcess, len(files))
p.ui.PrintSuccess("Found %d files to process", len(files))
// Pre-validate file collection against resource limits
if err := p.validateFileCollection(files); err != nil {
return err
}
// Process files with overall timeout and timing
processingStart := time.Now()
err = p.processFiles(overallCtx, files)
processingTime := time.Since(processingStart)
p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseProcessing, processingTime)
return err
// Process files with overall timeout
return p.processFiles(overallCtx, files)
}
// processFiles processes the collected files.
@@ -66,7 +55,7 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
return err
}
defer func() {
shared.LogError("Error closing output file", outFile.Close())
utils.LogError("Error closing output file", outFile.Close())
}()
// Initialize back-pressure and channels
@@ -88,26 +77,15 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
// Send files to workers
if err := p.sendFiles(ctx, files, fileCh); err != nil {
p.ui.FinishProgress()
return err
}
// Wait for completion with timing
writingStart := time.Now()
// Wait for completion
p.waitForCompletion(&wg, writeCh, writerDone)
writingTime := time.Since(writingStart)
p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseWriting, writingTime)
p.ui.FinishProgress()
// Final cleanup with timing
finalizeStart := time.Now()
p.logFinalStats()
finalizeTime := time.Since(finalizeStart)
p.metricsCollector.RecordPhaseTime(shared.MetricsPhaseFinalize, finalizeTime)
p.ui.PrintSuccess("Processing completed. Output saved to %s", p.flags.Destination)
return nil
}
@@ -116,13 +94,7 @@ func (p *Processor) createOutputFile() (*os.File, error) {
// Destination path has been validated in CLI flags validation for path traversal attempts
outFile, err := os.Create(p.flags.Destination) // #nosec G304 - destination is validated in flags.validate()
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOFileCreate,
"failed to create output file",
).WithFilePath(p.flags.Destination)
return nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileCreate, "failed to create output file").WithFilePath(p.flags.Destination)
}
return outFile, nil
}

View File

@@ -1,108 +1,40 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"strings"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
)
// logFinalStats logs back-pressure, resource usage, and processing statistics.
// logFinalStats logs the final back-pressure and resource monitoring statistics.
func (p *Processor) logFinalStats() {
p.logBackpressureStats()
p.logResourceStats()
p.finalizeAndReportMetrics()
p.logVerboseStats()
if p.resourceMonitor != nil {
p.resourceMonitor.Close()
}
}
// logBackpressureStats logs back-pressure statistics.
func (p *Processor) logBackpressureStats() {
// Check backpressure is non-nil before dereferencing
if p.backpressure == nil {
return
}
logger := shared.GetLogger()
backpressureStats := p.backpressure.Stats()
// Log back-pressure stats
backpressureStats := p.backpressure.GetStats()
if backpressureStats.Enabled {
logger.Infof(
"Back-pressure stats: processed=%d files, memory=%dMB/%dMB",
backpressureStats.FilesProcessed,
backpressureStats.CurrentMemoryUsage/int64(shared.BytesPerMB),
backpressureStats.MaxMemoryUsage/int64(shared.BytesPerMB),
)
}
}
// logResourceStats logs resource monitoring statistics.
func (p *Processor) logResourceStats() {
// Check resource monitoring is enabled and monitor is non-nil before dereferencing
if !config.ResourceLimitsEnabled() {
return
logrus.Infof("Back-pressure stats: processed=%d files, memory=%dMB/%dMB",
backpressureStats.FilesProcessed, backpressureStats.CurrentMemoryUsage/1024/1024, backpressureStats.MaxMemoryUsage/1024/1024)
}
if p.resourceMonitor == nil {
return
}
logger := shared.GetLogger()
resourceStats := p.resourceMonitor.Metrics()
logger.Infof(
"Resource stats: processed=%d files, totalSize=%dMB, avgFileSize=%.2fKB, rate=%.2f files/sec",
resourceStats.FilesProcessed, resourceStats.TotalSizeProcessed/int64(shared.BytesPerMB),
resourceStats.AverageFileSize/float64(shared.BytesPerKB), resourceStats.ProcessingRate,
)
// Log resource monitoring stats
resourceStats := p.resourceMonitor.GetMetrics()
if config.GetResourceLimitsEnabled() {
logrus.Infof("Resource stats: processed=%d files, totalSize=%dMB, avgFileSize=%.2fKB, rate=%.2f files/sec",
resourceStats.FilesProcessed, resourceStats.TotalSizeProcessed/1024/1024,
resourceStats.AverageFileSize/1024, resourceStats.ProcessingRate)
if len(resourceStats.ViolationsDetected) > 0 {
logger.Warnf("Resource violations detected: %v", resourceStats.ViolationsDetected)
logrus.Warnf("Resource violations detected: %v", resourceStats.ViolationsDetected)
}
if resourceStats.DegradationActive {
logger.Warnf("Processing completed with degradation mode active")
logrus.Warnf("Processing completed with degradation mode active")
}
if resourceStats.EmergencyStopActive {
logger.Errorf("Processing completed with emergency stop active")
logrus.Errorf("Processing completed with emergency stop active")
}
}
// finalizeAndReportMetrics finalizes metrics collection and displays the final report.
func (p *Processor) finalizeAndReportMetrics() {
	if p.metricsCollector != nil {
		p.metricsCollector.Finish()
	}
	if p.metricsReporter == nil {
		return
	}
	finalReport := p.metricsReporter.ReportFinal()
	if finalReport == "" || p.ui == nil {
		return
	}
	// Route the report through the UI manager so the NoUI flag is respected;
	// drop a trailing newline because PrintInfo appends its own.
	p.ui.PrintInfo("%s", strings.TrimSuffix(finalReport, "\n"))
}
// logVerboseStats logs detailed structured statistics when verbose mode is enabled.
// It is a no-op unless the -verbose flag is set and a metrics collector exists.
func (p *Processor) logVerboseStats() {
	if !p.flags.Verbose || p.metricsCollector == nil {
		return
	}
	summary := p.metricsCollector.GenerateReport().Summary
	// Emit one structured log entry with the full processing summary.
	shared.GetLogger().WithFields(map[string]any{
		"total_files":      summary.TotalFiles,
		"processed_files":  summary.ProcessedFiles,
		"skipped_files":    summary.SkippedFiles,
		"error_files":      summary.ErrorFiles,
		"processing_time":  summary.ProcessingTime,
		"files_per_second": summary.FilesPerSecond,
		"bytes_per_second": summary.BytesPerSecond,
		"memory_usage_mb":  summary.CurrentMemoryMB,
	}).Info("Processing completed with comprehensive metrics")
}
// Clean up resource monitor
p.resourceMonitor.Close()
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,8 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/metrics"
)
// Processor handles the main file processing logic.
@@ -13,8 +11,6 @@ type Processor struct {
backpressure *fileproc.BackpressureManager
resourceMonitor *fileproc.ResourceMonitor
ui *UIManager
metricsCollector *metrics.Collector
metricsReporter *metrics.Reporter
}
// NewProcessor creates a new processor with the given flags.
@@ -22,38 +18,27 @@ func NewProcessor(flags *Flags) *Processor {
ui := NewUIManager()
// Configure UI based on flags
ui.SetColorOutput(!flags.NoColors && !flags.NoUI)
ui.SetProgressOutput(!flags.NoProgress && !flags.NoUI)
ui.SetSilentMode(flags.NoUI)
// Initialize metrics system
metricsCollector := metrics.NewCollector()
metricsReporter := metrics.NewReporter(
metricsCollector,
flags.Verbose && !flags.NoUI,
!flags.NoColors && !flags.NoUI,
)
ui.SetColorOutput(!flags.NoColors)
ui.SetProgressOutput(!flags.NoProgress)
return &Processor{
flags: flags,
backpressure: fileproc.NewBackpressureManager(),
resourceMonitor: fileproc.NewResourceMonitor(),
ui: ui,
metricsCollector: metricsCollector,
metricsReporter: metricsReporter,
}
}
// configureFileTypes configures the file type registry.
func (p *Processor) configureFileTypes() {
if config.FileTypesEnabled() {
if config.GetFileTypesEnabled() {
fileproc.ConfigureFromSettings(
config.CustomImageExtensions(),
config.CustomBinaryExtensions(),
config.CustomLanguages(),
config.DisabledImageExtensions(),
config.DisabledBinaryExtensions(),
config.DisabledLanguageExtensions(),
config.GetCustomImageExtensions(),
config.GetCustomBinaryExtensions(),
config.GetCustomLanguages(),
config.GetDisabledImageExtensions(),
config.GetDisabledBinaryExtensions(),
config.GetDisabledLanguageExtensions(),
)
}
}

View File

@@ -1,26 +1,17 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/metrics"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// startWorkers starts the worker goroutines.
func (p *Processor) startWorkers(
ctx context.Context,
wg *sync.WaitGroup,
fileCh chan string,
writeCh chan fileproc.WriteRequest,
) {
func (p *Processor) startWorkers(ctx context.Context, wg *sync.WaitGroup, fileCh chan string, writeCh chan fileproc.WriteRequest) {
for range p.flags.Concurrency {
wg.Add(1)
go p.worker(ctx, wg, fileCh, writeCh)
@@ -28,12 +19,7 @@ func (p *Processor) startWorkers(
}
// worker is the worker goroutine function.
func (p *Processor) worker(
ctx context.Context,
wg *sync.WaitGroup,
fileCh chan string,
writeCh chan fileproc.WriteRequest,
) {
func (p *Processor) worker(ctx context.Context, wg *sync.WaitGroup, fileCh chan string, writeCh chan fileproc.WriteRequest) {
defer wg.Done()
for {
select {
@@ -48,69 +34,25 @@ func (p *Processor) worker(
}
}
// processFile processes a single file with resource monitoring and metrics collection.
// processFile processes a single file with resource monitoring.
func (p *Processor) processFile(ctx context.Context, filePath string, writeCh chan fileproc.WriteRequest) {
// Create file processing context with timeout (resourceMonitor may be nil)
fileCtx, fileCancel := ctx, func() {}
if p.resourceMonitor != nil {
fileCtx, fileCancel = p.resourceMonitor.CreateFileProcessingContext(ctx)
}
defer fileCancel()
// Track concurrency
if p.metricsCollector != nil {
p.metricsCollector.IncrementConcurrency()
defer p.metricsCollector.DecrementConcurrency()
}
// Check for emergency stop
if p.resourceMonitor != nil && p.resourceMonitor.IsEmergencyStopActive() {
logger := shared.GetLogger()
logger.Warnf("Emergency stop active, skipping file: %s", filePath)
// Record skipped file
p.recordFileResult(filePath, 0, "", false, true, "emergency stop active", nil)
if p.ui != nil {
p.ui.UpdateProgress(1)
}
if p.resourceMonitor.IsEmergencyStopActive() {
logrus.Warnf("Emergency stop active, skipping file: %s", filePath)
return
}
absRoot, err := shared.AbsolutePath(p.flags.SourceDir)
absRoot, err := utils.GetAbsolutePath(p.flags.SourceDir)
if err != nil {
shared.LogError("Failed to get absolute path", err)
// Record error
p.recordFileResult(filePath, 0, "", false, false, "", err)
if p.ui != nil {
p.ui.UpdateProgress(1)
}
utils.LogError("Failed to get absolute path", err)
return
}
// Use the resource monitor-aware processing with metrics tracking
fileSize, format, success, processErr := p.processFileWithMetrics(fileCtx, filePath, writeCh, absRoot)
// Use the resource monitor-aware processing
fileproc.ProcessFileWithMonitor(ctx, filePath, writeCh, absRoot, p.resourceMonitor)
// Record the processing result (skipped=false, skipReason="" since processFileWithMetrics never skips)
p.recordFileResult(filePath, fileSize, format, success, false, "", processErr)
// Update progress bar with metrics
if p.ui != nil {
// Update progress bar
p.ui.UpdateProgress(1)
}
// Show real-time stats in verbose mode
if p.flags.Verbose && p.metricsCollector != nil {
currentMetrics := p.metricsCollector.CurrentMetrics()
if currentMetrics.ProcessedFiles%10 == 0 && p.metricsReporter != nil {
logger := shared.GetLogger()
logger.Info(p.metricsReporter.ReportProgress())
}
}
}
// sendFiles sends files to the worker channels with back-pressure handling.
@@ -126,94 +68,17 @@ func (p *Processor) sendFiles(ctx context.Context, files []string, fileCh chan s
// Wait for channel space if needed
p.backpressure.WaitForChannelSpace(ctx, fileCh, nil)
if err := shared.CheckContextCancellation(ctx, shared.CLIMsgFileProcessingWorker); err != nil {
return fmt.Errorf("context check failed: %w", err)
}
select {
case fileCh <- fp:
case <-ctx.Done():
if err := shared.CheckContextCancellation(ctx, shared.CLIMsgFileProcessingWorker); err != nil {
return fmt.Errorf("context cancellation during channel send: %w", err)
}
return errors.New("context canceled during channel send")
return ctx.Err()
case fileCh <- fp:
}
}
return nil
}
// processFileWithMetrics wraps the file processing with detailed metrics collection.
func (p *Processor) processFileWithMetrics(
ctx context.Context,
filePath string,
writeCh chan fileproc.WriteRequest,
absRoot string,
) (fileSize int64, format string, success bool, err error) {
// Get file info
fileInfo, statErr := os.Stat(filePath)
if statErr != nil {
return 0, "", false, fmt.Errorf("getting file info for %s: %w", filePath, statErr)
}
fileSize = fileInfo.Size()
// Detect format from file extension
format = filepath.Ext(filePath)
if format != "" && format[0] == '.' {
format = format[1:] // Remove the dot
}
// Use the existing resource monitor-aware processing
err = fileproc.ProcessFileWithMonitor(ctx, filePath, writeCh, absRoot, p.resourceMonitor)
// Check if processing was successful
select {
case <-ctx.Done():
return fileSize, format, false, fmt.Errorf("file processing worker canceled: %w", ctx.Err())
default:
if err != nil {
return fileSize, format, false, fmt.Errorf("processing file %s: %w", filePath, err)
}
return fileSize, format, true, nil
}
}
// recordFileResult records the result of file processing in metrics.
func (p *Processor) recordFileResult(
filePath string,
fileSize int64,
format string,
success bool,
skipped bool,
skipReason string,
err error,
) {
if p.metricsCollector == nil {
return // No metrics collector, skip recording
}
result := metrics.FileProcessingResult{
FilePath: filePath,
FileSize: fileSize,
Format: format,
Success: success,
Error: err,
Skipped: skipped,
SkipReason: skipReason,
}
p.metricsCollector.RecordFileProcessed(result)
}
// waitForCompletion waits for all workers to complete.
func (p *Processor) waitForCompletion(
wg *sync.WaitGroup,
writeCh chan fileproc.WriteRequest,
writerDone chan struct{},
) {
func (p *Processor) waitForCompletion(wg *sync.WaitGroup, writeCh chan fileproc.WriteRequest, writerDone chan struct{}) {
wg.Wait()
close(writeCh)
<-writerDone

View File

@@ -1,4 +1,3 @@
// Package cli provides command-line interface functionality for gibidify.
package cli
import (
@@ -9,15 +8,12 @@ import (
"github.com/fatih/color"
"github.com/schollz/progressbar/v3"
"github.com/ivuorinen/gibidify/shared"
)
// UIManager handles CLI user interface elements.
type UIManager struct {
enableColors bool
enableProgress bool
silentMode bool
progressBar *progressbar.ProgressBar
output io.Writer
}
@@ -42,45 +38,29 @@ func (ui *UIManager) SetProgressOutput(enabled bool) {
ui.enableProgress = enabled
}
// SetSilentMode enables or disables all UI output.
func (ui *UIManager) SetSilentMode(silent bool) {
ui.silentMode = silent
if silent {
ui.output = io.Discard
} else {
ui.output = os.Stderr
}
}
// StartProgress initializes a progress bar for file processing.
func (ui *UIManager) StartProgress(total int, description string) {
if !ui.enableProgress || total <= 0 {
return
}
ui.progressBar = progressbar.NewOptions(
total,
ui.progressBar = progressbar.NewOptions(total,
progressbar.OptionSetWriter(ui.output),
progressbar.OptionSetDescription(description),
progressbar.OptionSetTheme(
progressbar.Theme{
Saucer: color.GreenString(shared.UIProgressBarChar),
SaucerHead: color.GreenString(shared.UIProgressBarChar),
progressbar.OptionSetTheme(progressbar.Theme{
Saucer: color.GreenString("█"),
SaucerHead: color.GreenString("█"),
SaucerPadding: " ",
BarStart: "[",
BarEnd: "]",
},
),
}),
progressbar.OptionShowCount(),
progressbar.OptionShowIts(),
progressbar.OptionSetWidth(40),
progressbar.OptionThrottle(100*time.Millisecond),
progressbar.OptionOnCompletion(
func() {
//nolint:errcheck // UI output, errors don't affect processing
progressbar.OptionOnCompletion(func() {
_, _ = fmt.Fprint(ui.output, "\n")
},
),
}),
progressbar.OptionSetRenderBlankState(true),
)
}
@@ -101,10 +81,7 @@ func (ui *UIManager) FinishProgress() {
}
// PrintSuccess prints a success message in green.
func (ui *UIManager) PrintSuccess(format string, args ...any) {
if ui.silentMode {
return
}
func (ui *UIManager) PrintSuccess(format string, args ...interface{}) {
if ui.enableColors {
color.Green("✓ "+format, args...)
} else {
@@ -113,10 +90,7 @@ func (ui *UIManager) PrintSuccess(format string, args ...any) {
}
// PrintError prints an error message in red.
func (ui *UIManager) PrintError(format string, args ...any) {
if ui.silentMode {
return
}
func (ui *UIManager) PrintError(format string, args ...interface{}) {
if ui.enableColors {
color.Red("✗ "+format, args...)
} else {
@@ -125,10 +99,7 @@ func (ui *UIManager) PrintError(format string, args ...any) {
}
// PrintWarning prints a warning message in yellow.
func (ui *UIManager) PrintWarning(format string, args ...any) {
if ui.silentMode {
return
}
func (ui *UIManager) PrintWarning(format string, args ...interface{}) {
if ui.enableColors {
color.Yellow("⚠ "+format, args...)
} else {
@@ -137,12 +108,8 @@ func (ui *UIManager) PrintWarning(format string, args ...any) {
}
// PrintInfo prints an info message in blue.
func (ui *UIManager) PrintInfo(format string, args ...any) {
if ui.silentMode {
return
}
func (ui *UIManager) PrintInfo(format string, args ...interface{}) {
if ui.enableColors {
//nolint:errcheck // UI output, errors don't affect processing
color.Blue(" "+format, args...)
} else {
ui.printf(" "+format+"\n", args...)
@@ -150,12 +117,8 @@ func (ui *UIManager) PrintInfo(format string, args ...any) {
}
// PrintHeader prints a header message in bold.
func (ui *UIManager) PrintHeader(format string, args ...any) {
if ui.silentMode {
return
}
func (ui *UIManager) PrintHeader(format string, args ...interface{}) {
if ui.enableColors {
//nolint:errcheck // UI output, errors don't affect processing
_, _ = color.New(color.Bold).Fprintf(ui.output, format+"\n", args...)
} else {
ui.printf(format+"\n", args...)
@@ -173,7 +136,7 @@ func isColorTerminal() bool {
// Check for CI environments that typically don't support colors
if os.Getenv("CI") != "" {
// GitHub Actions supports colors
if os.Getenv("GITHUB_ACTIONS") == shared.LiteralTrue {
if os.Getenv("GITHUB_ACTIONS") == "true" {
return true
}
// Most other CI systems don't
@@ -201,11 +164,10 @@ func isInteractiveTerminal() bool {
if err != nil {
return false
}
return (fileInfo.Mode() & os.ModeCharDevice) != 0
}
// printf is a helper that ignores printf errors (for UI output).
func (ui *UIManager) printf(format string, args ...any) {
func (ui *UIManager) printf(format string, args ...interface{}) {
_, _ = fmt.Fprintf(ui.output, format, args...)
}

View File

@@ -1,531 +0,0 @@
package cli
import (
"os"
"strings"
"testing"
"github.com/ivuorinen/gibidify/shared"
)
// TestNewUIManager verifies the constructor returns a non-nil manager whose
// output defaults to os.Stderr.
func TestNewUIManager(t *testing.T) {
	manager := NewUIManager()
	switch {
	case manager == nil:
		t.Error("NewUIManager() returned nil")
	case manager.output == nil:
		t.Error("NewUIManager() did not set output")
	case manager.output != os.Stderr:
		t.Error("NewUIManager() should default output to os.Stderr")
	}
}
// TestUIManagerSetColorOutput checks both toggle directions of color output.
func TestUIManagerSetColorOutput(t *testing.T) {
	manager := NewUIManager()
	for _, tc := range []struct {
		enabled bool
		msg     string
	}{
		{true, "SetColorOutput(true) did not enable colors"},
		{false, "SetColorOutput(false) did not disable colors"},
	} {
		manager.SetColorOutput(tc.enabled)
		if manager.enableColors != tc.enabled {
			t.Error(tc.msg)
		}
	}
}
// TestUIManagerSetProgressOutput checks both toggle directions of progress output.
func TestUIManagerSetProgressOutput(t *testing.T) {
	manager := NewUIManager()
	for _, tc := range []struct {
		enabled bool
		msg     string
	}{
		{true, "SetProgressOutput(true) did not enable progress"},
		{false, "SetProgressOutput(false) did not disable progress"},
	} {
		manager.SetProgressOutput(tc.enabled)
		if manager.enableProgress != tc.enabled {
			t.Error(tc.msg)
		}
	}
}
// TestUIManagerStartProgress table-tests bar creation: a bar must appear only
// when progress output is enabled AND the total is positive.
func TestUIManagerStartProgress(t *testing.T) {
	tests := []struct {
		name        string
		total       int    // total passed to StartProgress
		description string // bar description passed to StartProgress
		enabled     bool   // progress output toggle before the call
		expectBar   bool   // whether ui.progressBar should be non-nil afterwards
	}{
		{
			name:        "valid progress with enabled progress",
			total:       10,
			description: shared.TestProgressMessage,
			enabled:     true,
			expectBar:   true,
		},
		{
			name:        "disabled progress should not create bar",
			total:       10,
			description: shared.TestProgressMessage,
			enabled:     false,
			expectBar:   false,
		},
		{
			name:        "zero total should not create bar",
			total:       0,
			description: shared.TestProgressMessage,
			enabled:     true,
			expectBar:   false,
		},
		{
			name:        "negative total should not create bar",
			total:       -1,
			description: shared.TestProgressMessage,
			enabled:     true,
			expectBar:   false,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				ui, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
				ui.SetProgressOutput(tt.enabled)
				ui.StartProgress(tt.total, tt.description)
				// Assert presence/absence of the bar rather than its contents.
				if tt.expectBar && ui.progressBar == nil {
					t.Error("StartProgress() should have created progress bar but didn't")
				}
				if !tt.expectBar && ui.progressBar != nil {
					t.Error("StartProgress() should not have created progress bar but did")
				}
			},
		)
	}
}
// TestUIManagerUpdateProgress ensures UpdateProgress is safe both before a bar
// exists (no-op, no panic) and after one has been started.
func TestUIManagerUpdateProgress(t *testing.T) {
	manager, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
	manager.SetProgressOutput(true)
	// Updating with no bar must not panic.
	manager.UpdateProgress(1)
	manager.StartProgress(10, "Test progress")
	if manager.progressBar == nil {
		t.Fatal("StartProgress() did not create progress bar")
	}
	// Subsequent updates against a live bar must also not panic.
	for _, step := range []int{1, 5} {
		manager.UpdateProgress(step)
	}
}
// TestUIManagerFinishProgress ensures FinishProgress is a safe no-op without a
// bar and clears the bar reference once one exists.
func TestUIManagerFinishProgress(t *testing.T) {
	manager, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
	manager.SetProgressOutput(true)
	// Finishing with no bar must not panic.
	manager.FinishProgress()
	manager.StartProgress(10, "Test progress")
	if manager.progressBar == nil {
		t.Fatal("StartProgress() did not create progress bar")
	}
	manager.FinishProgress()
	if manager.progressBar != nil {
		t.Error("FinishProgress() should have cleared progress bar")
	}
}
// printMethodTest describes one case for exercising a UIManager print method
// via the testPrintMethod helper.
type printMethodTest struct {
	name         string // subtest name
	enableColors bool   // color output toggle applied before the call
	format       string // printf-style format passed to the print method
	args         []any  // arguments substituted into format
	expectedText string // substring expected in plain (non-color) output
}
// testPrintMethod runs a set of printMethodTest cases against one UIManager
// print method, asserting plain-mode output contains the expected substring.
// Colored output is not captured by the test buffer, so color cases only
// verify the call does not panic.
func testPrintMethod(
	t *testing.T,
	methodName string,
	printFunc func(*UIManager, string, ...any),
	tests []printMethodTest,
) {
	t.Helper()
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				ui, output := createTestUI()
				ui.SetColorOutput(tt.enableColors)
				printFunc(ui, tt.format, tt.args...)
				// Only plain output goes to the capture buffer; color output
				// is written by the color package and is not asserted here.
				if !tt.enableColors {
					outputStr := output.String()
					if !strings.Contains(outputStr, tt.expectedText) {
						t.Errorf("%s() output %q should contain %q", methodName, outputStr, tt.expectedText)
					}
				}
			},
		)
	}
	// Test color method separately (doesn't capture output but shouldn't panic)
	t.Run(
		methodName+" with colors should not panic", func(_ *testing.T) {
			ui, _ := createTestUI() //nolint:errcheck // Test helper output buffer not used in this test
			ui.SetColorOutput(true)
			// Should not panic
			printFunc(ui, "Test message")
		},
	)
}
// TestUIManagerPrintSuccess exercises PrintSuccess through the shared helper.
func TestUIManagerPrintSuccess(t *testing.T) {
	cases := []printMethodTest{
		{
			name:         "success without colors",
			enableColors: false,
			format:       "Operation completed successfully",
			args:         []any{},
			expectedText: "✓ Operation completed successfully",
		},
		{
			name:         "success with args without colors",
			enableColors: false,
			format:       "Processed %d files in %s",
			args:         []any{5, "project"},
			expectedText: "✓ Processed 5 files in project",
		},
	}
	// Method expression matches the helper's printFunc signature directly.
	testPrintMethod(t, "PrintSuccess", (*UIManager).PrintSuccess, cases)
}
// TestUIManagerPrintError exercises PrintError through the shared helper.
func TestUIManagerPrintError(t *testing.T) {
	cases := []printMethodTest{
		{
			name:         "error without colors",
			enableColors: false,
			format:       "Operation failed",
			args:         []any{},
			expectedText: "✗ Operation failed",
		},
		{
			name:         "error with args without colors",
			enableColors: false,
			format:       "Failed to process %d files",
			args:         []any{3},
			expectedText: "✗ Failed to process 3 files",
		},
	}
	// Method expression matches the helper's printFunc signature directly.
	testPrintMethod(t, "PrintError", (*UIManager).PrintError, cases)
}
// TestUIManagerPrintWarning exercises PrintWarning through the shared helper.
func TestUIManagerPrintWarning(t *testing.T) {
	cases := []printMethodTest{
		{
			name:         "warning without colors",
			enableColors: false,
			format:       "This is a warning",
			args:         []any{},
			expectedText: "⚠ This is a warning",
		},
		{
			name:         "warning with args without colors",
			enableColors: false,
			format:       "Found %d potential issues",
			args:         []any{2},
			expectedText: "⚠ Found 2 potential issues",
		},
	}
	// Method expression matches the helper's printFunc signature directly.
	testPrintMethod(t, "PrintWarning", (*UIManager).PrintWarning, cases)
}
// TestUIManagerPrintInfo exercises PrintInfo through the shared helper.
func TestUIManagerPrintInfo(t *testing.T) {
	cases := []printMethodTest{
		{
			name:         "info without colors",
			enableColors: false,
			format:       "Information message",
			args:         []any{},
			expectedText: " Information message",
		},
		{
			name:         "info with args without colors",
			enableColors: false,
			format:       "Processing file %s",
			args:         []any{"example.go"},
			expectedText: " Processing file example.go",
		},
	}
	// Method expression matches the helper's printFunc signature directly.
	testPrintMethod(t, "PrintInfo", (*UIManager).PrintInfo, cases)
}
// TestUIManagerPrintHeader checks header output with and without colors;
// unlike the other print helpers, the colored path also writes to ui.output,
// so the expected substring is asserted in every case.
func TestUIManagerPrintHeader(t *testing.T) {
	cases := []struct {
		name         string
		enableColors bool
		format       string
		args         []any
		expectedText string
	}{
		{"header without colors", false, "Main Header", []any{}, "Main Header"},
		{"header with args without colors", false, "Processing %s Module", []any{"CLI"}, "Processing CLI Module"},
		{"header with colors", true, "Build Results", []any{}, "Build Results"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ui, buf := createTestUI()
			ui.SetColorOutput(tc.enableColors)
			ui.PrintHeader(tc.format, tc.args...)
			if got := buf.String(); !strings.Contains(got, tc.expectedText) {
				t.Errorf("PrintHeader() output %q should contain %q", got, tc.expectedText)
			}
		})
	}
}
// colorTerminalTestCase represents a test case for color terminal detection.
// Empty string fields leave the corresponding environment variable unset
// (setColorTerminalTestEnv only sets non-empty values).
type colorTerminalTestCase struct {
	name          string // subtest name
	term          string // TERM value
	ci            string // CI value
	githubActions string // GITHUB_ACTIONS value
	noColor       string // NO_COLOR value
	forceColor    string // FORCE_COLOR value
	expected      bool   // expected isColorTerminal() result
}
// clearColorTerminalEnvVars clears all environment variables used for terminal color detection.
func clearColorTerminalEnvVars(t *testing.T) {
t.Helper()
envVars := []string{"TERM", "CI", "GITHUB_ACTIONS", "NO_COLOR", "FORCE_COLOR"}
for _, envVar := range envVars {
if err := os.Unsetenv(envVar); err != nil {
t.Logf("Failed to unset %s: %v", envVar, err)
}
}
}
// setColorTerminalTestEnv applies a test case's environment variables via
// t.Setenv. Empty values are skipped, leaving those variables unset.
func setColorTerminalTestEnv(t *testing.T, testCase colorTerminalTestCase) {
	t.Helper()
	pairs := []struct {
		key   string
		value string
	}{
		{"TERM", testCase.term},
		{"CI", testCase.ci},
		{"GITHUB_ACTIONS", testCase.githubActions},
		{"NO_COLOR", testCase.noColor},
		{"FORCE_COLOR", testCase.forceColor},
	}
	for _, p := range pairs {
		if p.value != "" {
			t.Setenv(p.key, p.value)
		}
	}
}
// TestIsColorTerminal table-tests terminal color detection across TERM, CI,
// GITHUB_ACTIONS, NO_COLOR, and FORCE_COLOR combinations. The real process
// environment is snapshotted up front and restored after the test.
func TestIsColorTerminal(t *testing.T) {
	// Save original environment
	originalEnv := map[string]string{
		"TERM":           os.Getenv("TERM"),
		"CI":             os.Getenv("CI"),
		"GITHUB_ACTIONS": os.Getenv("GITHUB_ACTIONS"),
		"NO_COLOR":       os.Getenv("NO_COLOR"),
		"FORCE_COLOR":    os.Getenv("FORCE_COLOR"),
	}
	defer func() {
		// Restore original environment
		for key, value := range originalEnv {
			setEnvOrUnset(key, value)
		}
	}()
	tests := []colorTerminalTestCase{
		{
			name:     "dumb terminal",
			term:     "dumb",
			expected: false,
		},
		{
			name:     "empty term",
			term:     "",
			expected: false,
		},
		{
			name:          "github actions with CI",
			term:          shared.TestTerminalXterm256,
			ci:            "true",
			githubActions: "true",
			expected:      true,
		},
		{
			name:     "CI without github actions",
			term:     shared.TestTerminalXterm256,
			ci:       "true",
			expected: false,
		},
		{
			name:     "NO_COLOR set",
			term:     shared.TestTerminalXterm256,
			noColor:  "1",
			expected: false,
		},
		{
			name:       "FORCE_COLOR set",
			term:       shared.TestTerminalXterm256,
			forceColor: "1",
			expected:   true,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				// Start each case from a clean slate, then apply only this
				// case's variables before probing detection.
				clearColorTerminalEnvVars(t)
				setColorTerminalTestEnv(t, tt)
				result := isColorTerminal()
				if result != tt.expected {
					t.Errorf("isColorTerminal() = %v, want %v", result, tt.expected)
				}
			},
		)
	}
}
// TestIsInteractiveTerminal only ensures the probe runs without panicking.
// os.Stderr.Stat() cannot easily be mocked here, so no particular boolean
// value is asserted — either result is valid.
func TestIsInteractiveTerminal(_ *testing.T) {
	_ = isInteractiveTerminal()
}
// TestUIManagerprintf verifies printf formats into the manager's output writer.
func TestUIManagerprintf(t *testing.T) {
	manager, buf := createTestUI()
	manager.printf("Hello %s", "world")
	if got, want := buf.String(), "Hello world"; got != want {
		t.Errorf("printf() = %q, want %q", got, want)
	}
}
// Helper function to set environment variable or unset if empty.
func setEnvOrUnset(key, value string) {
if value == "" {
if err := os.Unsetenv(key); err != nil {
// In tests, environment variable errors are not critical,
// but we should still handle them to avoid linting issues
_ = err // explicitly ignore error
}
} else {
if err := os.Setenv(key, value); err != nil {
// In tests, environment variable errors are not critical,
// but we should still handle them to avoid linting issues
_ = err // explicitly ignore error
}
}
}
// TestUIManagerIntegration drives a complete header/progress/print workflow in
// plain mode and asserts the combined captured output.
func TestUIManagerIntegration(t *testing.T) {
	manager, buf := createTestUI() //nolint:errcheck // Test helper, output buffer is used
	manager.SetColorOutput(false)    // plain output keeps assertions deterministic
	manager.SetProgressOutput(false) // progress rendering is not under test here
	// Simulate a full processing run.
	manager.PrintHeader("Starting Processing")
	manager.PrintInfo("Initializing system")
	manager.StartProgress(3, shared.TestProgressMessage)
	manager.UpdateProgress(1)
	manager.PrintInfo("Processing file 1")
	manager.UpdateProgress(1)
	manager.PrintWarning("Skipping invalid file")
	manager.UpdateProgress(1)
	manager.FinishProgress()
	manager.PrintSuccess("Processing completed successfully")
	got := buf.String()
	for _, want := range []string{
		"Starting Processing",
		" Initializing system",
		" Processing file 1",
		"⚠ Skipping invalid file",
		"✓ Processing completed successfully",
	} {
		if !strings.Contains(got, want) {
			t.Errorf("Integration test output missing expected string: %q\nFull output:\n%s", want, got)
		}
	}
}

View File

@@ -9,60 +9,38 @@ import (
"strings"
"github.com/ivuorinen/gibidify/benchmark"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
var (
sourceDir = flag.String(
shared.CLIArgSource, "", "Source directory to benchmark (uses temp files if empty)",
)
benchmarkType = flag.String(
"type", shared.CLIArgAll, "Benchmark type: all, collection, processing, concurrency, format",
)
format = flag.String(
shared.CLIArgFormat, shared.FormatJSON, "Output format for processing benchmarks",
)
concurrency = flag.Int(
shared.CLIArgConcurrency, runtime.NumCPU(), "Concurrency level for processing benchmarks",
)
concurrencyList = flag.String(
"concurrency-list", shared.TestConcurrencyList, "Comma-separated list of concurrency levels",
)
formatList = flag.String(
"format-list", shared.TestFormatList, "Comma-separated list of formats",
)
numFiles = flag.Int("files", shared.BenchmarkDefaultFileCount, "Number of files to create for benchmarks")
sourceDir = flag.String("source", "", "Source directory to benchmark (uses temp files if empty)")
benchmarkType = flag.String("type", "all", "Benchmark type: all, collection, processing, concurrency, format")
format = flag.String("format", "json", "Output format for processing benchmarks")
concurrency = flag.Int("concurrency", runtime.NumCPU(), "Concurrency level for processing benchmarks")
concurrencyList = flag.String("concurrency-list", "1,2,4,8", "Comma-separated list of concurrency levels")
formatList = flag.String("format-list", "json,yaml,markdown", "Comma-separated list of formats")
numFiles = flag.Int("files", 100, "Number of files to create for benchmarks")
)
func main() {
flag.Parse()
if err := runBenchmarks(); err != nil {
//goland:noinspection GoUnhandledErrorResult
_, _ = fmt.Fprintf(os.Stderr, "Benchmark failed: %v\n", err)
fmt.Fprintf(os.Stderr, "Benchmark failed: %v\n", err)
os.Exit(1)
}
}
func runBenchmarks() error {
//nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
_, _ = fmt.Println("Running gibidify benchmarks...")
//nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
_, _ = fmt.Printf("Source: %s\n", getSourceDescription())
//nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
_, _ = fmt.Printf("Type: %s\n", *benchmarkType)
//nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
_, _ = fmt.Printf("CPU cores: %d\n", runtime.NumCPU())
//nolint:errcheck // Benchmark informational output, errors don't affect benchmark results
_, _ = fmt.Println()
fmt.Printf("Running gibidify benchmarks...\n")
fmt.Printf("Source: %s\n", getSourceDescription())
fmt.Printf("Type: %s\n", *benchmarkType)
fmt.Printf("CPU cores: %d\n", runtime.NumCPU())
fmt.Println()
switch *benchmarkType {
case shared.CLIArgAll:
if err := benchmark.RunAllBenchmarks(*sourceDir); err != nil {
return fmt.Errorf("benchmark failed: %w", err)
}
return nil
case "all":
return benchmark.RunAllBenchmarks(*sourceDir)
case "collection":
return runCollectionBenchmark()
case "processing":
@@ -72,79 +50,53 @@ func runBenchmarks() error {
case "format":
return runFormatBenchmark()
default:
return shared.NewValidationError(shared.CodeValidationFormat, "invalid benchmark type: "+*benchmarkType)
return utils.NewValidationError(utils.CodeValidationFormat, "invalid benchmark type: "+*benchmarkType)
}
}
func runCollectionBenchmark() error {
//nolint:errcheck // Benchmark status message, errors don't affect benchmark results
_, _ = fmt.Println(shared.BenchmarkMsgRunningCollection)
fmt.Println("Running file collection benchmark...")
result, err := benchmark.FileCollectionBenchmark(*sourceDir, *numFiles)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgFileCollectionFailed,
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "file collection benchmark failed")
}
benchmark.PrintResult(result)
benchmark.PrintBenchmarkResult(result)
return nil
}
func runProcessingBenchmark() error {
//nolint:errcheck // Benchmark status message, errors don't affect benchmark results
_, _ = fmt.Printf("Running file processing benchmark (format: %s, concurrency: %d)...\n", *format, *concurrency)
fmt.Printf("Running file processing benchmark (format: %s, concurrency: %d)...\n", *format, *concurrency)
result, err := benchmark.FileProcessingBenchmark(*sourceDir, *format, *concurrency)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
"file processing benchmark failed",
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "file processing benchmark failed")
}
benchmark.PrintResult(result)
benchmark.PrintBenchmarkResult(result)
return nil
}
func runConcurrencyBenchmark() error {
concurrencyLevels, err := parseConcurrencyList(*concurrencyList)
if err != nil {
return shared.WrapError(
err, shared.ErrorTypeValidation, shared.CodeValidationFormat, "invalid concurrency list")
return utils.WrapError(err, utils.ErrorTypeValidation, utils.CodeValidationFormat, "invalid concurrency list")
}
//nolint:errcheck // Benchmark status message, errors don't affect benchmark results
_, _ = fmt.Printf("Running concurrency benchmark (format: %s, levels: %v)...\n", *format, concurrencyLevels)
fmt.Printf("Running concurrency benchmark (format: %s, levels: %v)...\n", *format, concurrencyLevels)
suite, err := benchmark.ConcurrencyBenchmark(*sourceDir, *format, concurrencyLevels)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingCollection,
shared.BenchmarkMsgConcurrencyFailed,
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "concurrency benchmark failed")
}
benchmark.PrintSuite(suite)
benchmark.PrintBenchmarkSuite(suite)
return nil
}
func runFormatBenchmark() error {
formats := parseFormatList(*formatList)
//nolint:errcheck // Benchmark status message, errors don't affect benchmark results
_, _ = fmt.Printf("Running format benchmark (formats: %v)...\n", formats)
fmt.Printf("Running format benchmark (formats: %v)...\n", formats)
suite, err := benchmark.FormatBenchmark(*sourceDir, formats)
if err != nil {
return shared.WrapError(
err, shared.ErrorTypeProcessing, shared.CodeProcessingCollection, shared.BenchmarkMsgFormatFailed,
)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "format benchmark failed")
}
benchmark.PrintSuite(suite)
benchmark.PrintBenchmarkSuite(suite)
return nil
}
@@ -152,7 +104,6 @@ func getSourceDescription() string {
if *sourceDir == "" {
return fmt.Sprintf("temporary files (%d files)", *numFiles)
}
return *sourceDir
}
@@ -164,24 +115,16 @@ func parseConcurrencyList(list string) ([]int, error) {
part = strings.TrimSpace(part)
var level int
if _, err := fmt.Sscanf(part, "%d", &level); err != nil {
return nil, shared.WrapErrorf(
err,
shared.ErrorTypeValidation,
shared.CodeValidationFormat,
"invalid concurrency level: %s",
part,
)
return nil, utils.WrapErrorf(err, utils.ErrorTypeValidation, utils.CodeValidationFormat, "invalid concurrency level: %s", part)
}
if level <= 0 {
return nil, shared.NewValidationError(
shared.CodeValidationFormat, "concurrency level must be positive: "+part,
)
return nil, utils.NewValidationError(utils.CodeValidationFormat, "concurrency level must be positive: "+part)
}
levels = append(levels, level)
}
if len(levels) == 0 {
return nil, shared.NewValidationError(shared.CodeValidationFormat, "no valid concurrency levels found")
return nil, utils.NewValidationError(utils.CodeValidationFormat, "no valid concurrency levels found")
}
return levels, nil

View File

@@ -1,751 +0,0 @@
package main
import (
"errors"
"flag"
"io"
"os"
"runtime"
"testing"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
// Test constants to avoid goconst linting issues.
const (
	testJSON        = "json"     // output format flag value: JSON
	testMarkdown    = "markdown" // output format flag value: Markdown
	testConcurrency = "1,2"      // sample -concurrency-list flag value
	testAll         = "all"      // benchmark type: run every benchmark
	testCollection  = "collection" // benchmark type: file collection only
	testConcurrencyT = "concurrency" // benchmark type: concurrency sweep
	// Path that must not exist; used to drive benchmark error branches.
	testNonExistent = "/nonexistent/path/that/should/not/exist"
	testFile1   = "test1.txt" // fixture file name
	testFile2   = "test2.txt" // fixture file name
	testContent1 = "content1" // fixture file content
	testContent2 = "content2" // fixture file content
)
// TestParseConcurrencyList verifies parsing of comma-separated concurrency
// lists: valid values (with or without whitespace) are returned as ints,
// while empty, non-numeric, zero, or negative entries produce an error.
func TestParseConcurrencyList(t *testing.T) {
	cases := []struct {
		name        string
		input       string
		want        []int
		wantErr     bool
		errContains string
	}{
		{name: "valid single value", input: "4", want: []int{4}},
		{name: "valid multiple values", input: shared.TestConcurrencyList, want: []int{1, 2, 4, 8}},
		{name: "valid with whitespace", input: " 1 , 2 , 4 , 8 ", want: []int{1, 2, 4, 8}},
		{name: "valid single large value", input: "16", want: []int{16}},
		{name: "empty string", input: "", wantErr: true, errContains: shared.TestMsgInvalidConcurrencyLevel},
		{name: "invalid number", input: "1,abc,4", wantErr: true, errContains: shared.TestMsgInvalidConcurrencyLevel},
		{name: "zero value", input: "1,0,4", wantErr: true, errContains: "concurrency level must be positive"},
		{name: "negative value", input: "1,-2,4", wantErr: true, errContains: "concurrency level must be positive"},
		{name: "only whitespace", input: " , , ", wantErr: true, errContains: shared.TestMsgInvalidConcurrencyLevel},
		{name: "large value list", input: "1,2,4,8,16", want: []int{1, 2, 4, 8, 16}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := parseConcurrencyList(tc.input)
			if tc.wantErr {
				// An error is expected; optionally check its message.
				testutil.AssertExpectedError(t, err, "parseConcurrencyList")
				if tc.errContains != "" {
					testutil.AssertErrorContains(t, err, tc.errContains, "parseConcurrencyList")
				}
				return
			}
			testutil.AssertNoError(t, err, "parseConcurrencyList")
			if !equalSlices(got, tc.want) {
				t.Errorf("parseConcurrencyList() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestParseFormatList verifies that comma-separated format lists are split
// and trimmed, and that empty segments (including whitespace-only ones) are
// dropped. Duplicates are preserved as-is.
func TestParseFormatList(t *testing.T) {
	cases := []struct {
		name  string
		input string
		want  []string
	}{
		{name: "single format", input: "json", want: []string{"json"}},
		{name: "multiple formats", input: shared.TestFormatList, want: []string{"json", "yaml", "markdown"}},
		{name: "formats with whitespace", input: " json , yaml , markdown ", want: []string{"json", "yaml", "markdown"}},
		{name: "empty string", input: "", want: []string{}},
		{name: "empty parts", input: "json,,yaml", want: []string{"json", "yaml"}},
		{name: "only whitespace and commas", input: " , , ", want: []string{}},
		{name: "single format with whitespace", input: " markdown ", want: []string{"markdown"}},
		{name: "duplicate formats", input: "json,json,yaml", want: []string{"json", "json", "yaml"}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := parseFormatList(tc.input); !equalSlices(got, tc.want) {
				t.Errorf("parseFormatList() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestGetSourceDescription verifies the human-readable source description:
// an explicit source directory is echoed back verbatim, while an empty one
// reports how many temporary files will be generated.
func TestGetSourceDescription(t *testing.T) {
	// Save original flag values and reset after test
	origSourceDir := sourceDir
	origNumFiles := numFiles
	defer func() {
		sourceDir = origSourceDir
		numFiles = origNumFiles
	}()
	tests := []struct {
		name      string
		sourceDir string // value assigned to the -source flag
		numFiles  int    // value assigned to the -files flag
		want      string
	}{
		{
			name:      "empty source directory with default files",
			sourceDir: "",
			numFiles:  100,
			want:      "temporary files (100 files)",
		},
		{
			name:      "empty source directory with custom files",
			sourceDir: "",
			numFiles:  50,
			want:      "temporary files (50 files)",
		},
		{
			name:      "non-empty source directory",
			sourceDir: "/path/to/source",
			numFiles:  100,
			want:      "/path/to/source",
		},
		{
			name:      "current directory",
			sourceDir: ".",
			numFiles:  100,
			want:      ".",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set flag pointers to test values
			*sourceDir = tt.sourceDir
			*numFiles = tt.numFiles
			got := getSourceDescription()
			if got != tt.want {
				t.Errorf("getSourceDescription() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestRunCollectionBenchmark exercises the file-collection benchmark both
// with generated temporary files (empty -source) and with a real directory
// populated via testutil fixtures. Flag globals are restored afterwards.
func TestRunCollectionBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// Save original flag values
	origSourceDir := sourceDir
	origNumFiles := numFiles
	defer func() {
		sourceDir = origSourceDir
		numFiles = origNumFiles
	}()
	t.Run("success with temp files", func(t *testing.T) {
		// Empty source directory triggers temporary-file generation.
		*sourceDir = ""
		*numFiles = 10
		err := runCollectionBenchmark()
		testutil.AssertNoError(t, err, "runCollectionBenchmark with temp files")
	})
	t.Run("success with real directory", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
			{Name: testFile2, Content: testContent2},
		})
		*sourceDir = tempDir
		*numFiles = 10
		err := runCollectionBenchmark()
		testutil.AssertNoError(t, err, "runCollectionBenchmark with real directory")
	})
}
// TestRunProcessingBenchmark exercises the file-processing benchmark with
// two output formats (json, markdown) and different concurrency levels,
// using fixture files in a temp directory. Flag globals are restored afterwards.
func TestRunProcessingBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// Save original flag values
	origSourceDir := sourceDir
	origFormat := format
	origConcurrency := concurrency
	defer func() {
		sourceDir = origSourceDir
		format = origFormat
		concurrency = origConcurrency
	}()
	t.Run("success with json format", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
			{Name: testFile2, Content: testContent2},
		})
		*sourceDir = tempDir
		*format = testJSON
		*concurrency = 2
		err := runProcessingBenchmark()
		testutil.AssertNoError(t, err, "runProcessingBenchmark with json")
	})
	t.Run("success with markdown format", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})
		*sourceDir = tempDir
		*format = testMarkdown
		*concurrency = 1
		err := runProcessingBenchmark()
		testutil.AssertNoError(t, err, "runProcessingBenchmark with markdown")
	})
}
// TestRunConcurrencyBenchmark checks both the success path (a valid
// comma-separated concurrency list) and the failure path (a non-numeric
// list) of the concurrency benchmark. Flag globals are restored afterwards.
func TestRunConcurrencyBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// Save original flag values
	origSourceDir := sourceDir
	origFormat := format
	origConcurrencyList := concurrencyList
	defer func() {
		sourceDir = origSourceDir
		format = origFormat
		concurrencyList = origConcurrencyList
	}()
	t.Run("success with valid concurrency list", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})
		*sourceDir = tempDir
		*format = testJSON
		*concurrencyList = testConcurrency
		err := runConcurrencyBenchmark()
		testutil.AssertNoError(t, err, "runConcurrencyBenchmark")
	})
	t.Run("error with invalid concurrency list", func(t *testing.T) {
		tempDir := t.TempDir()
		*sourceDir = tempDir
		*format = testJSON
		// Non-numeric list should be rejected before any benchmarking runs.
		*concurrencyList = "invalid"
		err := runConcurrencyBenchmark()
		testutil.AssertExpectedError(t, err, "runConcurrencyBenchmark with invalid list")
		testutil.AssertErrorContains(t, err, "invalid concurrency list", "runConcurrencyBenchmark")
	})
}
// TestRunFormatBenchmark runs the format benchmark against fixture files
// with both a multi-format list and a single format. Flag globals are
// restored afterwards.
func TestRunFormatBenchmark(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// Save original flag values
	origSourceDir := sourceDir
	origFormatList := formatList
	defer func() {
		sourceDir = origSourceDir
		formatList = origFormatList
	}()
	t.Run("success with valid format list", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})
		*sourceDir = tempDir
		*formatList = "json,yaml"
		err := runFormatBenchmark()
		testutil.AssertNoError(t, err, "runFormatBenchmark")
	})
	t.Run("success with single format", func(t *testing.T) {
		tempDir := t.TempDir()
		testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
			{Name: testFile1, Content: testContent1},
		})
		*sourceDir = tempDir
		*formatList = testMarkdown
		err := runFormatBenchmark()
		testutil.AssertNoError(t, err, "runFormatBenchmark with single format")
	})
}
// TestRunBenchmarks drives the top-level dispatcher through every supported
// benchmark type plus an invalid one, checking that unknown types are
// rejected. All mutated flag globals are restored afterwards.
func TestRunBenchmarks(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// Save original flag values
	origBenchmarkType := benchmarkType
	origSourceDir := sourceDir
	origConcurrencyList := concurrencyList
	origFormatList := formatList
	defer func() {
		benchmarkType = origBenchmarkType
		sourceDir = origSourceDir
		concurrencyList = origConcurrencyList
		formatList = origFormatList
	}()
	tempDir := t.TempDir()
	testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
		{Name: testFile1, Content: testContent1},
	})
	tests := []struct {
		name          string
		benchmarkType string // value for the -type flag
		wantErr       bool
		errContains   string
	}{
		{
			name:          "all benchmarks",
			benchmarkType: "all",
			wantErr:       false,
		},
		{
			name:          "collection benchmark",
			benchmarkType: "collection",
			wantErr:       false,
		},
		{
			name:          "processing benchmark",
			benchmarkType: "processing",
			wantErr:       false,
		},
		{
			name:          "concurrency benchmark",
			benchmarkType: "concurrency",
			wantErr:       false,
		},
		{
			name:          "format benchmark",
			benchmarkType: "format",
			wantErr:       false,
		},
		{
			name:          "invalid benchmark type",
			benchmarkType: "invalid",
			wantErr:       true,
			errContains:   "invalid benchmark type",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Each subtest gets the same fixtures; only the type varies.
			*benchmarkType = tt.benchmarkType
			*sourceDir = tempDir
			*concurrencyList = testConcurrency
			*formatList = testMarkdown
			err := runBenchmarks()
			if tt.wantErr {
				testutil.AssertExpectedError(t, err, "runBenchmarks")
				if tt.errContains != "" {
					testutil.AssertErrorContains(t, err, tt.errContains, "runBenchmarks")
				}
			} else {
				testutil.AssertNoError(t, err, "runBenchmarks")
			}
		})
	}
}
// TestMainFunction covers the main entry point's logic indirectly via
// runBenchmarks, since main() itself calls os.Exit and cannot be invoked
// safely from a test.
func TestMainFunction(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// We can't easily test main() directly due to os.Exit calls,
	// but we can test runBenchmarks() which contains the main logic
	tempDir := t.TempDir()
	testutil.CreateTestFiles(t, tempDir, []testutil.FileSpec{
		{Name: testFile1, Content: testContent1},
	})
	// Save original flag values
	origBenchmarkType := benchmarkType
	origSourceDir := sourceDir
	defer func() {
		benchmarkType = origBenchmarkType
		sourceDir = origSourceDir
	}()
	*benchmarkType = testCollection
	*sourceDir = tempDir
	err := runBenchmarks()
	testutil.AssertNoError(t, err, "runBenchmarks through main logic path")
}
// TestFlagInitialization checks that resetFlags restores every CLI flag to
// its documented default: empty source, "all" type, "json" format, CPU-count
// concurrency, the shared list constants, and 100 files.
func TestFlagInitialization(t *testing.T) {
	resetFlags()
	if v := *sourceDir; v != "" {
		t.Errorf("sourceDir default should be empty, got %v", v)
	}
	if v := *benchmarkType; v != testAll {
		t.Errorf("benchmarkType default should be 'all', got %v", v)
	}
	if v := *format; v != testJSON {
		t.Errorf("format default should be 'json', got %v", v)
	}
	// Default concurrency tracks the machine's logical CPU count.
	if cpus := runtime.NumCPU(); *concurrency != cpus {
		t.Errorf("concurrency default should be %d, got %d", cpus, *concurrency)
	}
	if v := *concurrencyList; v != shared.TestConcurrencyList {
		t.Errorf("concurrencyList default should be '%s', got %v", shared.TestConcurrencyList, v)
	}
	if v := *formatList; v != shared.TestFormatList {
		t.Errorf("formatList default should be '%s', got %v", shared.TestFormatList, v)
	}
	if n := *numFiles; n != 100 {
		t.Errorf("numFiles default should be 100, got %d", n)
	}
}
// TestErrorPropagation verifies that errors raised inside individual
// benchmark runners surface through runBenchmarks with their expected
// messages and structured error codes, and that parseConcurrencyList
// rejects degenerate inputs. Mutated flag globals are restored afterwards.
func TestErrorPropagation(t *testing.T) {
	restore := testutil.SuppressLogs(t)
	defer restore()
	// Save original flag values
	origBenchmarkType := benchmarkType
	origSourceDir := sourceDir
	origConcurrencyList := concurrencyList
	defer func() {
		benchmarkType = origBenchmarkType
		sourceDir = origSourceDir
		concurrencyList = origConcurrencyList
	}()
	tempDir := t.TempDir()
	t.Run("error from concurrency benchmark propagates", func(t *testing.T) {
		*benchmarkType = testConcurrencyT
		*sourceDir = tempDir
		*concurrencyList = "invalid,list"
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks with invalid concurrency")
		testutil.AssertErrorContains(t, err, "invalid concurrency list", "runBenchmarks error propagation")
	})
	t.Run("validation error contains proper error type", func(t *testing.T) {
		*benchmarkType = "invalid-type"
		*sourceDir = tempDir
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks with invalid type")
		// Unknown benchmark types must yield a StructuredError with the
		// validation-format code, not a plain error.
		var validationErr *shared.StructuredError
		if !errors.As(err, &validationErr) {
			t.Errorf("Expected StructuredError, got %T", err)
		} else if validationErr.Code != shared.CodeValidationFormat {
			t.Errorf("Expected validation format error code, got %v", validationErr.Code)
		}
	})
	t.Run("empty levels array returns error", func(t *testing.T) {
		// Test the specific case where all parts are empty after trimming
		_, err := parseConcurrencyList(" , , ")
		testutil.AssertExpectedError(t, err, "parseConcurrencyList with all empty parts")
		testutil.AssertErrorContains(t, err, shared.TestMsgInvalidConcurrencyLevel, "parseConcurrencyList empty levels")
	})
	t.Run("single empty part returns error", func(t *testing.T) {
		// Test case that should never reach the "no valid levels found" condition
		_, err := parseConcurrencyList(" ")
		testutil.AssertExpectedError(t, err, "parseConcurrencyList with single empty part")
		testutil.AssertErrorContains(
			t, err, shared.TestMsgInvalidConcurrencyLevel, "parseConcurrencyList single empty part",
		)
	})
	t.Run("benchmark function error paths", func(t *testing.T) {
		// Test with non-existent source directory to trigger error paths
		nonExistentDir := testNonExistent
		*benchmarkType = testCollection
		*sourceDir = nonExistentDir
		// This should fail as the benchmark package cannot access non-existent directories
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks with non-existent directory")
		testutil.AssertErrorContains(t, err, "file collection benchmark failed",
			"runBenchmarks error contains expected message")
	})
	t.Run("processing benchmark error path", func(t *testing.T) {
		// Test error path for processing benchmark
		nonExistentDir := testNonExistent
		*benchmarkType = "processing"
		*sourceDir = nonExistentDir
		*format = "json"
		*concurrency = 1
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks processing with non-existent directory")
		testutil.AssertErrorContains(t, err, "file processing benchmark failed", "runBenchmarks processing error")
	})
	t.Run("concurrency benchmark error path", func(t *testing.T) {
		// Test error path for concurrency benchmark
		nonExistentDir := testNonExistent
		*benchmarkType = testConcurrencyT
		*sourceDir = nonExistentDir
		*format = "json"
		*concurrencyList = "1,2"
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks concurrency with non-existent directory")
		testutil.AssertErrorContains(t, err, "concurrency benchmark failed", "runBenchmarks concurrency error")
	})
	t.Run("format benchmark error path", func(t *testing.T) {
		// Test error path for format benchmark
		nonExistentDir := testNonExistent
		*benchmarkType = "format"
		*sourceDir = nonExistentDir
		*formatList = "json,yaml"
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks format with non-existent directory")
		testutil.AssertErrorContains(t, err, "format benchmark failed", "runBenchmarks format error")
	})
	t.Run("all benchmarks error path", func(t *testing.T) {
		// Test error path for all benchmarks
		nonExistentDir := testNonExistent
		*benchmarkType = "all"
		*sourceDir = nonExistentDir
		err := runBenchmarks()
		testutil.AssertExpectedError(t, err, "runBenchmarks all with non-existent directory")
		testutil.AssertErrorContains(t, err, "benchmark failed", "runBenchmarks all error")
	})
}
// Benchmark functions
// BenchmarkParseConcurrencyList measures parseConcurrencyList over several
// input shapes, from a single value to a long whitespace-padded list,
// reporting allocations per operation.
func BenchmarkParseConcurrencyList(b *testing.B) {
	inputs := []struct {
		name  string
		input string
	}{
		{name: "single value", input: "4"},
		{name: "multiple values", input: "1,2,4,8"},
		{name: "values with whitespace", input: " 1 , 2 , 4 , 8 , 16 "},
		{name: "large list", input: "1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16"},
	}
	for _, in := range inputs {
		b.Run(in.name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				// Result and error are intentionally discarded; only the
				// parsing cost is measured.
				_, _ = parseConcurrencyList(in.input)
			}
		})
	}
}
// BenchmarkParseFormatList measures parseFormatList over several input
// shapes, from a single format name to a long comma-separated list,
// reporting allocations per operation.
func BenchmarkParseFormatList(b *testing.B) {
	inputs := []struct {
		name  string
		input string
	}{
		{name: "single format", input: "json"},
		{name: "multiple formats", input: shared.TestFormatList},
		{name: "formats with whitespace", input: " json , yaml , markdown , xml , toml "},
		{name: "large list", input: "json,yaml,markdown,xml,toml,csv,tsv,html,txt,log"},
	}
	for _, in := range inputs {
		b.Run(in.name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				// Only the parsing cost is measured; the result is discarded.
				_ = parseFormatList(in.input)
			}
		})
	}
}
// Helper functions
// equalSlices reports whether a and b have the same length and contain
// equal elements in the same order. Nil and empty slices compare equal.
func equalSlices[T comparable](a, b []T) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// resetFlags resets flag variables to their defaults for testing.
// It replaces the process-global flag.CommandLine with a fresh FlagSet
// (output discarded so parse errors stay silent) and re-registers every
// benchmark flag so each test observes the documented defaults.
func resetFlags() {
	flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	flag.CommandLine.SetOutput(io.Discard)
	// Reinitialize the flags
	sourceDir = flag.String("source", "", "Source directory to benchmark (uses temp files if empty)")
	benchmarkType = flag.String("type", "all", "Benchmark type: all, collection, processing, concurrency, format")
	format = flag.String("format", "json", "Output format for processing benchmarks")
	concurrency = flag.Int("concurrency", runtime.NumCPU(), "Concurrency level for processing benchmarks")
	concurrencyList = flag.String(
		"concurrency-list", shared.TestConcurrencyList, "Comma-separated list of concurrency levels",
	)
	formatList = flag.String("format-list", shared.TestFormatList, "Comma-separated list of formats")
	numFiles = flag.Int("files", 100, "Number of files to create for benchmarks")
}

View File

@@ -1,101 +1,61 @@
---
# gibidify Configuration Example
# =============================
# This file demonstrates all available configuration options with their defaults
# and validation ranges. Copy this file to one of the following locations:
#
# gibidify configuration example
# Place this file in one of these locations:
# - $XDG_CONFIG_HOME/gibidify/config.yaml
# - $HOME/.config/gibidify/config.yaml
# - Current directory (if no gibidify.yaml output file exists)
# =============================================================================
# BASIC FILE PROCESSING SETTINGS
# =============================================================================
# Maximum size for individual files in bytes
# Default: 5242880 (5MB), Min: 1024 (1KB), Max: 104857600 (100MB)
# File size limit in bytes (default: 5MB)
fileSizeLimit: 5242880
# Directories to ignore during file system traversal
# These are sensible defaults for most projects
# Directories to ignore during scanning
ignoreDirectories:
- vendor # Go vendor directory
- node_modules # Node.js dependencies
- .git # Git repository data
- dist # Distribution/build output
- build # Build artifacts
- target # Maven/Rust build directory
- bower_components # Bower dependencies
- cache # Various cache directories
- tmp # Temporary files
- .next # Next.js build directory
- .nuxt # Nuxt.js build directory
- .vscode # VS Code settings
- .idea # IntelliJ IDEA settings
- __pycache__ # Python cache
- .pytest_cache # Pytest cache
# Maximum number of worker goroutines for concurrent processing
# Default: number of CPU cores, Min: 1, Max: 100
# maxConcurrency: 8
# Supported output formats for validation
# Default: ["json", "yaml", "markdown"]
# supportedFormats:
# - json
# - yaml
# - markdown
# File patterns to include (glob patterns)
# Default: empty (all files), useful for filtering specific file types
# filePatterns:
# - "*.go"
# - "*.py"
# - "*.js"
# - "*.ts"
# - "*.java"
# - "*.c"
# - "*.cpp"
# =============================================================================
# FILE TYPE DETECTION AND CUSTOMIZATION
# =============================================================================
- vendor
- node_modules
- .git
- dist
- build
- target
- bower_components
- cache
- tmp
- .next
- .nuxt
# FileType registry configuration
fileTypes:
# Enable/disable file type detection entirely
# Default: true
# Enable/disable file type detection entirely (default: true)
enabled: true
# Add custom image extensions (beyond built-in: .png, .jpg, .jpeg, .gif, .svg, .ico, .bmp, .tiff, .webp)
# Add custom image extensions
customImageExtensions:
- .avif # AV1 Image File Format
- .heic # High Efficiency Image Container
- .jxl # JPEG XL
- .webp # WebP (if not already included)
- .webp
- .avif
- .heic
- .jxl
# Add custom binary extensions (beyond built-in: .exe, .dll, .so, .dylib, .a, .lib, .obj, .o)
# Add custom binary extensions
customBinaryExtensions:
- .custom # Custom binary format
- .proprietary # Proprietary format
- .blob # Binary large object
- .custom
- .proprietary
- .blob
# Add custom language mappings (extension -> language name)
# Add custom language mappings
customLanguages:
.zig: zig # Zig language
.odin: odin # Odin language
.v: vlang # V language
.grain: grain # Grain language
.gleam: gleam # Gleam language
.roc: roc # Roc language
.janet: janet # Janet language
.fennel: fennel # Fennel language
.wast: wast # WebAssembly text format
.wat: wat # WebAssembly text format
.zig: zig
.odin: odin
.v: vlang
.grain: grain
.gleam: gleam
.roc: roc
.janet: janet
.fennel: fennel
.wast: wast
.wat: wat
# Disable specific default image extensions
disabledImageExtensions:
- .bmp # Disable bitmap support
- .tiff # Disable TIFF support
- .tif # Disable TIFF support
# Disable specific default binary extensions
disabledBinaryExtensions:
@@ -107,227 +67,18 @@ fileTypes:
- .bat # Don't detect batch files
- .cmd # Don't detect command files
# =============================================================================
# BACKPRESSURE AND MEMORY MANAGEMENT
# =============================================================================
# Maximum concurrency (optional)
maxConcurrency: 16
backpressure:
# Enable backpressure management for memory optimization
# Default: true
enabled: true
# Supported output formats (optional validation)
supportedFormats:
- json
- yaml
- markdown
# Maximum number of files to buffer in the processing pipeline
# Default: 1000, helps prevent memory exhaustion with many small files
maxPendingFiles: 1000
# Maximum number of write operations to buffer
# Default: 100, controls write throughput vs memory usage
maxPendingWrites: 100
# Soft memory usage limit in bytes before triggering backpressure
# Default: 104857600 (100MB)
maxMemoryUsage: 104857600
# Check memory usage every N files processed
# Default: 1000, lower values = more frequent checks but higher overhead
memoryCheckInterval: 1000
# =============================================================================
# RESOURCE LIMITS AND SECURITY
# =============================================================================
resourceLimits:
# Enable resource limits for DoS protection
# Default: true
enabled: true
# Maximum number of files to process
# Default: 10000, Min: 1, Max: 1000000
maxFiles: 10000
# Maximum total size of all files combined in bytes
# Default: 1073741824 (1GB), Min: 1048576 (1MB), Max: 107374182400 (100GB)
maxTotalSize: 1073741824
# Timeout for processing individual files in seconds
# Default: 30, Min: 1, Max: 300 (5 minutes)
fileProcessingTimeoutSec: 30
# Overall timeout for the entire operation in seconds
# Default: 3600 (1 hour), Min: 10, Max: 86400 (24 hours)
overallTimeoutSec: 3600
# Maximum concurrent file reading operations
# Default: 10, Min: 1, Max: 100
maxConcurrentReads: 10
# Rate limit for file processing (files per second)
# Default: 0 (disabled), Min: 0, Max: 10000
rateLimitFilesPerSec: 0
# Hard memory limit in MB - terminates processing if exceeded
# Default: 512, Min: 64, Max: 8192 (8GB)
hardMemoryLimitMB: 512
# Enable graceful degradation under resource pressure
# Default: true - reduces concurrency and buffers when under pressure
enableGracefulDegradation: true
# Enable detailed resource monitoring and metrics
# Default: true - tracks memory, timing, and processing statistics
enableResourceMonitoring: true
# =============================================================================
# OUTPUT FORMATTING AND TEMPLATES
# =============================================================================
output:
# Template selection: "" (default), "minimal", "detailed", "compact", or "custom"
# Default: "" (uses built-in default template)
template: ""
# Metadata inclusion options
metadata:
# Include processing statistics in output
# Default: false
includeStats: false
# Include timestamp when processing was done
# Default: false
includeTimestamp: false
# Include total number of files processed
# Default: false
includeFileCount: false
# Include source directory path
# Default: false
includeSourcePath: false
# Include detected file types summary
# Default: false
includeFileTypes: false
# Include processing time information
# Default: false
includeProcessingTime: false
# Include total size of processed files
# Default: false
includeTotalSize: false
# Include detailed processing metrics
# Default: false
includeMetrics: false
# Markdown-specific formatting options
markdown:
# Wrap file content in code blocks
# Default: false
useCodeBlocks: false
# Include language identifier in code blocks
# Default: false
includeLanguage: false
# Header level for file sections (1-6)
# Default: 0 (uses template default, typically 2)
headerLevel: 0
# Generate table of contents
# Default: false
tableOfContents: false
# Use collapsible sections for large files
# Default: false
useCollapsible: false
# Enable syntax highlighting hints
# Default: false
syntaxHighlighting: false
# Include line numbers in code blocks
# Default: false
lineNumbers: false
# Automatically fold files longer than maxLineLength
# Default: false
foldLongFiles: false
# Maximum line length before wrapping/folding
# Default: 0 (no limit)
maxLineLength: 0
# Custom CSS to include in markdown output
# Default: "" (no custom CSS)
customCSS: ""
# Custom template overrides (only used when template is "custom")
custom:
# Custom header template (supports Go template syntax)
header: ""
# Custom footer template
footer: ""
# Custom file header template (prepended to each file)
fileHeader: ""
# Custom file footer template (appended to each file)
fileFooter: ""
# Custom template variables accessible in all templates
variables:
# Example variables - customize as needed
project_name: "My Project"
author: "Developer Name"
version: "1.0.0"
description: "Generated code aggregation"
# Add any custom key-value pairs here
# =============================================================================
# EXAMPLES OF COMMON CONFIGURATIONS
# =============================================================================
# Example 1: Minimal configuration for quick code review
# fileSizeLimit: 1048576 # 1MB limit for faster processing
# maxConcurrency: 4 # Lower concurrency for stability
# ignoreDirectories: [".git", "node_modules", "vendor"]
# output:
# template: "minimal"
# metadata:
# includeStats: true
# Example 2: High-performance configuration for large codebases
# fileSizeLimit: 10485760 # 10MB limit
# maxConcurrency: 16 # High concurrency
# backpressure:
# maxPendingFiles: 5000 # Larger buffers
# maxMemoryUsage: 536870912 # 512MB memory
# resourceLimits:
# maxFiles: 100000 # Process more files
# maxTotalSize: 10737418240 # 10GB total size
# Example 3: Security-focused configuration
# resourceLimits:
# maxFiles: 1000 # Strict file limit
# maxTotalSize: 104857600 # 100MB total limit
# fileProcessingTimeoutSec: 10 # Short timeout
# overallTimeoutSec: 300 # 5-minute overall limit
# hardMemoryLimitMB: 256 # Lower memory limit
# rateLimitFilesPerSec: 50 # Rate limiting enabled
# Example 4: Documentation-friendly output
# output:
# template: "detailed"
# metadata:
# includeStats: true
# includeTimestamp: true
# includeFileCount: true
# includeSourcePath: true
# markdown:
# useCodeBlocks: true
# includeLanguage: true
# headerLevel: 2
# tableOfContents: true
# syntaxHighlighting: true
# File patterns for filtering (optional)
filePatterns:
- "*.go"
- "*.py"
- "*.js"
- "*.ts"

79
config.yaml.example Normal file
View File

@@ -0,0 +1,79 @@
# Gibidify Configuration Example
# This file demonstrates all available configuration options
# File size limit for individual files (in bytes)
# Default: 5242880 (5MB), Min: 1024 (1KB), Max: 104857600 (100MB)
fileSizeLimit: 5242880
# Directories to ignore during traversal
ignoreDirectories:
- vendor
- node_modules
- .git
- dist
- build
- target
- bower_components
- cache
- tmp
# File type detection and filtering
fileTypes:
enabled: true
customImageExtensions: []
customBinaryExtensions: []
customLanguages: {}
disabledImageExtensions: []
disabledBinaryExtensions: []
disabledLanguageExtensions: []
# Back-pressure management for memory optimization
backpressure:
enabled: true
maxPendingFiles: 1000 # Max files in channel buffer
maxPendingWrites: 100 # Max writes in channel buffer
maxMemoryUsage: 104857600 # 100MB soft memory limit
memoryCheckInterval: 1000 # Check memory every N files
# Resource limits for DoS protection and security
resourceLimits:
enabled: true
# File processing limits
maxFiles: 10000 # Maximum number of files to process
maxTotalSize: 1073741824 # Maximum total size (1GB)
# Timeout limits (in seconds)
fileProcessingTimeoutSec: 30 # Timeout for individual file processing
overallTimeoutSec: 3600 # Overall processing timeout (1 hour)
# Concurrency limits
maxConcurrentReads: 10 # Maximum concurrent file reading operations
# Rate limiting (0 = disabled)
rateLimitFilesPerSec: 0 # Files per second rate limit
# Memory limits
hardMemoryLimitMB: 512 # Hard memory limit (512MB)
# Safety features
enableGracefulDegradation: true # Enable graceful degradation on resource pressure
enableResourceMonitoring: true # Enable detailed resource monitoring
# Optional: Maximum concurrency for workers
# Default: number of CPU cores
# maxConcurrency: 4
# Optional: Supported output formats
# Default: ["json", "yaml", "markdown"]
# supportedFormats:
# - json
# - yaml
# - markdown
# Optional: File patterns to include
# Default: all files (empty list means no pattern filtering)
# filePatterns:
# - "*.go"
# - "*.py"
# - "*.js"

View File

@@ -4,223 +4,171 @@ import (
"testing"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/shared"
)
// TestFileTypeRegistryDefaultValues tests default configuration values.
func TestFileTypeRegistryDefaultValues(t *testing.T) {
// TestFileTypeRegistryConfig tests the FileTypeRegistry configuration functionality.
func TestFileTypeRegistryConfig(t *testing.T) {
// Test default values
t.Run("DefaultValues", func(t *testing.T) {
viper.Reset()
SetDefaultConfig()
setDefaultConfig()
verifyDefaultValues(t)
}
if !GetFileTypesEnabled() {
t.Error("Expected file types to be enabled by default")
}
// TestFileTypeRegistrySetGet tests configuration setting and getting.
func TestFileTypeRegistrySetGet(t *testing.T) {
if len(GetCustomImageExtensions()) != 0 {
t.Error("Expected custom image extensions to be empty by default")
}
if len(GetCustomBinaryExtensions()) != 0 {
t.Error("Expected custom binary extensions to be empty by default")
}
if len(GetCustomLanguages()) != 0 {
t.Error("Expected custom languages to be empty by default")
}
if len(GetDisabledImageExtensions()) != 0 {
t.Error("Expected disabled image extensions to be empty by default")
}
if len(GetDisabledBinaryExtensions()) != 0 {
t.Error("Expected disabled binary extensions to be empty by default")
}
if len(GetDisabledLanguageExtensions()) != 0 {
t.Error("Expected disabled language extensions to be empty by default")
}
})
// Test configuration setting and getting
t.Run("ConfigurationSetGet", func(t *testing.T) {
viper.Reset()
// Set test values
setTestConfiguration()
viper.Set("fileTypes.enabled", false)
viper.Set("fileTypes.customImageExtensions", []string{".webp", ".avif"})
viper.Set("fileTypes.customBinaryExtensions", []string{".custom", ".mybin"})
viper.Set("fileTypes.customLanguages", map[string]string{
".zig": "zig",
".v": "vlang",
})
viper.Set("fileTypes.disabledImageExtensions", []string{".gif", ".bmp"})
viper.Set("fileTypes.disabledBinaryExtensions", []string{".exe", ".dll"})
viper.Set("fileTypes.disabledLanguageExtensions", []string{".rb", ".pl"})
// Test getter functions
verifyTestConfiguration(t)
}
if GetFileTypesEnabled() {
t.Error("Expected file types to be disabled")
}
// TestFileTypeRegistryValidationSuccess tests successful validation.
func TestFileTypeRegistryValidationSuccess(t *testing.T) {
customImages := GetCustomImageExtensions()
expectedImages := []string{".webp", ".avif"}
if len(customImages) != len(expectedImages) {
t.Errorf("Expected %d custom image extensions, got %d", len(expectedImages), len(customImages))
}
for i, ext := range expectedImages {
if customImages[i] != ext {
t.Errorf("Expected custom image extension %s, got %s", ext, customImages[i])
}
}
customBinary := GetCustomBinaryExtensions()
expectedBinary := []string{".custom", ".mybin"}
if len(customBinary) != len(expectedBinary) {
t.Errorf("Expected %d custom binary extensions, got %d", len(expectedBinary), len(customBinary))
}
for i, ext := range expectedBinary {
if customBinary[i] != ext {
t.Errorf("Expected custom binary extension %s, got %s", ext, customBinary[i])
}
}
customLangs := GetCustomLanguages()
expectedLangs := map[string]string{
".zig": "zig",
".v": "vlang",
}
if len(customLangs) != len(expectedLangs) {
t.Errorf("Expected %d custom languages, got %d", len(expectedLangs), len(customLangs))
}
for ext, lang := range expectedLangs {
if customLangs[ext] != lang {
t.Errorf("Expected custom language %s -> %s, got %s", ext, lang, customLangs[ext])
}
}
disabledImages := GetDisabledImageExtensions()
expectedDisabledImages := []string{".gif", ".bmp"}
if len(disabledImages) != len(expectedDisabledImages) {
t.Errorf("Expected %d disabled image extensions, got %d", len(expectedDisabledImages), len(disabledImages))
}
disabledBinary := GetDisabledBinaryExtensions()
expectedDisabledBinary := []string{".exe", ".dll"}
if len(disabledBinary) != len(expectedDisabledBinary) {
t.Errorf("Expected %d disabled binary extensions, got %d", len(expectedDisabledBinary), len(disabledBinary))
}
disabledLangs := GetDisabledLanguageExtensions()
expectedDisabledLangs := []string{".rb", ".pl"}
if len(disabledLangs) != len(expectedDisabledLangs) {
t.Errorf("Expected %d disabled language extensions, got %d", len(expectedDisabledLangs), len(disabledLangs))
}
})
// Test validation
t.Run("ValidationSuccess", func(t *testing.T) {
viper.Reset()
SetDefaultConfig()
setDefaultConfig()
// Set valid configuration
setValidConfiguration()
viper.Set("fileTypes.customImageExtensions", []string{".webp", ".avif"})
viper.Set("fileTypes.customBinaryExtensions", []string{".custom"})
viper.Set("fileTypes.customLanguages", map[string]string{
".zig": "zig",
".v": "vlang",
})
err := ValidateConfig()
if err != nil {
t.Errorf("Expected validation to pass with valid config, got error: %v", err)
}
}
})
// TestFileTypeRegistryValidationFailure tests validation failures.
func TestFileTypeRegistryValidationFailure(t *testing.T) {
t.Run("ValidationFailure", func(t *testing.T) {
// Test invalid custom image extensions
testInvalidImageExtensions(t)
// Test invalid custom binary extensions
testInvalidBinaryExtensions(t)
// Test invalid custom languages
testInvalidCustomLanguages(t)
}
// verifyDefaultValues asserts that the default file-type configuration is in
// effect: file types enabled, with no custom or disabled entries registered.
func verifyDefaultValues(t *testing.T) {
	t.Helper()
	if enabled := FileTypesEnabled(); !enabled {
		t.Error("Expected file types to be enabled by default")
	}
	verifyEmptySlice(t, CustomImageExtensions(), "custom image extensions")
	verifyEmptySlice(t, CustomBinaryExtensions(), "custom binary extensions")
	verifyEmptyMap(t, CustomLanguages(), "custom languages")
	// The three disabled-extension collections share one check, so drive
	// them through a small table.
	for _, probe := range []struct {
		got  []string
		name string
	}{
		{DisabledImageExtensions(), "disabled image extensions"},
		{DisabledBinaryExtensions(), "disabled binary extensions"},
		{DisabledLanguageExtensions(), "disabled language extensions"},
	} {
		verifyEmptySlice(t, probe.got, probe.name)
	}
}
// setTestConfiguration seeds viper with the non-default file-type values used
// by the set/get round-trip test.
//
// All keys are referenced through the shared.ConfigKey* constants (the same
// constants the package getters read) so the test cannot silently drift from
// the production key names; previously three keys were raw string literals.
func setTestConfiguration() {
	viper.Set(shared.ConfigKeyFileTypesEnabled, false)
	viper.Set(shared.ConfigKeyFileTypesCustomImageExtensions, []string{".webp", ".avif"})
	viper.Set(shared.ConfigKeyFileTypesCustomBinaryExtensions, []string{shared.TestExtensionCustom, ".mybin"})
	viper.Set(
		shared.ConfigKeyFileTypesCustomLanguages, map[string]string{
			".zig": "zig",
			".v":   "vlang",
		},
	)
	viper.Set(shared.ConfigKeyFileTypesDisabledImageExtensions, []string{".gif", ".bmp"})
	viper.Set(shared.ConfigKeyFileTypesDisabledBinaryExtensions, []string{".exe", ".dll"})
	viper.Set(shared.ConfigKeyFileTypesDisabledLanguageExts, []string{".rb", ".pl"})
}
// verifyTestConfiguration checks that every getter reflects the values that
// setTestConfiguration wrote into viper.
func verifyTestConfiguration(t *testing.T) {
	t.Helper()
	if got := FileTypesEnabled(); got {
		t.Error("Expected file types to be disabled")
	}
	verifyStringSlice(t, CustomImageExtensions(), []string{".webp", ".avif"}, "custom image extensions")
	verifyStringSlice(t, CustomBinaryExtensions(), []string{".custom", ".mybin"}, "custom binary extensions")
	verifyStringMap(
		t,
		CustomLanguages(),
		map[string]string{".zig": "zig", ".v": "vlang"},
		"custom languages",
	)
	// Disabled-extension getters are only length-checked, matching the
	// original test's intent.
	for _, tc := range []struct {
		got      []string
		expected []string
		label    string
	}{
		{DisabledImageExtensions(), []string{".gif", ".bmp"}, "disabled image extensions"},
		{DisabledBinaryExtensions(), []string{".exe", ".dll"}, "disabled binary extensions"},
		{DisabledLanguageExtensions(), []string{".rb", ".pl"}, "disabled language extensions"},
	} {
		verifyStringSliceLength(t, tc.got, tc.expected, tc.label)
	}
}
// setValidConfiguration populates viper with a custom file-type configuration
// that ValidateConfig is expected to accept.
func setValidConfiguration() {
	customLanguages := map[string]string{
		".zig": "zig",
		".v":   "vlang",
	}
	viper.Set(shared.ConfigKeyFileTypesCustomImageExtensions, []string{".webp", ".avif"})
	viper.Set(shared.ConfigKeyFileTypesCustomBinaryExtensions, []string{shared.TestExtensionCustom})
	viper.Set(shared.ConfigKeyFileTypesCustomLanguages, customLanguages)
}
// testInvalidImageExtensions tests validation failure with invalid image extensions.
func testInvalidImageExtensions(t *testing.T) {
t.Helper()
viper.Reset()
SetDefaultConfig()
viper.Set(shared.ConfigKeyFileTypesCustomImageExtensions, []string{"", "webp"}) // Empty and missing dot
setDefaultConfig()
viper.Set("fileTypes.customImageExtensions", []string{"", "webp"}) // Empty and missing dot
err := ValidateConfig()
if err == nil {
t.Error("Expected validation to fail with invalid custom image extensions")
}
}
// testInvalidBinaryExtensions tests validation failure with invalid binary extensions.
func testInvalidBinaryExtensions(t *testing.T) {
t.Helper()
// Test invalid custom binary extensions
viper.Reset()
SetDefaultConfig()
viper.Set(shared.ConfigKeyFileTypesCustomBinaryExtensions, []string{"custom"}) // Missing dot
setDefaultConfig()
viper.Set("fileTypes.customBinaryExtensions", []string{"custom"}) // Missing dot
err := ValidateConfig()
err = ValidateConfig()
if err == nil {
t.Error("Expected validation to fail with invalid custom binary extensions")
}
}
// testInvalidCustomLanguages tests validation failure with invalid custom languages.
func testInvalidCustomLanguages(t *testing.T) {
t.Helper()
// Test invalid custom languages
viper.Reset()
SetDefaultConfig()
viper.Set(
shared.ConfigKeyFileTypesCustomLanguages, map[string]string{
setDefaultConfig()
viper.Set("fileTypes.customLanguages", map[string]string{
"zig": "zig", // Missing dot in extension
".v": "", // Empty language
},
)
})
err := ValidateConfig()
err = ValidateConfig()
if err == nil {
t.Error("Expected validation to fail with invalid custom languages")
}
}
// verifyEmptySlice reports a test error unless slice has no elements.
func verifyEmptySlice(t *testing.T, slice []string, name string) {
	t.Helper()
	if len(slice) == 0 {
		return
	}
	t.Errorf("Expected %s to be empty by default", name)
}
// verifyEmptyMap reports a test error unless m has no entries.
func verifyEmptyMap(t *testing.T, m map[string]string, name string) {
	t.Helper()
	if len(m) == 0 {
		return
	}
	t.Errorf("Expected %s to be empty by default", name)
}
// verifyStringSlice fails the test when actual differs from expected in
// length or in any element (compared position by position). On a length
// mismatch the element comparison is skipped to avoid out-of-range reads.
func verifyStringSlice(t *testing.T, actual, expected []string, name string) {
	t.Helper()
	if len(expected) != len(actual) {
		t.Errorf(shared.TestFmtExpectedCount, len(expected), name, len(actual))
		return
	}
	for i := range expected {
		if want, got := expected[i], actual[i]; want != got {
			t.Errorf("Expected %s %s, got %s", name, want, got)
		}
	}
}
// verifyStringMap fails the test when actual differs from expected in size or
// in any key's mapped value. Extra keys in actual are caught by the size
// check; missing keys surface as zero-value mismatches.
func verifyStringMap(t *testing.T, actual, expected map[string]string, name string) {
	t.Helper()
	if len(expected) != len(actual) {
		t.Errorf(shared.TestFmtExpectedCount, len(expected), name, len(actual))
		return
	}
	for key, want := range expected {
		if got := actual[key]; got != want {
			t.Errorf("Expected %s %s -> %s, got %s", name, key, want, got)
		}
	}
}
// verifyStringSliceLength verifies that a string slice has the expected length.
func verifyStringSliceLength(t *testing.T, actual, expected []string, name string) {
t.Helper()
if len(actual) != len(expected) {
t.Errorf(shared.TestFmtExpectedCount, len(expected), name, len(actual))
}
})
}

61
config/constants.go Normal file
View File

@@ -0,0 +1,61 @@
package config
// Limits accepted by the configuration validator. Each tunable has a default
// together with the minimum and maximum values it may be set to. Byte-size
// values are written as arithmetic/shift expressions so the magnitudes are
// readable at a glance; the resulting values are unchanged.
const (
	// DefaultFileSizeLimit is the default per-file size cap (5MB).
	DefaultFileSizeLimit = 5 * 1024 * 1024
	// MinFileSizeLimit is the smallest accepted file size limit (1KB).
	MinFileSizeLimit = 1 << 10
	// MaxFileSizeLimit is the largest accepted file size limit (100MB).
	MaxFileSizeLimit = 100 * 1024 * 1024

	// Resource limit constants for DoS protection.

	// DefaultMaxFiles is the default cap on the number of files processed.
	DefaultMaxFiles = 10000
	// MinMaxFiles is the smallest accepted file count limit.
	MinMaxFiles = 1
	// MaxMaxFiles is the largest accepted file count limit.
	MaxMaxFiles = 1000000

	// DefaultMaxTotalSize is the default cap on the combined input size (1GB).
	DefaultMaxTotalSize = 1 << 30
	// MinMaxTotalSize is the smallest accepted total size limit (1MB).
	MinMaxTotalSize = 1 << 20
	// MaxMaxTotalSize is the largest accepted total size limit (100GB).
	MaxMaxTotalSize = 100 << 30

	// DefaultFileProcessingTimeoutSec is the default per-file processing timeout (30 seconds).
	DefaultFileProcessingTimeoutSec = 30
	// MinFileProcessingTimeoutSec is the smallest accepted per-file timeout (1 second).
	MinFileProcessingTimeoutSec = 1
	// MaxFileProcessingTimeoutSec is the largest accepted per-file timeout (300 seconds).
	MaxFileProcessingTimeoutSec = 300

	// DefaultOverallTimeoutSec is the default overall processing timeout (1 hour).
	DefaultOverallTimeoutSec = 3600
	// MinOverallTimeoutSec is the smallest accepted overall timeout (10 seconds).
	MinOverallTimeoutSec = 10
	// MaxOverallTimeoutSec is the largest accepted overall timeout (24 hours).
	MaxOverallTimeoutSec = 86400

	// DefaultMaxConcurrentReads is the default cap on concurrent file reads.
	DefaultMaxConcurrentReads = 10
	// MinMaxConcurrentReads is the smallest accepted concurrent-read cap.
	MinMaxConcurrentReads = 1
	// MaxMaxConcurrentReads is the largest accepted concurrent-read cap.
	MaxMaxConcurrentReads = 100

	// DefaultRateLimitFilesPerSec is the default file-rate limit (0 = disabled).
	DefaultRateLimitFilesPerSec = 0
	// MinRateLimitFilesPerSec is the smallest accepted rate limit.
	MinRateLimitFilesPerSec = 0
	// MaxRateLimitFilesPerSec is the largest accepted rate limit.
	MaxRateLimitFilesPerSec = 10000

	// DefaultHardMemoryLimitMB is the default hard memory limit (512MB).
	DefaultHardMemoryLimitMB = 512
	// MinHardMemoryLimitMB is the smallest accepted hard memory limit (64MB).
	MinHardMemoryLimitMB = 64
	// MaxHardMemoryLimitMB is the largest accepted hard memory limit (8192MB = 8GB).
	MaxHardMemoryLimitMB = 8192
)

View File

@@ -1,331 +1,157 @@
// Package config handles application configuration management.
package config
import (
"strings"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/shared"
)
// FileSizeLimit returns the file size limit from configuration.
// Default: ConfigFileSizeLimitDefault (5MB).
func FileSizeLimit() int64 {
return viper.GetInt64(shared.ConfigKeyFileSizeLimit)
// GetFileSizeLimit returns the file size limit from configuration.
func GetFileSizeLimit() int64 {
return viper.GetInt64("fileSizeLimit")
}
// IgnoredDirectories returns the list of directories to ignore.
// Default: ConfigIgnoredDirectoriesDefault.
func IgnoredDirectories() []string {
return viper.GetStringSlice(shared.ConfigKeyIgnoreDirectories)
// GetIgnoredDirectories returns the list of directories to ignore.
func GetIgnoredDirectories() []string {
return viper.GetStringSlice("ignoreDirectories")
}
// MaxConcurrency returns the maximum concurrency level.
// Returns 0 if not set (caller should determine appropriate default).
func MaxConcurrency() int {
return viper.GetInt(shared.ConfigKeyMaxConcurrency)
// GetMaxConcurrency returns the maximum concurrency level.
func GetMaxConcurrency() int {
return viper.GetInt("maxConcurrency")
}
// SupportedFormats returns the list of supported output formats.
// Returns empty slice if not set.
func SupportedFormats() []string {
return viper.GetStringSlice(shared.ConfigKeySupportedFormats)
// GetSupportedFormats returns the list of supported output formats.
func GetSupportedFormats() []string {
return viper.GetStringSlice("supportedFormats")
}
// FilePatterns returns the list of file patterns.
// Returns empty slice if not set.
func FilePatterns() []string {
return viper.GetStringSlice(shared.ConfigKeyFilePatterns)
// GetFilePatterns returns the list of file patterns.
func GetFilePatterns() []string {
return viper.GetStringSlice("filePatterns")
}
// IsValidFormat checks if the given format is valid.
func IsValidFormat(format string) bool {
format = strings.ToLower(strings.TrimSpace(format))
supportedFormats := map[string]bool{
shared.FormatJSON: true,
shared.FormatYAML: true,
shared.FormatMarkdown: true,
"json": true,
"yaml": true,
"markdown": true,
}
return supportedFormats[format]
}
// FileTypesEnabled returns whether file types are enabled.
// Default: ConfigFileTypesEnabledDefault (true).
func FileTypesEnabled() bool {
return viper.GetBool(shared.ConfigKeyFileTypesEnabled)
// GetFileTypesEnabled returns whether file types are enabled.
func GetFileTypesEnabled() bool {
return viper.GetBool("fileTypes.enabled")
}
// CustomImageExtensions returns custom image extensions.
// Default: ConfigCustomImageExtensionsDefault (empty).
func CustomImageExtensions() []string {
return viper.GetStringSlice(shared.ConfigKeyFileTypesCustomImageExtensions)
// GetCustomImageExtensions returns custom image extensions.
func GetCustomImageExtensions() []string {
return viper.GetStringSlice("fileTypes.customImageExtensions")
}
// CustomBinaryExtensions returns custom binary extensions.
// Default: ConfigCustomBinaryExtensionsDefault (empty).
func CustomBinaryExtensions() []string {
return viper.GetStringSlice(shared.ConfigKeyFileTypesCustomBinaryExtensions)
// GetCustomBinaryExtensions returns custom binary extensions.
func GetCustomBinaryExtensions() []string {
return viper.GetStringSlice("fileTypes.customBinaryExtensions")
}
// CustomLanguages returns custom language mappings.
// Default: ConfigCustomLanguagesDefault (empty).
func CustomLanguages() map[string]string {
return viper.GetStringMapString(shared.ConfigKeyFileTypesCustomLanguages)
// GetCustomLanguages returns custom language mappings.
func GetCustomLanguages() map[string]string {
return viper.GetStringMapString("fileTypes.customLanguages")
}
// DisabledImageExtensions returns disabled image extensions.
// Default: ConfigDisabledImageExtensionsDefault (empty).
func DisabledImageExtensions() []string {
return viper.GetStringSlice(shared.ConfigKeyFileTypesDisabledImageExtensions)
// GetDisabledImageExtensions returns disabled image extensions.
func GetDisabledImageExtensions() []string {
return viper.GetStringSlice("fileTypes.disabledImageExtensions")
}
// DisabledBinaryExtensions returns disabled binary extensions.
// Default: ConfigDisabledBinaryExtensionsDefault (empty).
func DisabledBinaryExtensions() []string {
return viper.GetStringSlice(shared.ConfigKeyFileTypesDisabledBinaryExtensions)
// GetDisabledBinaryExtensions returns disabled binary extensions.
func GetDisabledBinaryExtensions() []string {
return viper.GetStringSlice("fileTypes.disabledBinaryExtensions")
}
// DisabledLanguageExtensions returns disabled language extensions.
// Default: ConfigDisabledLanguageExtensionsDefault (empty).
func DisabledLanguageExtensions() []string {
return viper.GetStringSlice(shared.ConfigKeyFileTypesDisabledLanguageExts)
// GetDisabledLanguageExtensions returns disabled language extensions.
func GetDisabledLanguageExtensions() []string {
return viper.GetStringSlice("fileTypes.disabledLanguageExtensions")
}
// Backpressure getters
// BackpressureEnabled returns whether backpressure is enabled.
// Default: ConfigBackpressureEnabledDefault (true).
func BackpressureEnabled() bool {
return viper.GetBool(shared.ConfigKeyBackpressureEnabled)
// GetBackpressureEnabled returns whether backpressure is enabled.
func GetBackpressureEnabled() bool {
return viper.GetBool("backpressure.enabled")
}
// MaxPendingFiles returns the maximum pending files.
// Default: ConfigMaxPendingFilesDefault (1000).
func MaxPendingFiles() int {
return viper.GetInt(shared.ConfigKeyBackpressureMaxPendingFiles)
// GetMaxPendingFiles returns the maximum pending files.
func GetMaxPendingFiles() int {
return viper.GetInt("backpressure.maxPendingFiles")
}
// MaxPendingWrites returns the maximum pending writes.
// Default: ConfigMaxPendingWritesDefault (100).
func MaxPendingWrites() int {
return viper.GetInt(shared.ConfigKeyBackpressureMaxPendingWrites)
// GetMaxPendingWrites returns the maximum pending writes.
func GetMaxPendingWrites() int {
return viper.GetInt("backpressure.maxPendingWrites")
}
// MaxMemoryUsage returns the maximum memory usage.
// Default: ConfigMaxMemoryUsageDefault (100MB).
func MaxMemoryUsage() int64 {
return viper.GetInt64(shared.ConfigKeyBackpressureMaxMemoryUsage)
// GetMaxMemoryUsage returns the maximum memory usage.
func GetMaxMemoryUsage() int64 {
return viper.GetInt64("backpressure.maxMemoryUsage")
}
// MemoryCheckInterval returns the memory check interval.
// Default: ConfigMemoryCheckIntervalDefault (1000 files).
func MemoryCheckInterval() int {
return viper.GetInt(shared.ConfigKeyBackpressureMemoryCheckInt)
// GetMemoryCheckInterval returns the memory check interval.
func GetMemoryCheckInterval() int {
return viper.GetInt("backpressure.memoryCheckInterval")
}
// Resource limits getters
// ResourceLimitsEnabled returns whether resource limits are enabled.
// Default: ConfigResourceLimitsEnabledDefault (true).
func ResourceLimitsEnabled() bool {
return viper.GetBool(shared.ConfigKeyResourceLimitsEnabled)
// GetResourceLimitsEnabled returns whether resource limits are enabled.
func GetResourceLimitsEnabled() bool {
return viper.GetBool("resourceLimits.enabled")
}
// MaxFiles returns the maximum number of files.
// Default: ConfigMaxFilesDefault (10000).
func MaxFiles() int {
return viper.GetInt(shared.ConfigKeyResourceLimitsMaxFiles)
// GetMaxFiles returns the maximum number of files.
func GetMaxFiles() int {
return viper.GetInt("resourceLimits.maxFiles")
}
// MaxTotalSize returns the maximum total size.
// Default: ConfigMaxTotalSizeDefault (1GB).
func MaxTotalSize() int64 {
return viper.GetInt64(shared.ConfigKeyResourceLimitsMaxTotalSize)
// GetMaxTotalSize returns the maximum total size.
func GetMaxTotalSize() int64 {
return viper.GetInt64("resourceLimits.maxTotalSize")
}
// FileProcessingTimeoutSec returns the file processing timeout in seconds.
// Default: ConfigFileProcessingTimeoutSecDefault (30 seconds).
func FileProcessingTimeoutSec() int {
return viper.GetInt(shared.ConfigKeyResourceLimitsFileProcessingTO)
// GetFileProcessingTimeoutSec returns the file processing timeout in seconds.
func GetFileProcessingTimeoutSec() int {
return viper.GetInt("resourceLimits.fileProcessingTimeoutSec")
}
// OverallTimeoutSec returns the overall timeout in seconds.
// Default: ConfigOverallTimeoutSecDefault (3600 seconds).
func OverallTimeoutSec() int {
return viper.GetInt(shared.ConfigKeyResourceLimitsOverallTO)
// GetOverallTimeoutSec returns the overall timeout in seconds.
func GetOverallTimeoutSec() int {
return viper.GetInt("resourceLimits.overallTimeoutSec")
}
// MaxConcurrentReads returns the maximum concurrent reads.
// Default: ConfigMaxConcurrentReadsDefault (10).
func MaxConcurrentReads() int {
return viper.GetInt(shared.ConfigKeyResourceLimitsMaxConcurrentReads)
// GetMaxConcurrentReads returns the maximum concurrent reads.
func GetMaxConcurrentReads() int {
return viper.GetInt("resourceLimits.maxConcurrentReads")
}
// RateLimitFilesPerSec returns the rate limit files per second.
// Default: ConfigRateLimitFilesPerSecDefault (0 = disabled).
func RateLimitFilesPerSec() int {
return viper.GetInt(shared.ConfigKeyResourceLimitsRateLimitFilesPerSec)
// GetRateLimitFilesPerSec returns the rate limit files per second.
func GetRateLimitFilesPerSec() int {
return viper.GetInt("resourceLimits.rateLimitFilesPerSec")
}
// HardMemoryLimitMB returns the hard memory limit in MB.
// Default: ConfigHardMemoryLimitMBDefault (512MB).
func HardMemoryLimitMB() int {
return viper.GetInt(shared.ConfigKeyResourceLimitsHardMemoryLimitMB)
// GetHardMemoryLimitMB returns the hard memory limit in MB.
func GetHardMemoryLimitMB() int {
return viper.GetInt("resourceLimits.hardMemoryLimitMB")
}
// EnableGracefulDegradation returns whether graceful degradation is enabled.
// Default: ConfigEnableGracefulDegradationDefault (true).
func EnableGracefulDegradation() bool {
return viper.GetBool(shared.ConfigKeyResourceLimitsEnableGracefulDeg)
// GetEnableGracefulDegradation returns whether graceful degradation is enabled.
func GetEnableGracefulDegradation() bool {
return viper.GetBool("resourceLimits.enableGracefulDegradation")
}
// EnableResourceMonitoring returns whether resource monitoring is enabled.
// Default: ConfigEnableResourceMonitoringDefault (true).
func EnableResourceMonitoring() bool {
return viper.GetBool(shared.ConfigKeyResourceLimitsEnableMonitoring)
}
// Template system getters
// OutputTemplate returns the selected output template name.
// Default: ConfigOutputTemplateDefault (empty string).
func OutputTemplate() string {
return viper.GetString(shared.ConfigKeyOutputTemplate)
}
// metadataBool reads the boolean metadata flag stored under the
// "output.metadata." prefix. Every metadata flag defaults to false.
func metadataBool(key string) bool {
	fullKey := "output.metadata." + key
	return viper.GetBool(fullKey)
}
// TemplateMetadataIncludeStats returns whether to include stats in metadata.
func TemplateMetadataIncludeStats() bool {
return metadataBool("includeStats")
}
// TemplateMetadataIncludeTimestamp returns whether to include timestamp in metadata.
func TemplateMetadataIncludeTimestamp() bool {
return metadataBool("includeTimestamp")
}
// TemplateMetadataIncludeFileCount returns whether to include file count in metadata.
func TemplateMetadataIncludeFileCount() bool {
return metadataBool("includeFileCount")
}
// TemplateMetadataIncludeSourcePath returns whether to include source path in metadata.
func TemplateMetadataIncludeSourcePath() bool {
return metadataBool("includeSourcePath")
}
// TemplateMetadataIncludeFileTypes returns whether to include file types in metadata.
func TemplateMetadataIncludeFileTypes() bool {
return metadataBool("includeFileTypes")
}
// TemplateMetadataIncludeProcessingTime returns whether to include processing time in metadata.
func TemplateMetadataIncludeProcessingTime() bool {
return metadataBool("includeProcessingTime")
}
// TemplateMetadataIncludeTotalSize returns whether to include total size in metadata.
func TemplateMetadataIncludeTotalSize() bool {
return metadataBool("includeTotalSize")
}
// TemplateMetadataIncludeMetrics returns whether to include metrics in metadata.
func TemplateMetadataIncludeMetrics() bool {
return metadataBool("includeMetrics")
}
// markdownBool reads the boolean markdown flag stored under the
// "output.markdown." prefix. Every markdown flag defaults to false.
func markdownBool(key string) bool {
	fullKey := "output.markdown." + key
	return viper.GetBool(fullKey)
}
// TemplateMarkdownUseCodeBlocks returns whether to use code blocks in markdown.
func TemplateMarkdownUseCodeBlocks() bool {
return markdownBool("useCodeBlocks")
}
// TemplateMarkdownIncludeLanguage returns whether to include language in code blocks.
func TemplateMarkdownIncludeLanguage() bool {
return markdownBool("includeLanguage")
}
// TemplateMarkdownHeaderLevel returns the header level for file sections.
// Default: ConfigMarkdownHeaderLevelDefault (0).
func TemplateMarkdownHeaderLevel() int {
return viper.GetInt(shared.ConfigKeyOutputMarkdownHeaderLevel)
}
// TemplateMarkdownTableOfContents returns whether to include table of contents.
func TemplateMarkdownTableOfContents() bool {
return markdownBool("tableOfContents")
}
// TemplateMarkdownUseCollapsible returns whether to use collapsible sections.
func TemplateMarkdownUseCollapsible() bool {
return markdownBool("useCollapsible")
}
// TemplateMarkdownSyntaxHighlighting returns whether to enable syntax highlighting.
func TemplateMarkdownSyntaxHighlighting() bool {
return markdownBool("syntaxHighlighting")
}
// TemplateMarkdownLineNumbers returns whether to include line numbers.
func TemplateMarkdownLineNumbers() bool {
return markdownBool("lineNumbers")
}
// TemplateMarkdownFoldLongFiles returns whether to fold long files.
func TemplateMarkdownFoldLongFiles() bool {
return markdownBool("foldLongFiles")
}
// TemplateMarkdownMaxLineLength returns the maximum line length.
// Default: ConfigMarkdownMaxLineLengthDefault (0 = unlimited).
func TemplateMarkdownMaxLineLength() int {
return viper.GetInt(shared.ConfigKeyOutputMarkdownMaxLineLen)
}
// TemplateCustomCSS returns custom CSS for markdown output.
// Default: ConfigMarkdownCustomCSSDefault (empty string).
func TemplateCustomCSS() string {
return viper.GetString(shared.ConfigKeyOutputMarkdownCustomCSS)
}
// TemplateCustomHeader returns custom header template.
// Default: ConfigCustomHeaderDefault (empty string).
func TemplateCustomHeader() string {
return viper.GetString(shared.ConfigKeyOutputCustomHeader)
}
// TemplateCustomFooter returns custom footer template.
// Default: ConfigCustomFooterDefault (empty string).
func TemplateCustomFooter() string {
return viper.GetString(shared.ConfigKeyOutputCustomFooter)
}
// TemplateCustomFileHeader returns custom file header template.
// Default: ConfigCustomFileHeaderDefault (empty string).
func TemplateCustomFileHeader() string {
return viper.GetString(shared.ConfigKeyOutputCustomFileHeader)
}
// TemplateCustomFileFooter returns custom file footer template.
// Default: ConfigCustomFileFooterDefault (empty string).
func TemplateCustomFileFooter() string {
return viper.GetString(shared.ConfigKeyOutputCustomFileFooter)
}
// TemplateVariables returns custom template variables.
// Default: ConfigTemplateVariablesDefault (empty map).
func TemplateVariables() map[string]string {
return viper.GetStringMapString(shared.ConfigKeyOutputVariables)
// GetEnableResourceMonitoring returns whether resource monitoring is enabled.
func GetEnableResourceMonitoring() bool {
return viper.GetBool("resourceLimits.enableResourceMonitoring")
}

View File

@@ -1,492 +0,0 @@
package config_test
import (
"reflect"
"testing"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
// TestConfigGetters tests all configuration getter functions with comprehensive test coverage.
func TestConfigGetters(t *testing.T) {
tests := []struct {
name string
configKey string
configValue any
getterFunc func() any
expectedResult any
}{
// Basic configuration getters
{
name: "GetFileSizeLimit",
configKey: "fileSizeLimit",
configValue: int64(1048576),
getterFunc: func() any { return config.FileSizeLimit() },
expectedResult: int64(1048576),
},
{
name: "GetIgnoredDirectories",
configKey: "ignoreDirectories",
configValue: []string{"node_modules", ".git", "dist"},
getterFunc: func() any { return config.IgnoredDirectories() },
expectedResult: []string{"node_modules", ".git", "dist"},
},
{
name: "GetMaxConcurrency",
configKey: "maxConcurrency",
configValue: 8,
getterFunc: func() any { return config.MaxConcurrency() },
expectedResult: 8,
},
{
name: "GetSupportedFormats",
configKey: "supportedFormats",
configValue: []string{"json", "yaml", "markdown"},
getterFunc: func() any { return config.SupportedFormats() },
expectedResult: []string{"json", "yaml", "markdown"},
},
{
name: "GetFilePatterns",
configKey: "filePatterns",
configValue: []string{"*.go", "*.js", "*.py"},
getterFunc: func() any { return config.FilePatterns() },
expectedResult: []string{"*.go", "*.js", "*.py"},
},
// File type configuration getters
{
name: "GetFileTypesEnabled",
configKey: "fileTypes.enabled",
configValue: true,
getterFunc: func() any { return config.FileTypesEnabled() },
expectedResult: true,
},
{
name: "GetCustomImageExtensions",
configKey: "fileTypes.customImageExtensions",
configValue: []string{".webp", ".avif"},
getterFunc: func() any { return config.CustomImageExtensions() },
expectedResult: []string{".webp", ".avif"},
},
{
name: "GetCustomBinaryExtensions",
configKey: "fileTypes.customBinaryExtensions",
configValue: []string{".custom", ".bin"},
getterFunc: func() any { return config.CustomBinaryExtensions() },
expectedResult: []string{".custom", ".bin"},
},
{
name: "GetDisabledImageExtensions",
configKey: "fileTypes.disabledImageExtensions",
configValue: []string{".gif", ".bmp"},
getterFunc: func() any { return config.DisabledImageExtensions() },
expectedResult: []string{".gif", ".bmp"},
},
{
name: "GetDisabledBinaryExtensions",
configKey: "fileTypes.disabledBinaryExtensions",
configValue: []string{".exe", ".dll"},
getterFunc: func() any { return config.DisabledBinaryExtensions() },
expectedResult: []string{".exe", ".dll"},
},
{
name: "GetDisabledLanguageExtensions",
configKey: "fileTypes.disabledLanguageExtensions",
configValue: []string{".sh", ".bat"},
getterFunc: func() any { return config.DisabledLanguageExtensions() },
expectedResult: []string{".sh", ".bat"},
},
// Backpressure configuration getters
{
name: "GetBackpressureEnabled",
configKey: "backpressure.enabled",
configValue: true,
getterFunc: func() any { return config.BackpressureEnabled() },
expectedResult: true,
},
{
name: "GetMaxPendingFiles",
configKey: "backpressure.maxPendingFiles",
configValue: 1000,
getterFunc: func() any { return config.MaxPendingFiles() },
expectedResult: 1000,
},
{
name: "GetMaxPendingWrites",
configKey: "backpressure.maxPendingWrites",
configValue: 100,
getterFunc: func() any { return config.MaxPendingWrites() },
expectedResult: 100,
},
{
name: "GetMaxMemoryUsage",
configKey: "backpressure.maxMemoryUsage",
configValue: int64(104857600),
getterFunc: func() any { return config.MaxMemoryUsage() },
expectedResult: int64(104857600),
},
{
name: "GetMemoryCheckInterval",
configKey: "backpressure.memoryCheckInterval",
configValue: 500,
getterFunc: func() any { return config.MemoryCheckInterval() },
expectedResult: 500,
},
// Resource limits configuration getters
{
name: "GetResourceLimitsEnabled",
configKey: "resourceLimits.enabled",
configValue: true,
getterFunc: func() any { return config.ResourceLimitsEnabled() },
expectedResult: true,
},
{
name: "GetMaxFiles",
configKey: "resourceLimits.maxFiles",
configValue: 5000,
getterFunc: func() any { return config.MaxFiles() },
expectedResult: 5000,
},
{
name: "GetMaxTotalSize",
configKey: "resourceLimits.maxTotalSize",
configValue: int64(1073741824),
getterFunc: func() any { return config.MaxTotalSize() },
expectedResult: int64(1073741824),
},
{
name: "GetFileProcessingTimeoutSec",
configKey: "resourceLimits.fileProcessingTimeoutSec",
configValue: 30,
getterFunc: func() any { return config.FileProcessingTimeoutSec() },
expectedResult: 30,
},
{
name: "GetOverallTimeoutSec",
configKey: "resourceLimits.overallTimeoutSec",
configValue: 1800,
getterFunc: func() any { return config.OverallTimeoutSec() },
expectedResult: 1800,
},
{
name: "GetMaxConcurrentReads",
configKey: "resourceLimits.maxConcurrentReads",
configValue: 10,
getterFunc: func() any { return config.MaxConcurrentReads() },
expectedResult: 10,
},
{
name: "GetRateLimitFilesPerSec",
configKey: "resourceLimits.rateLimitFilesPerSec",
configValue: 100,
getterFunc: func() any { return config.RateLimitFilesPerSec() },
expectedResult: 100,
},
{
name: "GetHardMemoryLimitMB",
configKey: "resourceLimits.hardMemoryLimitMB",
configValue: 512,
getterFunc: func() any { return config.HardMemoryLimitMB() },
expectedResult: 512,
},
{
name: "GetEnableGracefulDegradation",
configKey: "resourceLimits.enableGracefulDegradation",
configValue: true,
getterFunc: func() any { return config.EnableGracefulDegradation() },
expectedResult: true,
},
{
name: "GetEnableResourceMonitoring",
configKey: "resourceLimits.enableResourceMonitoring",
configValue: true,
getterFunc: func() any { return config.EnableResourceMonitoring() },
expectedResult: true,
},
// Template system configuration getters
{
name: "GetOutputTemplate",
configKey: "output.template",
configValue: "detailed",
getterFunc: func() any { return config.OutputTemplate() },
expectedResult: "detailed",
},
{
name: "GetTemplateMetadataIncludeStats",
configKey: "output.metadata.includeStats",
configValue: true,
getterFunc: func() any { return config.TemplateMetadataIncludeStats() },
expectedResult: true,
},
{
name: "GetTemplateMetadataIncludeTimestamp",
configKey: "output.metadata.includeTimestamp",
configValue: false,
getterFunc: func() any { return config.TemplateMetadataIncludeTimestamp() },
expectedResult: false,
},
{
name: "GetTemplateMetadataIncludeFileCount",
configKey: "output.metadata.includeFileCount",
configValue: true,
getterFunc: func() any { return config.TemplateMetadataIncludeFileCount() },
expectedResult: true,
},
{
name: "GetTemplateMetadataIncludeSourcePath",
configKey: "output.metadata.includeSourcePath",
configValue: false,
getterFunc: func() any { return config.TemplateMetadataIncludeSourcePath() },
expectedResult: false,
},
{
name: "GetTemplateMetadataIncludeFileTypes",
configKey: "output.metadata.includeFileTypes",
configValue: true,
getterFunc: func() any { return config.TemplateMetadataIncludeFileTypes() },
expectedResult: true,
},
{
name: "GetTemplateMetadataIncludeProcessingTime",
configKey: "output.metadata.includeProcessingTime",
configValue: false,
getterFunc: func() any { return config.TemplateMetadataIncludeProcessingTime() },
expectedResult: false,
},
{
name: "GetTemplateMetadataIncludeTotalSize",
configKey: "output.metadata.includeTotalSize",
configValue: true,
getterFunc: func() any { return config.TemplateMetadataIncludeTotalSize() },
expectedResult: true,
},
{
name: "GetTemplateMetadataIncludeMetrics",
configKey: "output.metadata.includeMetrics",
configValue: false,
getterFunc: func() any { return config.TemplateMetadataIncludeMetrics() },
expectedResult: false,
},
// Markdown template configuration getters
{
name: "GetTemplateMarkdownUseCodeBlocks",
configKey: "output.markdown.useCodeBlocks",
configValue: true,
getterFunc: func() any { return config.TemplateMarkdownUseCodeBlocks() },
expectedResult: true,
},
{
name: "GetTemplateMarkdownIncludeLanguage",
configKey: "output.markdown.includeLanguage",
configValue: false,
getterFunc: func() any { return config.TemplateMarkdownIncludeLanguage() },
expectedResult: false,
},
{
name: "GetTemplateMarkdownHeaderLevel",
configKey: "output.markdown.headerLevel",
configValue: 3,
getterFunc: func() any { return config.TemplateMarkdownHeaderLevel() },
expectedResult: 3,
},
{
name: "GetTemplateMarkdownTableOfContents",
configKey: "output.markdown.tableOfContents",
configValue: true,
getterFunc: func() any { return config.TemplateMarkdownTableOfContents() },
expectedResult: true,
},
{
name: "GetTemplateMarkdownUseCollapsible",
configKey: "output.markdown.useCollapsible",
configValue: false,
getterFunc: func() any { return config.TemplateMarkdownUseCollapsible() },
expectedResult: false,
},
{
name: "GetTemplateMarkdownSyntaxHighlighting",
configKey: "output.markdown.syntaxHighlighting",
configValue: true,
getterFunc: func() any { return config.TemplateMarkdownSyntaxHighlighting() },
expectedResult: true,
},
{
name: "GetTemplateMarkdownLineNumbers",
configKey: "output.markdown.lineNumbers",
configValue: false,
getterFunc: func() any { return config.TemplateMarkdownLineNumbers() },
expectedResult: false,
},
{
name: "GetTemplateMarkdownFoldLongFiles",
configKey: "output.markdown.foldLongFiles",
configValue: true,
getterFunc: func() any { return config.TemplateMarkdownFoldLongFiles() },
expectedResult: true,
},
{
name: "GetTemplateMarkdownMaxLineLength",
configKey: "output.markdown.maxLineLength",
configValue: 120,
getterFunc: func() any { return config.TemplateMarkdownMaxLineLength() },
expectedResult: 120,
},
{
name: "GetTemplateCustomCSS",
configKey: "output.markdown.customCSS",
configValue: "body { color: blue; }",
getterFunc: func() any { return config.TemplateCustomCSS() },
expectedResult: "body { color: blue; }",
},
// Custom template configuration getters
{
name: "GetTemplateCustomHeader",
configKey: "output.custom.header",
configValue: "# Custom Header\n",
getterFunc: func() any { return config.TemplateCustomHeader() },
expectedResult: "# Custom Header\n",
},
{
name: "GetTemplateCustomFooter",
configKey: "output.custom.footer",
configValue: "---\nFooter content",
getterFunc: func() any { return config.TemplateCustomFooter() },
expectedResult: "---\nFooter content",
},
{
name: "GetTemplateCustomFileHeader",
configKey: "output.custom.fileHeader",
configValue: "## File: {{ .Path }}",
getterFunc: func() any { return config.TemplateCustomFileHeader() },
expectedResult: "## File: {{ .Path }}",
},
{
name: "GetTemplateCustomFileFooter",
configKey: "output.custom.fileFooter",
configValue: "---",
getterFunc: func() any { return config.TemplateCustomFileFooter() },
expectedResult: "---",
},
// Custom languages map getter
{
name: "GetCustomLanguages",
configKey: "fileTypes.customLanguages",
configValue: map[string]string{".vue": "vue", ".svelte": "svelte"},
getterFunc: func() any { return config.CustomLanguages() },
expectedResult: map[string]string{".vue": "vue", ".svelte": "svelte"},
},
// Template variables map getter
{
name: "GetTemplateVariables",
configKey: "output.variables",
configValue: map[string]string{"project": "gibidify", "version": "1.0"},
getterFunc: func() any { return config.TemplateVariables() },
expectedResult: map[string]string{"project": "gibidify", "version": "1.0"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Reset viper and set the specific configuration
testutil.SetViperKeys(t, map[string]any{
tt.configKey: tt.configValue,
})
// Call the getter function and compare results
result := tt.getterFunc()
if !reflect.DeepEqual(result, tt.expectedResult) {
t.Errorf("Test %s: expected %v (type %T), got %v (type %T)",
tt.name, tt.expectedResult, tt.expectedResult, result, result)
}
})
}
}
// TestConfigGettersWithDefaults tests that getters return appropriate default values
// when configuration keys are not set.
func TestConfigGettersWithDefaults(t *testing.T) {
	// Start from a pristine viper state so every getter observes only defaults.
	testutil.ResetViperConfig(t, "")
	// Numeric getters must fall back to their shared default constants.
	t.Run("numeric_getters", func(t *testing.T) {
		for _, c := range []struct {
			name     string
			getter   func() int64
			expected int64
		}{
			{"FileSizeLimit", config.FileSizeLimit, shared.ConfigFileSizeLimitDefault},
			{"MaxTotalSize", config.MaxTotalSize, shared.ConfigMaxTotalSizeDefault},
		} {
			assertInt64Getter(t, c.name, c.getter, c.expected)
		}
		for _, c := range []struct {
			name     string
			getter   func() int
			expected int
		}{
			{"MaxConcurrency", config.MaxConcurrency, shared.ConfigMaxConcurrencyDefault},
			{"TemplateMarkdownHeaderLevel", config.TemplateMarkdownHeaderLevel, shared.ConfigMarkdownHeaderLevelDefault},
			{"MaxFiles", config.MaxFiles, shared.ConfigMaxFilesDefault},
			{"FileProcessingTimeoutSec", config.FileProcessingTimeoutSec, shared.ConfigFileProcessingTimeoutSecDefault},
			{"OverallTimeoutSec", config.OverallTimeoutSec, shared.ConfigOverallTimeoutSecDefault},
			{"MaxConcurrentReads", config.MaxConcurrentReads, shared.ConfigMaxConcurrentReadsDefault},
			{"HardMemoryLimitMB", config.HardMemoryLimitMB, shared.ConfigHardMemoryLimitMBDefault},
		} {
			assertIntGetter(t, c.name, c.getter, c.expected)
		}
	})
	// Boolean getters must fall back to their shared default constants.
	t.Run("boolean_getters", func(t *testing.T) {
		for _, c := range []struct {
			name     string
			getter   func() bool
			expected bool
		}{
			{"FileTypesEnabled", config.FileTypesEnabled, shared.ConfigFileTypesEnabledDefault},
			{"BackpressureEnabled", config.BackpressureEnabled, shared.ConfigBackpressureEnabledDefault},
			{"ResourceLimitsEnabled", config.ResourceLimitsEnabled, shared.ConfigResourceLimitsEnabledDefault},
			{"EnableGracefulDegradation", config.EnableGracefulDegradation, shared.ConfigEnableGracefulDegradationDefault},
			{"TemplateMarkdownUseCodeBlocks", config.TemplateMarkdownUseCodeBlocks, shared.ConfigMarkdownUseCodeBlocksDefault},
			{"TemplateMarkdownTableOfContents", config.TemplateMarkdownTableOfContents, shared.ConfigMarkdownTableOfContentsDefault},
		} {
			assertBoolGetter(t, c.name, c.getter, c.expected)
		}
	})
	// String getters must fall back to their shared default constants.
	t.Run("string_getters", func(t *testing.T) {
		for _, c := range []struct {
			name     string
			getter   func() string
			expected string
		}{
			{"OutputTemplate", config.OutputTemplate, shared.ConfigOutputTemplateDefault},
			{"TemplateCustomCSS", config.TemplateCustomCSS, shared.ConfigMarkdownCustomCSSDefault},
			{"TemplateCustomHeader", config.TemplateCustomHeader, shared.ConfigCustomHeaderDefault},
			{"TemplateCustomFooter", config.TemplateCustomFooter, shared.ConfigCustomFooterDefault},
		} {
			assertStringGetter(t, c.name, c.getter, c.expected)
		}
	})
}
// assertInt64Getter verifies that an int64 getter yields the expected default value.
func assertInt64Getter(t *testing.T, name string, getter func() int64, expected int64) {
	t.Helper()
	if got := getter(); got != expected {
		t.Errorf("%s: expected %d, got %d", name, expected, got)
	}
}
// assertIntGetter verifies that an int getter yields the expected default value.
func assertIntGetter(t *testing.T, name string, getter func() int, expected int) {
	t.Helper()
	if got := getter(); got != expected {
		t.Errorf("%s: expected %d, got %d", name, expected, got)
	}
}
// assertBoolGetter verifies that a bool getter yields the expected default value.
func assertBoolGetter(t *testing.T, name string, getter func() bool, expected bool) {
	t.Helper()
	if got := getter(); got != expected {
		t.Errorf("%s: expected %v, got %v", name, expected, got)
	}
}
// assertStringGetter verifies that a string getter yields the expected default value.
func assertStringGetter(t *testing.T, name string, getter func() string, expected string) {
	t.Helper()
	if got := getter(); got != expected {
		t.Errorf("%s: expected %q, got %q", name, expected, got)
	}
}

View File

@@ -1,13 +1,13 @@
// Package config handles application configuration management.
package config
import (
"os"
"path/filepath"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// LoadConfig reads configuration from a YAML file.
@@ -17,103 +17,74 @@ import (
// 3. The current directory as fallback.
func LoadConfig() {
viper.SetConfigName("config")
viper.SetConfigType(shared.FormatYAML)
logger := shared.GetLogger()
viper.SetConfigType("yaml")
if xdgConfig := os.Getenv("XDG_CONFIG_HOME"); xdgConfig != "" {
// Validate XDG_CONFIG_HOME for path traversal attempts
if err := shared.ValidateConfigPath(xdgConfig); err != nil {
logger.Warnf("Invalid XDG_CONFIG_HOME path, using default config: %v", err)
if err := utils.ValidateConfigPath(xdgConfig); err != nil {
logrus.Warnf("Invalid XDG_CONFIG_HOME path, using default config: %v", err)
} else {
configPath := filepath.Join(xdgConfig, shared.AppName)
configPath := filepath.Join(xdgConfig, "gibidify")
viper.AddConfigPath(configPath)
}
} else if home, err := os.UserHomeDir(); err == nil {
viper.AddConfigPath(filepath.Join(home, ".config", shared.AppName))
viper.AddConfigPath(filepath.Join(home, ".config", "gibidify"))
}
// Only add current directory if no config file named gibidify.yaml exists
// to avoid conflicts with the project's output file
if _, err := os.Stat(shared.AppName + ".yaml"); os.IsNotExist(err) {
if _, err := os.Stat("gibidify.yaml"); os.IsNotExist(err) {
viper.AddConfigPath(".")
}
if err := viper.ReadInConfig(); err != nil {
logger.Infof("Config file not found, using default values: %v", err)
SetDefaultConfig()
logrus.Infof("Config file not found, using default values: %v", err)
setDefaultConfig()
} else {
logger.Infof("Using config file: %s", viper.ConfigFileUsed())
logrus.Infof("Using config file: %s", viper.ConfigFileUsed())
// Validate configuration after loading
if err := ValidateConfig(); err != nil {
logger.Warnf("Configuration validation failed: %v", err)
logger.Info("Falling back to default configuration")
logrus.Warnf("Configuration validation failed: %v", err)
logrus.Info("Falling back to default configuration")
// Reset viper and set defaults when validation fails
viper.Reset()
SetDefaultConfig()
setDefaultConfig()
}
}
}
// SetDefaultConfig sets default configuration values.
func SetDefaultConfig() {
// File size limits
viper.SetDefault(shared.ConfigKeyFileSizeLimit, shared.ConfigFileSizeLimitDefault)
viper.SetDefault(shared.ConfigKeyIgnoreDirectories, shared.ConfigIgnoredDirectoriesDefault)
viper.SetDefault(shared.ConfigKeyMaxConcurrency, shared.ConfigMaxConcurrencyDefault)
viper.SetDefault(shared.ConfigKeySupportedFormats, shared.ConfigSupportedFormatsDefault)
viper.SetDefault(shared.ConfigKeyFilePatterns, shared.ConfigFilePatternsDefault)
// setDefaultConfig sets default configuration values.
func setDefaultConfig() {
viper.SetDefault("fileSizeLimit", DefaultFileSizeLimit)
// Default ignored directories.
viper.SetDefault("ignoreDirectories", []string{
"vendor", "node_modules", ".git", "dist", "build", "target", "bower_components", "cache", "tmp",
})
// FileTypeRegistry defaults
viper.SetDefault(shared.ConfigKeyFileTypesEnabled, shared.ConfigFileTypesEnabledDefault)
viper.SetDefault(shared.ConfigKeyFileTypesCustomImageExtensions, shared.ConfigCustomImageExtensionsDefault)
viper.SetDefault(shared.ConfigKeyFileTypesCustomBinaryExtensions, shared.ConfigCustomBinaryExtensionsDefault)
viper.SetDefault(shared.ConfigKeyFileTypesCustomLanguages, shared.ConfigCustomLanguagesDefault)
viper.SetDefault(shared.ConfigKeyFileTypesDisabledImageExtensions, shared.ConfigDisabledImageExtensionsDefault)
viper.SetDefault(shared.ConfigKeyFileTypesDisabledBinaryExtensions, shared.ConfigDisabledBinaryExtensionsDefault)
viper.SetDefault(shared.ConfigKeyFileTypesDisabledLanguageExts, shared.ConfigDisabledLanguageExtensionsDefault)
viper.SetDefault("fileTypes.enabled", true)
viper.SetDefault("fileTypes.customImageExtensions", []string{})
viper.SetDefault("fileTypes.customBinaryExtensions", []string{})
viper.SetDefault("fileTypes.customLanguages", map[string]string{})
viper.SetDefault("fileTypes.disabledImageExtensions", []string{})
viper.SetDefault("fileTypes.disabledBinaryExtensions", []string{})
viper.SetDefault("fileTypes.disabledLanguageExtensions", []string{})
// Backpressure and memory management defaults
viper.SetDefault(shared.ConfigKeyBackpressureEnabled, shared.ConfigBackpressureEnabledDefault)
viper.SetDefault(shared.ConfigKeyBackpressureMaxPendingFiles, shared.ConfigMaxPendingFilesDefault)
viper.SetDefault(shared.ConfigKeyBackpressureMaxPendingWrites, shared.ConfigMaxPendingWritesDefault)
viper.SetDefault(shared.ConfigKeyBackpressureMaxMemoryUsage, shared.ConfigMaxMemoryUsageDefault)
viper.SetDefault(shared.ConfigKeyBackpressureMemoryCheckInt, shared.ConfigMemoryCheckIntervalDefault)
// Back-pressure and memory management defaults
viper.SetDefault("backpressure.enabled", true)
viper.SetDefault("backpressure.maxPendingFiles", 1000) // Max files in file channel buffer
viper.SetDefault("backpressure.maxPendingWrites", 100) // Max writes in write channel buffer
viper.SetDefault("backpressure.maxMemoryUsage", 104857600) // 100MB max memory usage
viper.SetDefault("backpressure.memoryCheckInterval", 1000) // Check memory every 1000 files
// Resource limit defaults
viper.SetDefault(shared.ConfigKeyResourceLimitsEnabled, shared.ConfigResourceLimitsEnabledDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsMaxFiles, shared.ConfigMaxFilesDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsMaxTotalSize, shared.ConfigMaxTotalSizeDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsFileProcessingTO, shared.ConfigFileProcessingTimeoutSecDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsOverallTO, shared.ConfigOverallTimeoutSecDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsMaxConcurrentReads, shared.ConfigMaxConcurrentReadsDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsRateLimitFilesPerSec, shared.ConfigRateLimitFilesPerSecDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsHardMemoryLimitMB, shared.ConfigHardMemoryLimitMBDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsEnableGracefulDeg, shared.ConfigEnableGracefulDegradationDefault)
viper.SetDefault(shared.ConfigKeyResourceLimitsEnableMonitoring, shared.ConfigEnableResourceMonitoringDefault)
// Output configuration defaults
viper.SetDefault(shared.ConfigKeyOutputTemplate, shared.ConfigOutputTemplateDefault)
viper.SetDefault("output.metadata.includeStats", shared.ConfigMetadataIncludeStatsDefault)
viper.SetDefault("output.metadata.includeTimestamp", shared.ConfigMetadataIncludeTimestampDefault)
viper.SetDefault("output.metadata.includeFileCount", shared.ConfigMetadataIncludeFileCountDefault)
viper.SetDefault("output.metadata.includeSourcePath", shared.ConfigMetadataIncludeSourcePathDefault)
viper.SetDefault("output.metadata.includeFileTypes", shared.ConfigMetadataIncludeFileTypesDefault)
viper.SetDefault("output.metadata.includeProcessingTime", shared.ConfigMetadataIncludeProcessingTimeDefault)
viper.SetDefault("output.metadata.includeTotalSize", shared.ConfigMetadataIncludeTotalSizeDefault)
viper.SetDefault("output.metadata.includeMetrics", shared.ConfigMetadataIncludeMetricsDefault)
viper.SetDefault("output.markdown.useCodeBlocks", shared.ConfigMarkdownUseCodeBlocksDefault)
viper.SetDefault("output.markdown.includeLanguage", shared.ConfigMarkdownIncludeLanguageDefault)
viper.SetDefault(shared.ConfigKeyOutputMarkdownHeaderLevel, shared.ConfigMarkdownHeaderLevelDefault)
viper.SetDefault("output.markdown.tableOfContents", shared.ConfigMarkdownTableOfContentsDefault)
viper.SetDefault("output.markdown.useCollapsible", shared.ConfigMarkdownUseCollapsibleDefault)
viper.SetDefault("output.markdown.syntaxHighlighting", shared.ConfigMarkdownSyntaxHighlightingDefault)
viper.SetDefault("output.markdown.lineNumbers", shared.ConfigMarkdownLineNumbersDefault)
viper.SetDefault("output.markdown.foldLongFiles", shared.ConfigMarkdownFoldLongFilesDefault)
viper.SetDefault(shared.ConfigKeyOutputMarkdownMaxLineLen, shared.ConfigMarkdownMaxLineLengthDefault)
viper.SetDefault(shared.ConfigKeyOutputMarkdownCustomCSS, shared.ConfigMarkdownCustomCSSDefault)
viper.SetDefault(shared.ConfigKeyOutputCustomHeader, shared.ConfigCustomHeaderDefault)
viper.SetDefault(shared.ConfigKeyOutputCustomFooter, shared.ConfigCustomFooterDefault)
viper.SetDefault(shared.ConfigKeyOutputCustomFileHeader, shared.ConfigCustomFileHeaderDefault)
viper.SetDefault(shared.ConfigKeyOutputCustomFileFooter, shared.ConfigCustomFileFooterDefault)
viper.SetDefault(shared.ConfigKeyOutputVariables, shared.ConfigTemplateVariablesDefault)
viper.SetDefault("resourceLimits.enabled", true)
viper.SetDefault("resourceLimits.maxFiles", DefaultMaxFiles)
viper.SetDefault("resourceLimits.maxTotalSize", DefaultMaxTotalSize)
viper.SetDefault("resourceLimits.fileProcessingTimeoutSec", DefaultFileProcessingTimeoutSec)
viper.SetDefault("resourceLimits.overallTimeoutSec", DefaultOverallTimeoutSec)
viper.SetDefault("resourceLimits.maxConcurrentReads", DefaultMaxConcurrentReads)
viper.SetDefault("resourceLimits.rateLimitFilesPerSec", DefaultRateLimitFilesPerSec)
viper.SetDefault("resourceLimits.hardMemoryLimitMB", DefaultHardMemoryLimitMB)
viper.SetDefault("resourceLimits.enableGracefulDegradation", true)
viper.SetDefault("resourceLimits.enableResourceMonitoring", true)
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
@@ -27,14 +26,14 @@ func TestDefaultConfig(t *testing.T) {
testutil.ResetViperConfig(t, tmpDir)
// Check defaults
defaultSizeLimit := config.FileSizeLimit()
defaultSizeLimit := config.GetFileSizeLimit()
if defaultSizeLimit != defaultFileSizeLimit {
t.Errorf("Expected default file size limit of 5242880, got %d", defaultSizeLimit)
}
ignoredDirs := config.IgnoredDirectories()
ignoredDirs := config.GetIgnoredDirectories()
if len(ignoredDirs) == 0 {
t.Error("Expected some default ignored directories, got none")
t.Errorf("Expected some default ignored directories, got none")
}
// Restore Viper state
@@ -77,16 +76,18 @@ ignoreDirectories:
// TestLoadConfigWithValidation tests that invalid config files fall back to defaults.
func TestLoadConfigWithValidation(t *testing.T) {
// Create a temporary config file with invalid content
configContent := "fileSizeLimit: 100\n" +
"ignoreDirectories:\n" +
"- node_modules\n" +
"- \"\"\n" +
"- .git\n"
configContent := `
fileSizeLimit: 100
ignoreDirectories:
- node_modules
- ""
- .git
`
tempDir := t.TempDir()
configFile := tempDir + "/config.yaml"
err := os.WriteFile(configFile, []byte(configContent), 0o600)
err := os.WriteFile(configFile, []byte(configContent), 0o644)
if err != nil {
t.Fatalf("Failed to write config file: %v", err)
}
@@ -99,14 +100,11 @@ func TestLoadConfigWithValidation(t *testing.T) {
config.LoadConfig()
// Should have fallen back to defaults due to validation failure
if config.FileSizeLimit() != int64(shared.ConfigFileSizeLimitDefault) {
t.Errorf("Expected default file size limit after validation failure, got %d", config.FileSizeLimit())
if config.GetFileSizeLimit() != int64(config.DefaultFileSizeLimit) {
t.Errorf("Expected default file size limit after validation failure, got %d", config.GetFileSizeLimit())
}
if containsString(config.IgnoredDirectories(), "") {
t.Errorf(
"Expected ignored directories not to contain empty string after validation failure, got %v",
config.IgnoredDirectories(),
)
if containsString(config.GetIgnoredDirectories(), "") {
t.Errorf("Expected ignored directories not to contain empty string after validation failure, got %v", config.GetIgnoredDirectories())
}
}
@@ -118,6 +116,5 @@ func containsString(slice []string, item string) bool {
return true
}
}
return false
}

View File

@@ -1,4 +1,3 @@
// Package config handles application configuration management.
package config
import (
@@ -7,611 +6,299 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// ValidateConfig validates the loaded configuration.
func ValidateConfig() error {
var validationErrors []string
// Validate basic settings
validationErrors = append(validationErrors, validateBasicSettings()...)
validationErrors = append(validationErrors, validateFileTypeSettings()...)
validationErrors = append(validationErrors, validateBackpressureSettings()...)
validationErrors = append(validationErrors, validateResourceLimitSettings()...)
// Validate file size limit
fileSizeLimit := viper.GetInt64("fileSizeLimit")
if fileSizeLimit < MinFileSizeLimit {
validationErrors = append(validationErrors, fmt.Sprintf("fileSizeLimit (%d) is below minimum (%d)", fileSizeLimit, MinFileSizeLimit))
}
if fileSizeLimit > MaxFileSizeLimit {
validationErrors = append(validationErrors, fmt.Sprintf("fileSizeLimit (%d) exceeds maximum (%d)", fileSizeLimit, MaxFileSizeLimit))
}
// Validate ignore directories
ignoreDirectories := viper.GetStringSlice("ignoreDirectories")
for i, dir := range ignoreDirectories {
dir = strings.TrimSpace(dir)
if dir == "" {
validationErrors = append(validationErrors, fmt.Sprintf("ignoreDirectories[%d] is empty", i))
continue
}
if strings.Contains(dir, "/") {
validationErrors = append(validationErrors, fmt.Sprintf("ignoreDirectories[%d] (%s) contains path separator - only directory names are allowed", i, dir))
}
if strings.HasPrefix(dir, ".") && dir != ".git" && dir != ".vscode" && dir != ".idea" {
validationErrors = append(validationErrors, fmt.Sprintf("ignoreDirectories[%d] (%s) starts with dot - this may cause unexpected behavior", i, dir))
}
}
// Validate supported output formats if configured
if viper.IsSet("supportedFormats") {
supportedFormats := viper.GetStringSlice("supportedFormats")
validFormats := map[string]bool{"json": true, "yaml": true, "markdown": true}
for i, format := range supportedFormats {
format = strings.ToLower(strings.TrimSpace(format))
if !validFormats[format] {
validationErrors = append(validationErrors, fmt.Sprintf("supportedFormats[%d] (%s) is not a valid format (json, yaml, markdown)", i, format))
}
}
}
// Validate concurrency settings if configured
if viper.IsSet("maxConcurrency") {
maxConcurrency := viper.GetInt("maxConcurrency")
if maxConcurrency < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("maxConcurrency (%d) must be at least 1", maxConcurrency))
}
if maxConcurrency > 100 {
validationErrors = append(validationErrors, fmt.Sprintf("maxConcurrency (%d) is unreasonably high (max 100)", maxConcurrency))
}
}
// Validate file patterns if configured
if viper.IsSet("filePatterns") {
filePatterns := viper.GetStringSlice("filePatterns")
for i, pattern := range filePatterns {
pattern = strings.TrimSpace(pattern)
if pattern == "" {
validationErrors = append(validationErrors, fmt.Sprintf("filePatterns[%d] is empty", i))
continue
}
// Basic validation - patterns should contain at least one alphanumeric character
if !strings.ContainsAny(pattern, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") {
validationErrors = append(validationErrors, fmt.Sprintf("filePatterns[%d] (%s) appears to be invalid", i, pattern))
}
}
}
// Validate FileTypeRegistry configuration
if viper.IsSet("fileTypes.customImageExtensions") {
customImages := viper.GetStringSlice("fileTypes.customImageExtensions")
for i, ext := range customImages {
ext = strings.TrimSpace(ext)
if ext == "" {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customImageExtensions[%d] is empty", i))
continue
}
if !strings.HasPrefix(ext, ".") {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customImageExtensions[%d] (%s) must start with a dot", i, ext))
}
}
}
if viper.IsSet("fileTypes.customBinaryExtensions") {
customBinary := viper.GetStringSlice("fileTypes.customBinaryExtensions")
for i, ext := range customBinary {
ext = strings.TrimSpace(ext)
if ext == "" {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customBinaryExtensions[%d] is empty", i))
continue
}
if !strings.HasPrefix(ext, ".") {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customBinaryExtensions[%d] (%s) must start with a dot", i, ext))
}
}
}
if viper.IsSet("fileTypes.customLanguages") {
customLangs := viper.GetStringMapString("fileTypes.customLanguages")
for ext, lang := range customLangs {
ext = strings.TrimSpace(ext)
lang = strings.TrimSpace(lang)
if ext == "" {
validationErrors = append(validationErrors, "fileTypes.customLanguages contains empty extension key")
continue
}
if !strings.HasPrefix(ext, ".") {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customLanguages extension (%s) must start with a dot", ext))
}
if lang == "" {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customLanguages[%s] has empty language value", ext))
}
}
}
// Validate back-pressure configuration
if viper.IsSet("backpressure.maxPendingFiles") {
maxPendingFiles := viper.GetInt("backpressure.maxPendingFiles")
if maxPendingFiles < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingFiles (%d) must be at least 1", maxPendingFiles))
}
if maxPendingFiles > 100000 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingFiles (%d) is unreasonably high (max 100000)", maxPendingFiles))
}
}
if viper.IsSet("backpressure.maxPendingWrites") {
maxPendingWrites := viper.GetInt("backpressure.maxPendingWrites")
if maxPendingWrites < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingWrites (%d) must be at least 1", maxPendingWrites))
}
if maxPendingWrites > 10000 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingWrites (%d) is unreasonably high (max 10000)", maxPendingWrites))
}
}
if viper.IsSet("backpressure.maxMemoryUsage") {
maxMemoryUsage := viper.GetInt64("backpressure.maxMemoryUsage")
if maxMemoryUsage < 1048576 { // 1MB minimum
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxMemoryUsage (%d) must be at least 1MB (1048576 bytes)", maxMemoryUsage))
}
if maxMemoryUsage > 10737418240 { // 10GB maximum
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxMemoryUsage (%d) is unreasonably high (max 10GB)", maxMemoryUsage))
}
}
if viper.IsSet("backpressure.memoryCheckInterval") {
interval := viper.GetInt("backpressure.memoryCheckInterval")
if interval < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.memoryCheckInterval (%d) must be at least 1", interval))
}
if interval > 100000 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.memoryCheckInterval (%d) is unreasonably high (max 100000)", interval))
}
}
// Validate resource limits configuration
if viper.IsSet("resourceLimits.maxFiles") {
maxFiles := viper.GetInt("resourceLimits.maxFiles")
if maxFiles < MinMaxFiles {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxFiles (%d) must be at least %d", maxFiles, MinMaxFiles))
}
if maxFiles > MaxMaxFiles {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxFiles (%d) exceeds maximum (%d)", maxFiles, MaxMaxFiles))
}
}
if viper.IsSet("resourceLimits.maxTotalSize") {
maxTotalSize := viper.GetInt64("resourceLimits.maxTotalSize")
if maxTotalSize < MinMaxTotalSize {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxTotalSize (%d) must be at least %d", maxTotalSize, MinMaxTotalSize))
}
if maxTotalSize > MaxMaxTotalSize {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxTotalSize (%d) exceeds maximum (%d)", maxTotalSize, MaxMaxTotalSize))
}
}
if viper.IsSet("resourceLimits.fileProcessingTimeoutSec") {
timeout := viper.GetInt("resourceLimits.fileProcessingTimeoutSec")
if timeout < MinFileProcessingTimeoutSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.fileProcessingTimeoutSec (%d) must be at least %d", timeout, MinFileProcessingTimeoutSec))
}
if timeout > MaxFileProcessingTimeoutSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.fileProcessingTimeoutSec (%d) exceeds maximum (%d)", timeout, MaxFileProcessingTimeoutSec))
}
}
if viper.IsSet("resourceLimits.overallTimeoutSec") {
timeout := viper.GetInt("resourceLimits.overallTimeoutSec")
if timeout < MinOverallTimeoutSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.overallTimeoutSec (%d) must be at least %d", timeout, MinOverallTimeoutSec))
}
if timeout > MaxOverallTimeoutSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.overallTimeoutSec (%d) exceeds maximum (%d)", timeout, MaxOverallTimeoutSec))
}
}
if viper.IsSet("resourceLimits.maxConcurrentReads") {
maxReads := viper.GetInt("resourceLimits.maxConcurrentReads")
if maxReads < MinMaxConcurrentReads {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxConcurrentReads (%d) must be at least %d", maxReads, MinMaxConcurrentReads))
}
if maxReads > MaxMaxConcurrentReads {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxConcurrentReads (%d) exceeds maximum (%d)", maxReads, MaxMaxConcurrentReads))
}
}
if viper.IsSet("resourceLimits.rateLimitFilesPerSec") {
rateLimit := viper.GetInt("resourceLimits.rateLimitFilesPerSec")
if rateLimit < MinRateLimitFilesPerSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.rateLimitFilesPerSec (%d) must be at least %d", rateLimit, MinRateLimitFilesPerSec))
}
if rateLimit > MaxRateLimitFilesPerSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.rateLimitFilesPerSec (%d) exceeds maximum (%d)", rateLimit, MaxRateLimitFilesPerSec))
}
}
if viper.IsSet("resourceLimits.hardMemoryLimitMB") {
memLimit := viper.GetInt("resourceLimits.hardMemoryLimitMB")
if memLimit < MinHardMemoryLimitMB {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.hardMemoryLimitMB (%d) must be at least %d", memLimit, MinHardMemoryLimitMB))
}
if memLimit > MaxHardMemoryLimitMB {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.hardMemoryLimitMB (%d) exceeds maximum (%d)", memLimit, MaxHardMemoryLimitMB))
}
}
if len(validationErrors) > 0 {
return shared.NewStructuredError(
shared.ErrorTypeConfiguration,
shared.CodeConfigValidation,
return utils.NewStructuredError(
utils.ErrorTypeConfiguration,
utils.CodeConfigValidation,
"configuration validation failed: "+strings.Join(validationErrors, "; "),
"",
map[string]any{"validation_errors": validationErrors},
map[string]interface{}{"validation_errors": validationErrors},
)
}
return nil
}
// validateBasicSettings validates basic configuration settings.
// It aggregates the results of every basic-settings check into one slice.
func validateBasicSettings() []string {
	var errs []string
	for _, check := range []func() []string{
		validateFileSizeLimit,
		validateIgnoreDirectories,
		validateSupportedFormats,
		validateConcurrencySettings,
		validateFilePatterns,
	} {
		errs = append(errs, check()...)
	}
	return errs
}
// validateFileSizeLimit validates the file size limit setting.
// The limit must lie within [ConfigFileSizeLimitMin, ConfigFileSizeLimitMax].
func validateFileSizeLimit() []string {
	var errs []string
	limit := viper.GetInt64(shared.ConfigKeyFileSizeLimit)
	// The bounds are mutually exclusive, so a switch covers both checks.
	switch {
	case limit < shared.ConfigFileSizeLimitMin:
		errs = append(
			errs,
			fmt.Sprintf("fileSizeLimit (%d) is below minimum (%d)", limit, shared.ConfigFileSizeLimitMin),
		)
	case limit > shared.ConfigFileSizeLimitMax:
		errs = append(
			errs,
			fmt.Sprintf("fileSizeLimit (%d) exceeds maximum (%d)", limit, shared.ConfigFileSizeLimitMax),
		)
	}
	return errs
}
// validateIgnoreDirectories validates the ignore directories setting.
//
// Each entry must be a bare directory name: empty entries are rejected,
// entries containing a path separator (either "/" or "\") are rejected,
// and dot-prefixed names other than the well-known tool directories
// (.git, .vscode, .idea) are flagged as potentially surprising.
func validateIgnoreDirectories() []string {
	var validationErrors []string
	ignoreDirectories := viper.GetStringSlice(shared.ConfigKeyIgnoreDirectories)
	for i, dir := range ignoreDirectories {
		if errMsg := validateEmptyElement(shared.ConfigKeyIgnoreDirectories, dir, i); errMsg != "" {
			validationErrors = append(validationErrors, errMsg)
			continue
		}
		dir = strings.TrimSpace(dir)
		// Check both separator styles so Windows-style paths ("a\b") are
		// caught as well; the previous check only looked for "/".
		if strings.ContainsAny(dir, `/\`) {
			validationErrors = append(
				validationErrors,
				fmt.Sprintf(
					"ignoreDirectories[%d] (%s) contains path separator - only directory names are allowed", i, dir,
				),
			)
		}
		// Other dot-prefixed names may unintentionally match hidden files.
		if strings.HasPrefix(dir, ".") && dir != ".git" && dir != ".vscode" && dir != ".idea" {
			validationErrors = append(
				validationErrors,
				fmt.Sprintf("ignoreDirectories[%d] (%s) starts with dot - this may cause unexpected behavior", i, dir),
			)
		}
	}
	return validationErrors
}
// validateSupportedFormats validates the supported formats setting.
// Entries are normalized (trimmed, lowercased) before being checked
// against the known output formats.
func validateSupportedFormats() []string {
	if !viper.IsSet(shared.ConfigKeySupportedFormats) {
		return nil
	}
	var errs []string
	for i, raw := range viper.GetStringSlice(shared.ConfigKeySupportedFormats) {
		format := strings.ToLower(strings.TrimSpace(raw))
		switch format {
		case shared.FormatJSON, shared.FormatYAML, shared.FormatMarkdown:
			// Recognized format; nothing to report.
		default:
			errs = append(
				errs,
				fmt.Sprintf("supportedFormats[%d] (%s) is not a valid format (json, yaml, markdown)", i, format),
			)
		}
	}
	return errs
}
// validateConcurrencySettings validates the concurrency settings.
// maxConcurrency, when set, must be between 1 and 100 inclusive.
func validateConcurrencySettings() []string {
	if !viper.IsSet(shared.ConfigKeyMaxConcurrency) {
		return nil
	}
	var errs []string
	n := viper.GetInt(shared.ConfigKeyMaxConcurrency)
	if n < 1 {
		errs = append(errs, fmt.Sprintf("maxConcurrency (%d) must be at least 1", n))
	}
	if n > 100 {
		errs = append(errs, fmt.Sprintf("maxConcurrency (%d) is unreasonably high (max 100)", n))
	}
	return errs
}
// validateFilePatterns validates the file patterns setting.
// A pattern is rejected when it is empty or contains no ASCII
// alphanumeric character at all.
func validateFilePatterns() []string {
	if !viper.IsSet(shared.ConfigKeyFilePatterns) {
		return nil
	}
	const alnum = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	var errs []string
	for i, raw := range viper.GetStringSlice(shared.ConfigKeyFilePatterns) {
		if msg := validateEmptyElement(shared.ConfigKeyFilePatterns, raw, i); msg != "" {
			errs = append(errs, msg)
			continue
		}
		pattern := strings.TrimSpace(raw)
		// Basic sanity check: a usable pattern needs at least one
		// alphanumeric character.
		if !strings.ContainsAny(pattern, alnum) {
			errs = append(
				errs,
				fmt.Sprintf("filePatterns[%d] (%s) appears to be invalid", i, pattern),
			)
		}
	}
	return errs
}
// validateFileTypeSettings validates file type configuration settings.
// It aggregates the results of every file-type check into one slice.
func validateFileTypeSettings() []string {
	var errs []string
	for _, check := range []func() []string{
		validateCustomImageExtensions,
		validateCustomBinaryExtensions,
		validateCustomLanguages,
	} {
		errs = append(errs, check()...)
	}
	return errs
}
// validateCustomImageExtensions validates custom image extensions.
// Each extension must be non-empty and start with a dot.
func validateCustomImageExtensions() []string {
	if !viper.IsSet(shared.ConfigKeyFileTypesCustomImageExtensions) {
		return nil
	}
	key := shared.ConfigKeyFileTypesCustomImageExtensions
	var errs []string
	for i, raw := range viper.GetStringSlice(key) {
		if msg := validateEmptyElement(key, raw, i); msg != "" {
			errs = append(errs, msg)
			continue
		}
		if msg := validateDotPrefix(key, strings.TrimSpace(raw), i); msg != "" {
			errs = append(errs, msg)
		}
	}
	return errs
}
// validateCustomBinaryExtensions validates custom binary extensions.
// Each extension must be non-empty and start with a dot.
func validateCustomBinaryExtensions() []string {
	if !viper.IsSet(shared.ConfigKeyFileTypesCustomBinaryExtensions) {
		return nil
	}
	key := shared.ConfigKeyFileTypesCustomBinaryExtensions
	var errs []string
	for i, raw := range viper.GetStringSlice(key) {
		if msg := validateEmptyElement(key, raw, i); msg != "" {
			errs = append(errs, msg)
			continue
		}
		if msg := validateDotPrefix(key, strings.TrimSpace(raw), i); msg != "" {
			errs = append(errs, msg)
		}
	}
	return errs
}
// validateCustomLanguages validates custom language mappings.
// Map keys must be non-empty, dot-prefixed extensions; values must be
// non-empty language names.
func validateCustomLanguages() []string {
	if !viper.IsSet(shared.ConfigKeyFileTypesCustomLanguages) {
		return nil
	}
	key := shared.ConfigKeyFileTypesCustomLanguages
	var errs []string
	for rawExt, lang := range viper.GetStringMapString(key) {
		ext := strings.TrimSpace(rawExt)
		if ext == "" {
			errs = append(errs, key+" contains empty extension key")
			continue
		}
		if msg := validateDotPrefixMap(key, ext); msg != "" {
			errs = append(errs, msg)
		}
		if msg := validateEmptyMapValue(key, ext, lang); msg != "" {
			errs = append(errs, msg)
		}
	}
	return errs
}
// validateBackpressureSettings validates back-pressure configuration settings.
// It aggregates the results of every back-pressure check into one slice.
func validateBackpressureSettings() []string {
	var errs []string
	for _, check := range []func() []string{
		validateMaxPendingFiles,
		validateMaxPendingWrites,
		validateMaxMemoryUsage,
		validateMemoryCheckInterval,
	} {
		errs = append(errs, check()...)
	}
	return errs
}
// validateMaxPendingFiles validates backpressure.maxPendingFiles setting.
// The value, when set, must be between 1 and 100000 inclusive.
func validateMaxPendingFiles() []string {
	if !viper.IsSet(shared.ConfigKeyBackpressureMaxPendingFiles) {
		return nil
	}
	var errs []string
	pending := viper.GetInt(shared.ConfigKeyBackpressureMaxPendingFiles)
	if pending < 1 {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.maxPendingFiles (%d) must be at least 1", pending),
		)
	}
	if pending > 100000 {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.maxPendingFiles (%d) is unreasonably high (max 100000)", pending),
		)
	}
	return errs
}
// validateMaxPendingWrites validates backpressure.maxPendingWrites setting.
// The value, when set, must be between 1 and 10000 inclusive.
func validateMaxPendingWrites() []string {
	if !viper.IsSet(shared.ConfigKeyBackpressureMaxPendingWrites) {
		return nil
	}
	var errs []string
	pending := viper.GetInt(shared.ConfigKeyBackpressureMaxPendingWrites)
	if pending < 1 {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.maxPendingWrites (%d) must be at least 1", pending),
		)
	}
	if pending > 10000 {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.maxPendingWrites (%d) is unreasonably high (max 10000)", pending),
		)
	}
	return errs
}
// validateMaxMemoryUsage validates backpressure.maxMemoryUsage setting.
// The value, when set, must lie within the 1MB..10GB range.
func validateMaxMemoryUsage() []string {
	if !viper.IsSet(shared.ConfigKeyBackpressureMaxMemoryUsage) {
		return nil
	}
	var errs []string
	usage := viper.GetInt64(shared.ConfigKeyBackpressureMaxMemoryUsage)
	lower := int64(shared.BytesPerMB)      // 1MB floor
	upper := int64(10 * shared.BytesPerGB) // 10GB ceiling
	if usage < lower {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.maxMemoryUsage (%d) must be at least 1MB (%d bytes)", usage, lower),
		)
	}
	if usage > upper {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.maxMemoryUsage (%d) is unreasonably high (max 10GB)", usage),
		)
	}
	return errs
}
// validateMemoryCheckInterval validates backpressure.memoryCheckInterval setting.
// The value, when set, must be between 1 and 100000 inclusive.
func validateMemoryCheckInterval() []string {
	if !viper.IsSet(shared.ConfigKeyBackpressureMemoryCheckInt) {
		return nil
	}
	var errs []string
	interval := viper.GetInt(shared.ConfigKeyBackpressureMemoryCheckInt)
	if interval < 1 {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.memoryCheckInterval (%d) must be at least 1", interval),
		)
	}
	if interval > 100000 {
		errs = append(
			errs,
			fmt.Sprintf("backpressure.memoryCheckInterval (%d) is unreasonably high (max 100000)", interval),
		)
	}
	return errs
}
// validateResourceLimitSettings validates resource limit configuration settings.
// It aggregates the results of every resource-limit check into one slice.
func validateResourceLimitSettings() []string {
	var errs []string
	for _, check := range []func() []string{
		validateMaxFilesLimit,
		validateMaxTotalSizeLimit,
		validateTimeoutLimits,
		validateConcurrencyLimits,
		validateMemoryLimits,
	} {
		errs = append(errs, check()...)
	}
	return errs
}
// validateMaxFilesLimit validates resourceLimits.maxFiles setting.
// The value, when set, must lie within [ConfigMaxFilesMin, ConfigMaxFilesMax].
func validateMaxFilesLimit() []string {
	if !viper.IsSet(shared.ConfigKeyResourceLimitsMaxFiles) {
		return nil
	}
	var errs []string
	maxFiles := viper.GetInt(shared.ConfigKeyResourceLimitsMaxFiles)
	if maxFiles < shared.ConfigMaxFilesMin {
		errs = append(
			errs,
			fmt.Sprintf("resourceLimits.maxFiles (%d) must be at least %d", maxFiles, shared.ConfigMaxFilesMin),
		)
	}
	if maxFiles > shared.ConfigMaxFilesMax {
		errs = append(
			errs,
			fmt.Sprintf("resourceLimits.maxFiles (%d) exceeds maximum (%d)", maxFiles, shared.ConfigMaxFilesMax),
		)
	}
	return errs
}
// validateMaxTotalSizeLimit validates resourceLimits.maxTotalSize setting.
// The value, when set, must lie within [ConfigMaxTotalSizeMin, ConfigMaxTotalSizeMax].
func validateMaxTotalSizeLimit() []string {
	if !viper.IsSet(shared.ConfigKeyResourceLimitsMaxTotalSize) {
		return nil
	}
	var errs []string
	total := viper.GetInt64(shared.ConfigKeyResourceLimitsMaxTotalSize)
	lower := int64(shared.ConfigMaxTotalSizeMin)
	upper := int64(shared.ConfigMaxTotalSizeMax)
	if total < lower {
		errs = append(
			errs,
			fmt.Sprintf("resourceLimits.maxTotalSize (%d) must be at least %d", total, lower),
		)
	}
	if total > upper {
		errs = append(
			errs,
			fmt.Sprintf("resourceLimits.maxTotalSize (%d) exceeds maximum (%d)", total, upper),
		)
	}
	return errs
}
// validateTimeoutLimits validates timeout-related resource limit settings.
// Both the per-file and the overall timeout are range-checked only when
// they are explicitly set.
func validateTimeoutLimits() []string {
	var errs []string
	// checkRange appends an error for each bound the configured value violates.
	checkRange := func(key, tooLowMsg, tooHighMsg string, minVal, maxVal int) {
		if !viper.IsSet(key) {
			return
		}
		timeout := viper.GetInt(key)
		if timeout < minVal {
			errs = append(errs, fmt.Sprintf(tooLowMsg, timeout, minVal))
		}
		if timeout > maxVal {
			errs = append(errs, fmt.Sprintf(tooHighMsg, timeout, maxVal))
		}
	}
	checkRange(
		shared.ConfigKeyResourceLimitsFileProcessingTO,
		"resourceLimits.fileProcessingTimeoutSec (%d) must be at least %d",
		"resourceLimits.fileProcessingTimeoutSec (%d) exceeds maximum (%d)",
		shared.ConfigFileProcessingTimeoutSecMin,
		shared.ConfigFileProcessingTimeoutSecMax,
	)
	checkRange(
		shared.ConfigKeyResourceLimitsOverallTO,
		"resourceLimits.overallTimeoutSec (%d) must be at least %d",
		"resourceLimits.overallTimeoutSec (%d) exceeds maximum (%d)",
		shared.ConfigOverallTimeoutSecMin,
		shared.ConfigOverallTimeoutSecMax,
	)
	return errs
}
// validateConcurrencyLimits validates concurrency-related resource limit settings.
// Both maxConcurrentReads and rateLimitFilesPerSec are range-checked only
// when they are explicitly set.
func validateConcurrencyLimits() []string {
	var errs []string
	// checkRange appends an error for each bound the configured value violates.
	checkRange := func(key, tooLowMsg, tooHighMsg string, minVal, maxVal int) {
		if !viper.IsSet(key) {
			return
		}
		value := viper.GetInt(key)
		if value < minVal {
			errs = append(errs, fmt.Sprintf(tooLowMsg, value, minVal))
		}
		if value > maxVal {
			errs = append(errs, fmt.Sprintf(tooHighMsg, value, maxVal))
		}
	}
	checkRange(
		shared.ConfigKeyResourceLimitsMaxConcurrentReads,
		"resourceLimits.maxConcurrentReads (%d) must be at least %d",
		"resourceLimits.maxConcurrentReads (%d) exceeds maximum (%d)",
		shared.ConfigMaxConcurrentReadsMin,
		shared.ConfigMaxConcurrentReadsMax,
	)
	checkRange(
		shared.ConfigKeyResourceLimitsRateLimitFilesPerSec,
		"resourceLimits.rateLimitFilesPerSec (%d) must be at least %d",
		"resourceLimits.rateLimitFilesPerSec (%d) exceeds maximum (%d)",
		shared.ConfigRateLimitFilesPerSecMin,
		shared.ConfigRateLimitFilesPerSecMax,
	)
	return errs
}
// validateMemoryLimits validates memory-related resource limit settings.
// hardMemoryLimitMB, when set, must lie within its configured min/max bounds.
func validateMemoryLimits() []string {
	if !viper.IsSet(shared.ConfigKeyResourceLimitsHardMemoryLimitMB) {
		return nil
	}
	var errs []string
	limit := viper.GetInt(shared.ConfigKeyResourceLimitsHardMemoryLimitMB)
	if limit < shared.ConfigHardMemoryLimitMBMin {
		errs = append(
			errs,
			fmt.Sprintf("resourceLimits.hardMemoryLimitMB (%d) must be at least %d", limit, shared.ConfigHardMemoryLimitMBMin),
		)
	}
	if limit > shared.ConfigHardMemoryLimitMBMax {
		errs = append(
			errs,
			fmt.Sprintf("resourceLimits.hardMemoryLimitMB (%d) exceeds maximum (%d)", limit, shared.ConfigHardMemoryLimitMBMax),
		)
	}
	return errs
}
// ValidateFileSize checks if a file size is within the configured limit.
func ValidateFileSize(size int64) error {
limit := FileSizeLimit()
limit := GetFileSizeLimit()
if size > limit {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeValidationSize,
fmt.Sprintf(shared.FileProcessingMsgSizeExceeds, size, limit),
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationSize,
fmt.Sprintf("file size (%d bytes) exceeds limit (%d bytes)", size, limit),
"",
map[string]any{"file_size": size, "size_limit": limit},
map[string]interface{}{"file_size": size, "size_limit": limit},
)
}
return nil
}
// ValidateOutputFormat checks if an output format is valid.
func ValidateOutputFormat(format string) error {
if !IsValidFormat(format) {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeValidationFormat,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
fmt.Sprintf("unsupported output format: %s (supported: json, yaml, markdown)", format),
"",
map[string]any{"format": format},
map[string]interface{}{"format": format},
)
}
return nil
}
// ValidateConcurrency checks if a concurrency level is valid.
func ValidateConcurrency(concurrency int) error {
if concurrency < 1 {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeValidationFormat,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
fmt.Sprintf("concurrency (%d) must be at least 1", concurrency),
"",
map[string]any{"concurrency": concurrency},
map[string]interface{}{"concurrency": concurrency},
)
}
if viper.IsSet(shared.ConfigKeyMaxConcurrency) {
maxConcurrency := MaxConcurrency()
if viper.IsSet("maxConcurrency") {
maxConcurrency := GetMaxConcurrency()
if concurrency > maxConcurrency {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeValidationFormat,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
fmt.Sprintf("concurrency (%d) exceeds maximum (%d)", concurrency, maxConcurrency),
"",
map[string]any{"concurrency": concurrency, "max_concurrency": maxConcurrency},
map[string]interface{}{"concurrency": concurrency, "max_concurrency": maxConcurrency},
)
}
}

View File

@@ -1,51 +0,0 @@
// Package config handles application configuration management.
package config
import (
"fmt"
"strings"
)
// validateEmptyElement checks if an element in a slice is empty after trimming whitespace.
// Returns a formatted error message if empty, or empty string if valid.
func validateEmptyElement(fieldPath, value string, index int) string {
	if strings.TrimSpace(value) != "" {
		return ""
	}
	return fmt.Sprintf("%s[%d] is empty", fieldPath, index)
}
// validateDotPrefix ensures an extension starts with a dot.
// Returns a formatted error message if missing dot prefix, or empty string if valid.
func validateDotPrefix(fieldPath, value string, index int) string {
	trimmed := strings.TrimSpace(value)
	if strings.HasPrefix(trimmed, ".") {
		return ""
	}
	return fmt.Sprintf("%s[%d] (%s) must start with a dot", fieldPath, index, trimmed)
}
// validateDotPrefixMap ensures a map key (extension) starts with a dot.
// Returns a formatted error message if missing dot prefix, or empty string if valid.
func validateDotPrefixMap(fieldPath, key string) string {
	trimmed := strings.TrimSpace(key)
	if strings.HasPrefix(trimmed, ".") {
		return ""
	}
	return fmt.Sprintf("%s extension (%s) must start with a dot", fieldPath, trimmed)
}
// validateEmptyMapValue checks if a map value is empty after trimming whitespace.
// Returns a formatted error message if empty, or empty string if valid.
func validateEmptyMapValue(fieldPath, key, value string) string {
	if strings.TrimSpace(value) != "" {
		return ""
	}
	return fmt.Sprintf("%s[%s] has empty language value", fieldPath, key)
}

View File

@@ -1,51 +1,50 @@
package config_test
import (
"errors"
"strings"
"testing"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// TestValidateConfig tests the configuration validation functionality.
func TestValidateConfig(t *testing.T) {
tests := []struct {
name string
config map[string]any
config map[string]interface{}
wantErr bool
errContains string
}{
{
name: "valid default config",
config: map[string]any{
"fileSizeLimit": shared.ConfigFileSizeLimitDefault,
config: map[string]interface{}{
"fileSizeLimit": config.DefaultFileSizeLimit,
"ignoreDirectories": []string{"node_modules", ".git"},
},
wantErr: false,
},
{
name: "file size limit too small",
config: map[string]any{
"fileSizeLimit": shared.ConfigFileSizeLimitMin - 1,
config: map[string]interface{}{
"fileSizeLimit": config.MinFileSizeLimit - 1,
},
wantErr: true,
errContains: "fileSizeLimit",
},
{
name: "file size limit too large",
config: map[string]any{
"fileSizeLimit": shared.ConfigFileSizeLimitMax + 1,
config: map[string]interface{}{
"fileSizeLimit": config.MaxFileSizeLimit + 1,
},
wantErr: true,
errContains: "fileSizeLimit",
},
{
name: "empty ignore directory",
config: map[string]any{
config: map[string]interface{}{
"ignoreDirectories": []string{"node_modules", "", ".git"},
},
wantErr: true,
@@ -53,7 +52,7 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "ignore directory with path separator",
config: map[string]any{
config: map[string]interface{}{
"ignoreDirectories": []string{"node_modules", "src/build", ".git"},
},
wantErr: true,
@@ -61,7 +60,7 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "invalid supported format",
config: map[string]any{
config: map[string]interface{}{
"supportedFormats": []string{"json", "xml", "yaml"},
},
wantErr: true,
@@ -69,7 +68,7 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "invalid max concurrency",
config: map[string]any{
config: map[string]interface{}{
"maxConcurrency": 0,
},
wantErr: true,
@@ -77,8 +76,8 @@ func TestValidateConfig(t *testing.T) {
},
{
name: "valid comprehensive config",
config: map[string]any{
"fileSizeLimit": shared.ConfigFileSizeLimitDefault,
config: map[string]interface{}{
"fileSizeLimit": config.DefaultFileSizeLimit,
"ignoreDirectories": []string{"node_modules", ".git", ".vscode"},
"supportedFormats": []string{"json", "yaml", "markdown"},
"maxConcurrency": 8,
@@ -89,8 +88,7 @@ func TestValidateConfig(t *testing.T) {
}
for _, tt := range tests {
t.Run(
tt.name, func(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Reset viper for each test
viper.Reset()
@@ -99,23 +97,44 @@ func TestValidateConfig(t *testing.T) {
viper.Set(key, value)
}
// Set defaults for missing values without touching disk
config.SetDefaultConfig()
// Load defaults for missing values
config.LoadConfig()
err := config.ValidateConfig()
if tt.wantErr {
validateExpectedError(t, err, tt.errContains)
} else if err != nil {
if err == nil {
t.Errorf("Expected error but got none")
return
}
if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
t.Errorf("Expected error to contain %q, got %q", tt.errContains, err.Error())
}
// Check that it's a structured error
var structErr *utils.StructuredError
if !errorAs(err, &structErr) {
t.Errorf("Expected structured error, got %T", err)
return
}
if structErr.Type != utils.ErrorTypeConfiguration {
t.Errorf("Expected error type %v, got %v", utils.ErrorTypeConfiguration, structErr.Type)
}
if structErr.Code != utils.CodeConfigValidation {
t.Errorf("Expected error code %v, got %v", utils.CodeConfigValidation, structErr.Code)
}
} else {
if err != nil {
t.Errorf("Expected no error but got: %v", err)
}
},
)
}
})
}
}
// TestIsValidFormat tests the IsValidFormat function.
func TestIsValidFormat(t *testing.T) {
// TestValidationFunctions tests individual validation functions.
func TestValidationFunctions(t *testing.T) {
t.Run("IsValidFormat", func(t *testing.T) {
tests := []struct {
format string
valid bool
@@ -136,21 +155,20 @@ func TestIsValidFormat(t *testing.T) {
t.Errorf("IsValidFormat(%q) = %v, want %v", tt.format, result, tt.valid)
}
}
}
})
// TestValidateFileSize tests the ValidateFileSize function.
func TestValidateFileSize(t *testing.T) {
t.Run("ValidateFileSize", func(t *testing.T) {
viper.Reset()
viper.Set("fileSizeLimit", shared.ConfigFileSizeLimitDefault)
viper.Set("fileSizeLimit", config.DefaultFileSizeLimit)
tests := []struct {
name string
size int64
wantErr bool
}{
{"size within limit", shared.ConfigFileSizeLimitDefault - 1, false},
{"size at limit", shared.ConfigFileSizeLimitDefault, false},
{"size exceeds limit", shared.ConfigFileSizeLimitDefault + 1, true},
{"size within limit", config.DefaultFileSizeLimit - 1, false},
{"size at limit", config.DefaultFileSizeLimit, false},
{"size exceeds limit", config.DefaultFileSizeLimit + 1, true},
{"zero size", 0, false},
}
@@ -160,10 +178,9 @@ func TestValidateFileSize(t *testing.T) {
t.Errorf("%s: ValidateFileSize(%d) error = %v, wantErr %v", tt.name, tt.size, err, tt.wantErr)
}
}
}
})
// TestValidateOutputFormat tests the ValidateOutputFormat function.
func TestValidateOutputFormat(t *testing.T) {
t.Run("ValidateOutputFormat", func(t *testing.T) {
tests := []struct {
format string
wantErr bool
@@ -182,10 +199,9 @@ func TestValidateOutputFormat(t *testing.T) {
t.Errorf("ValidateOutputFormat(%q) error = %v, wantErr %v", tt.format, err, tt.wantErr)
}
}
}
})
// TestValidateConcurrency tests the ValidateConcurrency function.
func TestValidateConcurrency(t *testing.T) {
t.Run("ValidateConcurrency", func(t *testing.T) {
tests := []struct {
name string
concurrency int
@@ -212,47 +228,18 @@ func TestValidateConcurrency(t *testing.T) {
t.Errorf("%s: ValidateConcurrency(%d) error = %v, wantErr %v", tt.name, tt.concurrency, err, tt.wantErr)
}
}
})
}
// validateExpectedError validates that an error occurred and matches expectations.
func validateExpectedError(t *testing.T, err error, errContains string) {
t.Helper()
if err == nil {
t.Error(shared.TestMsgExpectedError)
return
}
if errContains != "" && !strings.Contains(err.Error(), errContains) {
t.Errorf("Expected error to contain %q, got %q", errContains, err.Error())
}
// Check that it's a structured error
var structErr *shared.StructuredError
if !errorAs(err, &structErr) {
t.Errorf("Expected structured error, got %T", err)
return
}
if structErr.Type != shared.ErrorTypeConfiguration {
t.Errorf("Expected error type %v, got %v", shared.ErrorTypeConfiguration, structErr.Type)
}
if structErr.Code != shared.CodeConfigValidation {
t.Errorf("Expected error code %v, got %v", shared.CodeConfigValidation, structErr.Code)
}
}
func errorAs(err error, target any) bool {
func errorAs(err error, target interface{}) bool {
if err == nil {
return false
}
structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
if ptr, ok := target.(**shared.StructuredError); ok {
if structErr, ok := err.(*utils.StructuredError); ok {
if ptr, ok := target.(**utils.StructuredError); ok {
*ptr = structErr
return true
}
}
return false
}

View File

@@ -1,45 +0,0 @@
# Replace check_secrets with gitleaks
## Problem
The `check_secrets` function in `scripts/security-scan.sh` uses hand-rolled regex
patterns that produce false positives. The pattern `key\s*[:=]\s*['"][^'"]{8,}['"]`
matches every `configKey: "backpressure.maxPendingFiles"` line in
`config/getters_test.go` (40+ matches), causing `make security-full` to fail.
The git history check (`git log --oneline -10 | grep -i "key|token"`) also matches
on benign commit messages containing words like "key" or "token".
## Decision
Replace the custom `check_secrets` function with
[gitleaks](https://github.com/gitleaks/gitleaks), a widely adopted Go-based secret
scanner with built-in rules for AWS keys, GitHub tokens, private keys, high-entropy
strings, and more.
## Approach
- **Drop-in replacement**: Only the `check_secrets` function body changes. The
function signature and return behavior (0 = clean, 1 = findings) remain identical.
- **`go run` invocation**: Use `go run github.com/gitleaks/gitleaks/v8@latest` so
the tool is fetched automatically if not cached. No changes to `install-tools.sh`.
- **Working tree scan only**: Use `gitleaks dir` to scan current files. No git
history scanning (matches current script behavior scope).
- **Config file**: A `.gitleaks.toml` at the project root extends gitleaks' built-in
rules with an allowlist to suppress known false positives in test files.
- **CI unaffected**: `.github/workflows/security.yml` runs its own inline steps
(gosec, govulncheck, checkmake, shfmt, yamllint, Trivy) and does not call
`security-scan.sh` or `check_secrets`.
## Files Changed
| File | Change |
|------|--------|
| `scripts/security-scan.sh` | Replace `check_secrets` function body |
| `.gitleaks.toml` | New file -- gitleaks configuration with allowlist |
## Verification
```bash
make security-full # should pass end-to-end
```

View File

@@ -1,219 +0,0 @@
# Basic Usage Examples
This directory contains practical examples of how to use gibidify for various use cases.
## Simple Code Aggregation
The most basic use case — aggregating all code files from a project into a single output:
```bash
# Aggregate all files from current directory to markdown
gibidify -source . -format markdown -destination output.md
# Aggregate specific directory to JSON
gibidify -source ./src -format json -destination code-dump.json
# Aggregate with custom worker count
gibidify -source ./project -format yaml -destination project.yaml -concurrency 8
```
## With Configuration File
For repeatable processing with custom settings:
1. Copy the configuration example:
```bash
cp config.example.yaml ~/.config/gibidify/config.yaml
```
2. Edit the configuration file to your needs, then run:
```bash
gibidify -source ./my-project
```
## Output Formats
### JSON Output
Best for programmatic processing and data analysis:
```bash
gibidify -source ./src -format json -destination api-code.json
```
Example JSON structure:
```json
{
"files": [
{
"path": "src/main.go",
"content": "package main...",
"language": "go",
"size": 1024
}
],
"metadata": {
"total_files": 15,
"total_size": 45678,
"processing_time": "1.2s"
}
}
```
### Markdown Output
Great for documentation and code reviews:
```bash
gibidify -source ./src -format markdown -destination code-review.md
```
### YAML Output
Structured and human-readable:
```bash
gibidify -source ./config -format yaml -destination config-dump.yaml
```
## Advanced Usage Examples
### Large Codebase Processing
For processing large projects with performance optimizations:
```bash
gibidify -source ./large-project \
-format json \
-destination large-output.json \
-concurrency 16 \
--verbose
```
### Memory-Conscious Processing
For systems with limited memory:
```bash
gibidify -source ./project \
-format markdown \
-destination output.md \
-concurrency 4
```
### Filtered Processing
Process only specific file types (when configured):
```bash
# Configure file patterns in config.yaml
filePatterns:
- "*.go"
- "*.py"
- "*.js"
# Then run
gibidify -source ./mixed-project -destination filtered.json
```
### CI/CD Integration
For automated documentation generation:
```bash
# In your CI pipeline
gibidify -source . \
-format markdown \
-destination docs/codebase.md \
--no-colors \
--no-progress \
-concurrency 2
```
## Error Handling
### Graceful Failure Handling
The tool handles common issues gracefully:
```bash
# This will fail gracefully if source doesn't exist
gibidify -source ./nonexistent -destination out.json
# This will warn about permission issues but continue
gibidify -source ./restricted-dir -destination out.md --verbose
```
### Resource Limits
Configure resource limits in your config file:
```yaml
resourceLimits:
enabled: true
maxFiles: 5000
maxTotalSize: 1073741824 # 1GB
fileProcessingTimeoutSec: 30
overallTimeoutSec: 1800 # 30 minutes
hardMemoryLimitMB: 512
```
## Performance Tips
1. **Adjust Concurrency**: Start with number of CPU cores, adjust based on I/O vs CPU bound work
2. **Use Appropriate Format**: JSON is fastest, Markdown has more overhead
3. **Configure File Limits**: Set reasonable limits in config.yaml for your use case
4. **Monitor Memory**: Use `--verbose` to see memory usage during processing
5. **Use Progress Indicators**: Enable progress bars for long-running operations
## Integration Examples
### With Git Hooks
Create a pre-commit hook to generate code documentation:
```bash
#!/bin/sh
# .git/hooks/pre-commit
gibidify -source . -format markdown -destination docs/current-code.md
git add docs/current-code.md
```
### With Make
Add to your Makefile:
```makefile
.PHONY: code-dump
code-dump:
gibidify -source ./src -format json -destination dist/codebase.json
.PHONY: docs
docs:
gibidify -source . -format markdown -destination docs/codebase.md
```
### Docker Usage
```dockerfile
FROM golang:1.25-alpine
RUN go install github.com/ivuorinen/gibidify@latest
WORKDIR /workspace
COPY . .
RUN gibidify -source . -format json -destination /output/codebase.json
```
## Common Use Cases
### 1. Code Review Preparation
```bash
gibidify -source ./feature-branch -format markdown -destination review.md
```
### 2. AI Code Analysis
```bash
gibidify -source ./src -format json -destination ai-input.json
```
### 3. Documentation Generation
```bash
gibidify -source ./lib -format markdown -destination api-docs.md
```
### 4. Backup Creation
```bash
gibidify -source ./project -format yaml -destination backup-$(date +%Y%m%d).yaml
```
### 5. Code Migration Prep
```bash
gibidify -source ./legacy-code -format json -destination migration-analysis.json
```

View File

@@ -1,469 +0,0 @@
# Configuration Examples
This document provides practical configuration examples for different use cases.
## Basic Configuration
Create `~/.config/gibidify/config.yaml`:
```yaml
# Basic setup for most projects
fileSizeLimit: 5242880 # 5MB per file
maxConcurrency: 8
ignoreDirectories:
- vendor
- node_modules
- .git
- dist
- target
# Enable file type detection
fileTypes:
enabled: true
```
## Development Environment Configuration
Optimized for active development with fast feedback:
```yaml
# ~/.config/gibidify/config.yaml
fileSizeLimit: 1048576 # 1MB - smaller files for faster processing
ignoreDirectories:
- vendor
- node_modules
- .git
- dist
- build
- tmp
- cache
- .vscode
- .idea
# Conservative resource limits for development
resourceLimits:
enabled: true
maxFiles: 1000
maxTotalSize: 104857600 # 100MB
fileProcessingTimeoutSec: 10
overallTimeoutSec: 300 # 5 minutes
maxConcurrentReads: 4
hardMemoryLimitMB: 256
# Fast backpressure for responsive development
backpressure:
enabled: true
maxPendingFiles: 500
maxPendingWrites: 50
maxMemoryUsage: 52428800 # 50MB
memoryCheckInterval: 100
# Simple output for quick reviews
output:
metadata:
includeStats: true
includeTimestamp: true
```
## Production/CI Configuration
High-performance setup for automated processing:
```yaml
# Production configuration
fileSizeLimit: 10485760 # 10MB per file
maxConcurrency: 16
ignoreDirectories:
- vendor
- node_modules
- .git
- dist
- build
- target
- tmp
- cache
- coverage
- .nyc_output
- __pycache__
# High-performance resource limits
resourceLimits:
enabled: true
maxFiles: 50000
maxTotalSize: 10737418240 # 10GB
fileProcessingTimeoutSec: 60
overallTimeoutSec: 7200 # 2 hours
maxConcurrentReads: 20
hardMemoryLimitMB: 2048
# High-throughput backpressure
backpressure:
enabled: true
maxPendingFiles: 5000
maxPendingWrites: 500
maxMemoryUsage: 1073741824 # 1GB
memoryCheckInterval: 1000
# Comprehensive output for analysis
output:
metadata:
includeStats: true
includeTimestamp: true
includeFileCount: true
includeSourcePath: true
includeFileTypes: true
includeProcessingTime: true
includeTotalSize: true
includeMetrics: true
```
## Security-Focused Configuration
Restrictive settings for untrusted input:
```yaml
# Security-first configuration
fileSizeLimit: 1048576 # 1MB maximum
ignoreDirectories:
- "**/.*" # All hidden directories
- vendor
- node_modules
- tmp
- temp
- cache
# Strict resource limits
resourceLimits:
enabled: true
maxFiles: 100 # Very restrictive
maxTotalSize: 10485760 # 10MB total
fileProcessingTimeoutSec: 5
overallTimeoutSec: 60 # 1 minute max
maxConcurrentReads: 2
rateLimitFilesPerSec: 10 # Rate limiting enabled
hardMemoryLimitMB: 128 # Low memory limit
# Conservative backpressure
backpressure:
enabled: true
maxPendingFiles: 50
maxPendingWrites: 10
maxMemoryUsage: 10485760 # 10MB
memoryCheckInterval: 10 # Frequent checks
# Minimal file type detection
fileTypes:
enabled: true
# Disable potentially risky file types
disabledLanguageExtensions:
- .bat
- .cmd
- .ps1
- .sh
disabledBinaryExtensions:
- .exe
- .dll
- .so
```
## Language-Specific Configuration
### Go Projects
```yaml
fileSizeLimit: 5242880
ignoreDirectories:
- vendor
- .git
- bin
- pkg
fileTypes:
enabled: true
customLanguages:
.mod: go-mod
.sum: go-sum
filePatterns:
- "*.go"
- "go.mod"
- "go.sum"
- "*.md"
```
### JavaScript/Node.js Projects
```yaml
fileSizeLimit: 2097152 # 2MB
ignoreDirectories:
- node_modules
- .git
- dist
- build
- coverage
- .nyc_output
fileTypes:
enabled: true
customLanguages:
.vue: vue
.svelte: svelte
.astro: astro
filePatterns:
- "*.js"
- "*.ts"
- "*.jsx"
- "*.tsx"
- "*.vue"
- "*.json"
- "*.md"
```
### Python Projects
```yaml
fileSizeLimit: 5242880
ignoreDirectories:
- .git
- __pycache__
- .pytest_cache
- venv
- env
- .env
- dist
- build
- .tox
fileTypes:
enabled: true
customLanguages:
.pyi: python-interface
.ipynb: jupyter-notebook
filePatterns:
- "*.py"
- "*.pyi"
- "requirements*.txt"
- "*.toml"
- "*.cfg"
- "*.ini"
- "*.md"
```
## Output Format Configurations
### Detailed Markdown Output
```yaml
output:
template: "detailed"
metadata:
includeStats: true
includeTimestamp: true
includeFileCount: true
includeSourcePath: true
includeFileTypes: true
includeProcessingTime: true
markdown:
useCodeBlocks: true
includeLanguage: true
headerLevel: 2
tableOfContents: true
syntaxHighlighting: true
lineNumbers: true
maxLineLength: 120
variables:
project_name: "My Project"
author: "Development Team"
version: "1.0.0"
```
### Compact JSON Output
```yaml
output:
template: "minimal"
metadata:
includeStats: true
includeFileCount: true
```
### Custom Template Output
```yaml
output:
template: "custom"
custom:
header: |
# {{ .ProjectName }} Code Dump
Generated: {{ .Timestamp }}
Total Files: {{ .FileCount }}
footer: |
---
Processing completed in {{ .ProcessingTime }}
fileHeader: |
## {{ .Path }}
Language: {{ .Language }} | Size: {{ .Size }} bytes
fileFooter: ""
variables:
project_name: "Custom Project"
```
## Environment-Specific Configurations
### Docker Container
```yaml
# Optimized for containerized environments
fileSizeLimit: 5242880
maxConcurrency: 4 # Conservative for containers
resourceLimits:
enabled: true
hardMemoryLimitMB: 512
maxFiles: 5000
overallTimeoutSec: 1800
backpressure:
enabled: true
maxMemoryUsage: 268435456 # 256MB
```
### GitHub Actions
```yaml
# CI/CD optimized configuration
fileSizeLimit: 2097152
maxConcurrency: 2 # Conservative for shared runners
ignoreDirectories:
- .git
- .github
- node_modules
- vendor
- dist
- build
resourceLimits:
enabled: true
maxFiles: 2000
overallTimeoutSec: 900 # 15 minutes
hardMemoryLimitMB: 1024
```
### Local Development
```yaml
# Developer-friendly settings
fileSizeLimit: 10485760 # 10MB
maxConcurrency: 8
# Show progress and verbose output
output:
metadata:
includeStats: true
includeTimestamp: true
includeProcessingTime: true
includeMetrics: true
markdown:
useCodeBlocks: true
includeLanguage: true
syntaxHighlighting: true
```
## Template Examples
### Custom API Documentation Template
```yaml
output:
template: "custom"
custom:
header: |
# {{ .Variables.api_name }} API Documentation
Version: {{ .Variables.version }}
Generated: {{ .Timestamp }}
## Overview
This document contains the complete source code for the {{ .Variables.api_name }} API.
## Statistics
- Total Files: {{ .FileCount }}
- Total Size: {{ .TotalSize | formatSize }}
- Processing Time: {{ .ProcessingTime }}
---
fileHeader: |
### {{ .Path }}
**Type:** {{ .Language }}
**Size:** {{ .Size | formatSize }}
```{{ .Language }}
fileFooter: |
```
---
footer: |
## Summary
Documentation generated with [gibidify](https://github.com/ivuorinen/gibidify)
variables:
api_name: "My API"
version: "v1.2.3"
```
### Code Review Template
```yaml
output:
template: "custom"
custom:
header: |
# Code Review: {{ .Variables.pr_title }}
**PR Number:** #{{ .Variables.pr_number }}
**Author:** {{ .Variables.author }}
**Date:** {{ .Timestamp }}
## Files Changed ({{ .FileCount }})
fileHeader: |
## 📄 {{ .Path }}
<details>
<summary>{{ .Language | upper }} • {{ .Size | formatSize }}</summary>
```{{ .Language }}
fileFooter: |
```
</details>
footer: |
---
**Review Summary:**
- Files reviewed: {{ .FileCount }}
- Total size: {{ .TotalSize | formatSize }}
- Generated in: {{ .ProcessingTime }}
variables:
pr_title: "Feature Implementation"
pr_number: "123"
author: "developer@example.com"
```

View File

@@ -8,8 +8,9 @@ import (
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
)
// BackpressureManager manages memory usage and applies back-pressure when needed.
@@ -28,11 +29,11 @@ type BackpressureManager struct {
// NewBackpressureManager creates a new back-pressure manager with configuration.
func NewBackpressureManager() *BackpressureManager {
return &BackpressureManager{
enabled: config.BackpressureEnabled(),
maxMemoryUsage: config.MaxMemoryUsage(),
memoryCheckInterval: config.MemoryCheckInterval(),
maxPendingFiles: config.MaxPendingFiles(),
maxPendingWrites: config.MaxPendingWrites(),
enabled: config.GetBackpressureEnabled(),
maxMemoryUsage: config.GetMaxMemoryUsage(),
memoryCheckInterval: config.GetMemoryCheckInterval(),
maxPendingFiles: config.GetMaxPendingFiles(),
maxPendingWrites: config.GetMaxPendingWrites(),
lastMemoryCheck: time.Now(),
}
}
@@ -42,17 +43,16 @@ func (bp *BackpressureManager) CreateChannels() (chan string, chan WriteRequest)
var fileCh chan string
var writeCh chan WriteRequest
logger := shared.GetLogger()
if bp.enabled {
// Use buffered channels with configured limits
fileCh = make(chan string, bp.maxPendingFiles)
writeCh = make(chan WriteRequest, bp.maxPendingWrites)
logger.Debugf("Created buffered channels: files=%d, writes=%d", bp.maxPendingFiles, bp.maxPendingWrites)
logrus.Debugf("Created buffered channels: files=%d, writes=%d", bp.maxPendingFiles, bp.maxPendingWrites)
} else {
// Use unbuffered channels (default behavior)
fileCh = make(chan string)
writeCh = make(chan WriteRequest)
logger.Debug("Created unbuffered channels (back-pressure disabled)")
logrus.Debug("Created unbuffered channels (back-pressure disabled)")
}
return fileCh, writeCh
@@ -60,34 +60,20 @@ func (bp *BackpressureManager) CreateChannels() (chan string, chan WriteRequest)
// ShouldApplyBackpressure checks if back-pressure should be applied.
func (bp *BackpressureManager) ShouldApplyBackpressure(ctx context.Context) bool {
// Check for context cancellation first
select {
case <-ctx.Done():
return false // No need for backpressure if canceled
default:
}
if !bp.enabled {
return false
}
// Check if we should evaluate memory usage
filesProcessed := atomic.AddInt64(&bp.filesProcessed, 1)
// Guard against zero or negative interval to avoid modulo-by-zero panic
interval := bp.memoryCheckInterval
if interval <= 0 {
interval = 1
}
if int(filesProcessed)%interval != 0 {
if int(filesProcessed)%bp.memoryCheckInterval != 0 {
return false
}
// Get current memory usage
var m runtime.MemStats
runtime.ReadMemStats(&m)
currentMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
currentMemory := int64(m.Alloc)
bp.mu.Lock()
defer bp.mu.Unlock()
@@ -95,22 +81,18 @@ func (bp *BackpressureManager) ShouldApplyBackpressure(ctx context.Context) bool
bp.lastMemoryCheck = time.Now()
// Check if we're over the memory limit
logger := shared.GetLogger()
if currentMemory > bp.maxMemoryUsage {
if !bp.memoryWarningLogged {
logger.Warnf(
"Memory usage (%d bytes) exceeds limit (%d bytes), applying back-pressure",
currentMemory, bp.maxMemoryUsage,
)
logrus.Warnf("Memory usage (%d bytes) exceeds limit (%d bytes), applying back-pressure",
currentMemory, bp.maxMemoryUsage)
bp.memoryWarningLogged = true
}
return true
}
// Reset warning flag if we're back under the limit
if bp.memoryWarningLogged && currentMemory < bp.maxMemoryUsage*8/10 { // 80% of limit
logger.Infof("Memory usage normalized (%d bytes), removing back-pressure", currentMemory)
logrus.Infof("Memory usage normalized (%d bytes), removing back-pressure", currentMemory)
bp.memoryWarningLogged = false
}
@@ -137,12 +119,11 @@ func (bp *BackpressureManager) ApplyBackpressure(ctx context.Context) {
// Log memory usage after GC
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger := shared.GetLogger()
logger.Debugf("Applied back-pressure: memory after GC = %d bytes", m.Alloc)
logrus.Debugf("Applied back-pressure: memory after GC = %d bytes", m.Alloc)
}
// Stats returns current back-pressure statistics.
func (bp *BackpressureManager) Stats() BackpressureStats {
// GetStats returns current back-pressure statistics.
func (bp *BackpressureManager) GetStats() BackpressureStats {
bp.mu.RLock()
defer bp.mu.RUnlock()
@@ -152,7 +133,7 @@ func (bp *BackpressureManager) Stats() BackpressureStats {
return BackpressureStats{
Enabled: bp.enabled,
FilesProcessed: atomic.LoadInt64(&bp.filesProcessed),
CurrentMemoryUsage: shared.SafeUint64ToInt64WithDefault(m.Alloc, 0),
CurrentMemoryUsage: int64(m.Alloc),
MaxMemoryUsage: bp.maxMemoryUsage,
MemoryWarningActive: bp.memoryWarningLogged,
LastMemoryCheck: bp.lastMemoryCheck,
@@ -179,11 +160,9 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
return
}
logger := shared.GetLogger()
// Check if file channel is getting full (>90% capacity)
fileCap := cap(fileCh)
if fileCap > 0 && len(fileCh) > fileCap*9/10 {
logger.Debugf("File channel is %d%% full, waiting for space", len(fileCh)*100/fileCap)
if len(fileCh) > bp.maxPendingFiles*9/10 {
logrus.Debugf("File channel is %d%% full, waiting for space", len(fileCh)*100/bp.maxPendingFiles)
// Wait a bit for the channel to drain
select {
@@ -194,9 +173,8 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
}
// Check if write channel is getting full (>90% capacity)
writeCap := cap(writeCh)
if writeCap > 0 && len(writeCh) > writeCap*9/10 {
logger.Debugf("Write channel is %d%% full, waiting for space", len(writeCh)*100/writeCap)
if len(writeCh) > bp.maxPendingWrites*9/10 {
logrus.Debugf("Write channel is %d%% full, waiting for space", len(writeCh)*100/bp.maxPendingWrites)
// Wait a bit for the channel to drain
select {
@@ -209,13 +187,10 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
// LogBackpressureInfo logs back-pressure configuration and status.
func (bp *BackpressureManager) LogBackpressureInfo() {
logger := shared.GetLogger()
if bp.enabled {
logger.Infof(
"Back-pressure enabled: maxMemory=%dMB, fileBuffer=%d, writeBuffer=%d, checkInterval=%d",
bp.maxMemoryUsage/int64(shared.BytesPerMB), bp.maxPendingFiles, bp.maxPendingWrites, bp.memoryCheckInterval,
)
logrus.Infof("Back-pressure enabled: maxMemory=%dMB, fileBuffer=%d, writeBuffer=%d, checkInterval=%d",
bp.maxMemoryUsage/1024/1024, bp.maxPendingFiles, bp.maxPendingWrites, bp.memoryCheckInterval)
} else {
logger.Info("Back-pressure disabled")
logrus.Info("Back-pressure disabled")
}
}

View File

@@ -1,344 +0,0 @@
package fileproc_test
import (
"context"
"runtime"
"testing"
"time"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
// TestNewBackpressureManager verifies that a backpressure manager can be
// constructed. Its configuration-derived fields are private, so the only
// observable success criterion is a non-nil result.
func TestNewBackpressureManager(t *testing.T) {
	if mgr := fileproc.NewBackpressureManager(); mgr == nil {
		t.Error("Expected backpressure manager to be created, got nil")
	}
}
// TestBackpressureManagerCreateChannels verifies that CreateChannels returns
// usable channels and that a value round-trips through the file channel.
func TestBackpressureManagerCreateChannels(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()
	files, writes := mgr.CreateChannels()

	if files == nil {
		t.Error("Expected file channel to be created, got nil")
	}
	if writes == nil {
		t.Error("Expected write channel to be created, got nil")
	}

	// Non-blocking send: a freshly created channel should accept one value.
	select {
	case files <- "test-file":
	default:
		t.Error("Unable to send to file channel")
	}

	// The queued value must be readable within a short deadline.
	select {
	case got := <-files:
		if got != "test-file" {
			t.Errorf("Expected 'test-file', got %s", got)
		}
	case <-time.After(100 * time.Millisecond):
		t.Error("Timeout reading from file channel")
	}
}
// TestBackpressureManagerShouldApplyBackpressure exercises the backpressure
// decision with default configuration. The boolean outcome depends on the
// current settings and memory usage, so only error-free execution is checked.
func TestBackpressureManagerShouldApplyBackpressure(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()
	_ = mgr.ShouldApplyBackpressure(context.Background())
}
// TestBackpressureManagerApplyBackpressure checks that applying backpressure
// with a live context completes without panicking (the method returns nothing).
func TestBackpressureManagerApplyBackpressure(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()
	mgr.ApplyBackpressure(context.Background())
}
// TestBackpressureManagerApplyBackpressureWithCancellation checks that an
// already-canceled context is handled gracefully: the call must return
// promptly instead of hanging.
func TestBackpressureManagerApplyBackpressureWithCancellation(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // canceled before the call on purpose

	mgr.ApplyBackpressure(ctx)
}
// TestBackpressureManagerGetStats verifies that the statistics snapshot
// contains sane (non-negative) counters and limits.
func TestBackpressureManagerGetStats(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()
	stats := mgr.Stats()

	if stats.FilesProcessed < 0 {
		t.Error("Expected non-negative files processed count")
	}
	if stats.CurrentMemoryUsage < 0 {
		t.Error("Expected non-negative memory usage")
	}
	if stats.MaxMemoryUsage < 0 {
		t.Error("Expected non-negative max memory usage")
	}
	if stats.MaxPendingFiles < 0 || stats.MaxPendingWrites < 0 {
		t.Error("Expected non-negative channel buffer sizes")
	}
}
// TestBackpressureManagerWaitForChannelSpace checks that waiting on empty
// channels returns immediately rather than blocking.
func TestBackpressureManagerWaitForChannelSpace(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()
	files, writes := mgr.CreateChannels()

	mgr.WaitForChannelSpace(context.Background(), files, writes)
}
// TestBackpressureManagerWaitForChannelSpaceWithCancellation checks that
// waiting for channel space under an already-canceled context returns
// without hanging.
func TestBackpressureManagerWaitForChannelSpaceWithCancellation(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // canceled before the call on purpose

	files, writes := mgr.CreateChannels()
	mgr.WaitForChannelSpace(ctx, files, writes)
}
// TestBackpressureManagerLogBackpressureInfo checks that the info-logging
// helper executes without panicking; it produces log output only.
func TestBackpressureManagerLogBackpressureInfo(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	mgr := fileproc.NewBackpressureManager()
	mgr.LogBackpressureInfo()
}
// BenchmarkBackpressureManager benchmarks backpressure operations.
// BenchmarkBackpressureManagerCreateChannels measures the cost of creating
// a channel pair, forcing a GC per iteration so memory impact is visible.
func BenchmarkBackpressureManagerCreateChannels(b *testing.B) {
	mgr := fileproc.NewBackpressureManager()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		files, writes := mgr.CreateChannels()
		_, _ = files, writes // keep results alive to prevent optimization
		runtime.GC()
	}
}
// BenchmarkBackpressureManagerShouldApplyBackpressure measures the cost of
// the backpressure decision on a live context.
func BenchmarkBackpressureManagerShouldApplyBackpressure(b *testing.B) {
	mgr := fileproc.NewBackpressureManager()
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = mgr.ShouldApplyBackpressure(ctx) // sink to prevent optimization
	}
}
// BenchmarkBackpressureManagerApplyBackpressure measures the cost of a
// single ApplyBackpressure call.
func BenchmarkBackpressureManagerApplyBackpressure(b *testing.B) {
	mgr := fileproc.NewBackpressureManager()
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mgr.ApplyBackpressure(ctx)
	}
}
// BenchmarkBackpressureManagerGetStats measures the cost of taking a
// statistics snapshot.
func BenchmarkBackpressureManagerGetStats(b *testing.B) {
	mgr := fileproc.NewBackpressureManager()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = mgr.Stats() // sink to prevent optimization
	}
}
// TestBackpressureManager_ShouldApplyBackpressure_EdgeCases tests various edge cases for backpressure decision.
// TestBackpressureManagerShouldApplyBackpressureEdgeCases drives the
// decision method repeatedly with a small memory-check interval and a tiny
// memory limit so the interval-based check path fires several times.
func TestBackpressureManagerShouldApplyBackpressureEdgeCases(t *testing.T) {
	testutil.ApplyBackpressureOverrides(t, map[string]any{
		shared.ConfigKeyBackpressureEnabled:  true,
		"backpressure.memory_check_interval": 2,
		"backpressure.memory_limit_mb":       1,
	})

	mgr := fileproc.NewBackpressureManager()
	ctx := context.Background()

	// Ten calls with interval 2 guarantee multiple memory checks. The
	// boolean result depends on live memory usage, so it is discarded;
	// only the code paths are exercised.
	for i := 0; i < 10; i++ {
		_ = mgr.ShouldApplyBackpressure(ctx)
	}
}
// TestBackpressureManager_CreateChannels_EdgeCases tests edge cases in channel creation.
// TestBackpressureManagerCreateChannelsEdgeCases creates several channel
// pairs with custom buffer-size overrides to exercise resource management.
func TestBackpressureManagerCreateChannelsEdgeCases(t *testing.T) {
	testutil.ApplyBackpressureOverrides(t, map[string]any{
		"backpressure.file_buffer_size":  50,
		"backpressure.write_buffer_size": 25,
	})

	mgr := fileproc.NewBackpressureManager()

	for i := 0; i < 5; i++ {
		files, writes := mgr.CreateChannels()

		// Sends are non-blocking: a full buffer is an acceptable outcome,
		// so neither branch fails the test.
		select {
		case files <- "test":
		default:
		}

		select {
		case writes <- fileproc.WriteRequest{Path: "test", Content: "content"}:
		default:
		}
	}
}
// TestBackpressureManager_WaitForChannelSpace_EdgeCases tests edge cases in channel space waiting.
// TestBackpressureManagerWaitForChannelSpaceEdgeCases fills both channels
// from background goroutines and then verifies that WaitForChannelSpace
// copes with the resulting pressure without deadlocking.
func TestBackpressureManagerWaitForChannelSpaceEdgeCases(t *testing.T) {
	testutil.ApplyBackpressureOverrides(t, map[string]any{
		shared.ConfigKeyBackpressureEnabled: true,
		"backpressure.wait_timeout_ms":      10,
	})
	bp := fileproc.NewBackpressureManager()
	ctx := context.Background()
	// Create channels with small buffers
	fileCh, writeCh := bp.CreateChannels()
	// Fill up the channels to create pressure. Each send gives up after
	// 1ms, so the producer goroutines cannot outlive the test for long.
	// NOTE(review): the goroutines are not joined; they are assumed to
	// drain/expire on their own — confirm this cannot leak across tests.
	go func() {
		for i := 0; i < 100; i++ {
			select {
			case fileCh <- "file":
			case <-time.After(1 * time.Millisecond):
			}
		}
	}()
	go func() {
		for i := 0; i < 100; i++ {
			select {
			case writeCh <- fileproc.WriteRequest{Path: "test", Content: "content"}:
			case <-time.After(1 * time.Millisecond):
			}
		}
	}()
	// Wait for channel space - should handle the full channels
	bp.WaitForChannelSpace(ctx, fileCh, writeCh)
}
// TestBackpressureManager_MemoryPressure tests behavior under simulated memory pressure.
// TestBackpressureManagerMemoryPressure configures an extremely low memory
// limit so backpressure is likely to trigger, then exercises the
// decide/apply cycle and the info logger.
func TestBackpressureManagerMemoryPressure(t *testing.T) {
	testutil.ApplyBackpressureOverrides(t, map[string]any{
		shared.ConfigKeyBackpressureEnabled:  true,
		"backpressure.memory_limit_mb":       0.001,
		"backpressure.memory_check_interval": 1,
	})

	mgr := fileproc.NewBackpressureManager()
	ctx := context.Background()

	// Allocate 1MB so current usage comfortably exceeds the tiny limit.
	buf := make([]byte, 1024*1024)
	_ = buf[0]

	for i := 0; i < 5; i++ {
		if mgr.ShouldApplyBackpressure(ctx) {
			mgr.ApplyBackpressure(ctx)
			t.Log("Backpressure applied due to memory pressure")
		}
	}

	mgr.LogBackpressureInfo()
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
// getNormalizedExtension efficiently extracts and normalizes the file extension with caching.
@@ -7,7 +6,6 @@ func (r *FileTypeRegistry) getNormalizedExtension(filename string) string {
r.cacheMutex.RLock()
if ext, exists := r.extCache[filename]; exists {
r.cacheMutex.RUnlock()
return ext
}
r.cacheMutex.RUnlock()
@@ -44,7 +42,6 @@ func (r *FileTypeRegistry) getFileTypeResult(filename string) FileTypeResult {
r.updateStats(func() {
r.stats.CacheHits++
})
return result
}
r.cacheMutex.RUnlock()

View File

@@ -5,6 +5,5 @@ package fileproc
// and returns a slice of file paths.
func CollectFiles(root string) ([]string, error) {
w := NewProdWalker()
return w.Walk(root)
}

View File

@@ -2,7 +2,6 @@ package fileproc_test
import (
"os"
"path/filepath"
"testing"
"github.com/ivuorinen/gibidify/fileproc"
@@ -48,70 +47,3 @@ func TestCollectFilesError(t *testing.T) {
t.Fatal("Expected an error, got nil")
}
}
// TestCollectFiles tests the actual CollectFiles function with a real directory.
func TestCollectFiles(t *testing.T) {
// Create a temporary directory with test files
tmpDir := t.TempDir()
// Create test files with known supported extensions
testFiles := map[string]string{
"test1.go": "package main\n\nfunc main() {\n\t// Go file\n}",
"test2.py": "# Python file\nprint('hello world')",
"test3.js": "// JavaScript file\nconsole.log('hello');",
}
for name, content := range testFiles {
filePath := filepath.Join(tmpDir, name)
if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil {
t.Fatalf("Failed to create test file %s: %v", name, err)
}
}
// Test CollectFiles
files, err := fileproc.CollectFiles(tmpDir)
if err != nil {
t.Fatalf("CollectFiles failed: %v", err)
}
// Verify we got the expected number of files
if len(files) != len(testFiles) {
t.Errorf("Expected %d files, got %d", len(testFiles), len(files))
}
// Verify all expected files are found
foundFiles := make(map[string]bool)
for _, file := range files {
foundFiles[file] = true
}
for expectedFile := range testFiles {
expectedPath := filepath.Join(tmpDir, expectedFile)
if !foundFiles[expectedPath] {
t.Errorf("Expected file %s not found in results", expectedPath)
}
}
}
// TestCollectFiles_NonExistentDirectory tests CollectFiles with a non-existent directory.
func TestCollectFilesNonExistentDirectory(t *testing.T) {
_, err := fileproc.CollectFiles("/non/existent/directory")
if err == nil {
t.Error("Expected error for non-existent directory, got nil")
}
}
// TestCollectFiles_EmptyDirectory tests CollectFiles with an empty directory.
func TestCollectFilesEmptyDirectory(t *testing.T) {
tmpDir := t.TempDir()
// Don't create any files
files, err := fileproc.CollectFiles(tmpDir)
if err != nil {
t.Fatalf("CollectFiles failed on empty directory: %v", err)
}
if len(files) != 0 {
t.Errorf("Expected 0 files in empty directory, got %d", len(files))
}
}

View File

@@ -1,13 +1,9 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import "strings"
// ApplyCustomExtensions applies custom extensions from configuration.
func (r *FileTypeRegistry) ApplyCustomExtensions(
customImages, customBinary []string,
customLanguages map[string]string,
) {
func (r *FileTypeRegistry) ApplyCustomExtensions(customImages, customBinary []string, customLanguages map[string]string) {
// Add custom image extensions
r.addExtensions(customImages, r.AddImageExtension)
@@ -38,7 +34,7 @@ func ConfigureFromSettings(
customLanguages map[string]string,
disabledImages, disabledBinary, disabledLanguages []string,
) {
registry := DefaultRegistry()
registry := GetDefaultRegistry()
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import "strings"
@@ -15,9 +14,9 @@ func IsBinary(filename string) bool {
return getRegistry().IsBinary(filename)
}
// Language returns the language identifier for the given filename based on its extension.
func Language(filename string) string {
return getRegistry().Language(filename)
// GetLanguage returns the language identifier for the given filename based on its extension.
func GetLanguage(filename string) string {
return getRegistry().GetLanguage(filename)
}
// Registry methods for detection
@@ -25,24 +24,21 @@ func Language(filename string) string {
// IsImage checks if the file extension indicates an image file.
func (r *FileTypeRegistry) IsImage(filename string) bool {
result := r.getFileTypeResult(filename)
return result.IsImage
}
// IsBinary checks if the file extension indicates a binary file.
func (r *FileTypeRegistry) IsBinary(filename string) bool {
result := r.getFileTypeResult(filename)
return result.IsBinary
}
// Language returns the language identifier for the given filename based on its extension.
func (r *FileTypeRegistry) Language(filename string) string {
// GetLanguage returns the language identifier for the given filename based on its extension.
func (r *FileTypeRegistry) GetLanguage(filename string) string {
if len(filename) < minExtensionLength {
return ""
}
result := r.getFileTypeResult(filename)
return result.Language
}

View File

@@ -1,8 +1,5 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import "github.com/ivuorinen/gibidify/shared"
// getImageExtensions returns the default image file extensions.
func getImageExtensions() map[string]bool {
return map[string]bool{
@@ -133,15 +130,15 @@ func getLanguageMap() map[string]string {
".cmd": "batch",
// Data formats
".json": shared.FormatJSON,
".yaml": shared.FormatYAML,
".yml": shared.FormatYAML,
".json": "json",
".yaml": "yaml",
".yml": "yaml",
".toml": "toml",
".xml": "xml",
".sql": "sql",
// Documentation
".md": shared.FormatMarkdown,
".md": "markdown",
".rst": "rst",
".tex": "latex",

View File

@@ -12,6 +12,5 @@ func (fw FakeWalker) Walk(_ string) ([]string, error) {
if fw.Err != nil {
return nil, fw.Err
}
return fw.Files, nil
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -16,8 +15,8 @@ type FileFilter struct {
// NewFileFilter creates a new file filter with current configuration.
func NewFileFilter() *FileFilter {
return &FileFilter{
ignoredDirs: config.IgnoredDirectories(),
sizeLimit: config.FileSizeLimit(),
ignoredDirs: config.GetIgnoredDirectories(),
sizeLimit: config.GetFileSizeLimit(),
}
}
@@ -41,7 +40,6 @@ func (f *FileFilter) shouldSkipDirectory(entry os.DirEntry) bool {
return true
}
}
return false
}

View File

@@ -1,200 +1,105 @@
package fileproc
import (
"errors"
"fmt"
"sync"
"testing"
"github.com/ivuorinen/gibidify/shared"
)
const (
numGoroutines = 100
numOperationsPerGoroutine = 100
)
// TestFileTypeRegistry_ThreadSafety tests thread safety of the FileTypeRegistry.
func TestFileTypeRegistry_ThreadSafety(t *testing.T) {
const numGoroutines = 100
const numOperationsPerGoroutine = 100
// TestFileTypeRegistryConcurrentReads tests concurrent read operations.
// This test verifies thread-safety of registry reads under concurrent access.
// For race condition detection, run with: go test -race
func TestFileTypeRegistryConcurrentReads(t *testing.T) {
var wg sync.WaitGroup
errChan := make(chan error, numGoroutines)
for i := 0; i < numGoroutines; i++ {
wg.Go(func() {
if err := performConcurrentReads(); err != nil {
errChan <- err
}
})
}
wg.Wait()
close(errChan)
// Check for any errors from goroutines
for err := range errChan {
t.Errorf("Concurrent read operation failed: %v", err)
}
}
// TestFileTypeRegistryConcurrentRegistryAccess tests concurrent registry access.
func TestFileTypeRegistryConcurrentRegistryAccess(t *testing.T) {
// Reset the registry to test concurrent initialization
ResetRegistryForTesting()
t.Cleanup(func() {
ResetRegistryForTesting()
})
registries := make([]*FileTypeRegistry, numGoroutines)
var wg sync.WaitGroup
// Test concurrent read operations
t.Run("ConcurrentReads", func(t *testing.T) {
for i := 0; i < numGoroutines; i++ {
idx := i // capture for closure
wg.Go(func() {
registries[idx] = DefaultRegistry()
})
}
wg.Wait()
verifySameRegistryInstance(t, registries)
}
// TestFileTypeRegistryConcurrentModifications tests concurrent modifications.
func TestFileTypeRegistryConcurrentModifications(t *testing.T) {
var wg sync.WaitGroup
for i := 0; i < numGoroutines; i++ {
id := i // capture for closure
wg.Go(func() {
performConcurrentModifications(t, id)
})
}
wg.Wait()
}
// performConcurrentReads performs concurrent read operations on the registry.
// Returns an error if any operation produces unexpected results.
func performConcurrentReads() error {
registry := DefaultRegistry()
wg.Add(1)
go func(id int) {
defer wg.Done()
registry := GetDefaultRegistry()
for j := 0; j < numOperationsPerGoroutine; j++ {
// Test various file detection operations with expected results
if !registry.IsImage(shared.TestFilePNG) {
return errors.New("expected .png to be detected as image")
}
if !registry.IsBinary(shared.TestFileEXE) {
return errors.New("expected .exe to be detected as binary")
}
if lang := registry.Language(shared.TestFileGo); lang != "go" {
return fmt.Errorf("expected .go to have language 'go', got %q", lang)
}
// Test various file detection operations
_ = registry.IsImage("test.png")
_ = registry.IsBinary("test.exe")
_ = registry.GetLanguage("test.go")
// Test global functions with expected results
if !IsImage(shared.TestFileImageJPG) {
return errors.New("expected .jpg to be detected as image")
// Test global functions too
_ = IsImage("image.jpg")
_ = IsBinary("binary.dll")
_ = GetLanguage("script.py")
}
if !IsBinary(shared.TestFileBinaryDLL) {
return errors.New("expected .dll to be detected as binary")
}(i)
}
if lang := Language(shared.TestFileScriptPy); lang != "python" {
return fmt.Errorf("expected .py to have language 'python', got %q", lang)
}
}
return nil
}
wg.Wait()
})
// verifySameRegistryInstance verifies all goroutines got the same registry instance.
func verifySameRegistryInstance(t *testing.T, registries []*FileTypeRegistry) {
t.Helper()
// Test concurrent registry access (singleton creation)
t.Run("ConcurrentRegistryAccess", func(t *testing.T) {
// Reset the registry to test concurrent initialization
// Note: This is not safe in a real application, but needed for testing
registryOnce = sync.Once{}
registry = nil
registries := make([]*FileTypeRegistry, numGoroutines)
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
registries[id] = GetDefaultRegistry()
}(i)
}
wg.Wait()
// Verify all goroutines got the same registry instance
firstRegistry := registries[0]
for i := 1; i < numGoroutines; i++ {
if registries[i] != firstRegistry {
t.Errorf("Registry %d is different from registry 0", i)
}
}
}
})
// performConcurrentModifications performs concurrent modifications on separate registry instances.
func performConcurrentModifications(t *testing.T, id int) {
t.Helper()
// Test concurrent modifications on separate registry instances
t.Run("ConcurrentModifications", func(t *testing.T) {
// Create separate registry instances for each goroutine to test modification thread safety
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
// Create a new registry instance for this goroutine
registry := createConcurrencyTestRegistry()
for j := 0; j < numOperationsPerGoroutine; j++ {
extSuffix := fmt.Sprintf("_%d_%d", id, j)
addTestExtensions(registry, extSuffix)
verifyTestExtensions(t, registry, extSuffix)
}
}
// createConcurrencyTestRegistry creates a new registry instance for concurrency testing.
func createConcurrencyTestRegistry() *FileTypeRegistry {
return &FileTypeRegistry{
registry := &FileTypeRegistry{
imageExts: make(map[string]bool),
binaryExts: make(map[string]bool),
languageMap: make(map[string]string),
extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
}
}
// addTestExtensions adds test extensions to the registry.
func addTestExtensions(registry *FileTypeRegistry, extSuffix string) {
for j := 0; j < numOperationsPerGoroutine; j++ {
// Add unique extensions for this goroutine
extSuffix := fmt.Sprintf("_%d_%d", id, j)
registry.AddImageExtension(".img" + extSuffix)
registry.AddBinaryExtension(".bin" + extSuffix)
registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)
}
// verifyTestExtensions verifies that test extensions were added correctly.
func verifyTestExtensions(t *testing.T, registry *FileTypeRegistry, extSuffix string) {
t.Helper()
// Verify the additions worked
if !registry.IsImage("test.img" + extSuffix) {
t.Errorf("Failed to add image extension .img%s", extSuffix)
}
if !registry.IsBinary("test.bin" + extSuffix) {
t.Errorf("Failed to add binary extension .bin%s", extSuffix)
}
if registry.Language("test.lang"+extSuffix) != "lang"+extSuffix {
if registry.GetLanguage("test.lang"+extSuffix) != "lang"+extSuffix {
t.Errorf("Failed to add language mapping .lang%s", extSuffix)
}
}
// Benchmarks for concurrency performance
// BenchmarkConcurrentReads benchmarks concurrent read operations on the registry.
func BenchmarkConcurrentReads(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_ = performConcurrentReads()
}
}(i)
}
wg.Wait()
})
}
// BenchmarkConcurrentRegistryAccess benchmarks concurrent registry singleton access.
func BenchmarkConcurrentRegistryAccess(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_ = DefaultRegistry()
}
})
}
// BenchmarkConcurrentModifications benchmarks sequential registry modifications.
// Note: Concurrent modifications to the same registry require external synchronization.
// This benchmark measures the cost of modification operations themselves.
func BenchmarkConcurrentModifications(b *testing.B) {
for b.Loop() {
registry := createConcurrencyTestRegistry()
for i := 0; i < 10; i++ {
extSuffix := fmt.Sprintf("_bench_%d", i)
registry.AddImageExtension(".img" + extSuffix)
registry.AddBinaryExtension(".bin" + extSuffix)
registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)
}
}
}

View File

@@ -1,108 +1,31 @@
package fileproc
import (
"sync"
"testing"
"github.com/ivuorinen/gibidify/shared"
)
const (
zigLang = "zig"
)
// TestFileTypeRegistryApplyCustomExtensions tests applying custom extensions.
func TestFileTypeRegistryApplyCustomExtensions(t *testing.T) {
registry := createEmptyTestRegistry()
// TestFileTypeRegistry_Configuration tests the configuration functionality.
func TestFileTypeRegistry_Configuration(t *testing.T) {
// Create a new registry instance for testing
registry := &FileTypeRegistry{
imageExts: make(map[string]bool),
binaryExts: make(map[string]bool),
languageMap: make(map[string]string),
}
// Test ApplyCustomExtensions
t.Run("ApplyCustomExtensions", func(t *testing.T) {
customImages := []string{".webp", ".avif", ".heic"}
customBinary := []string{".custom", ".mybin"}
customLanguages := map[string]string{
".zig": zigLang,
".zig": "zig",
".odin": "odin",
".v": "vlang",
}
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
verifyCustomExtensions(t, registry, customImages, customBinary, customLanguages)
}
// TestFileTypeRegistryDisableExtensions tests disabling extensions.
func TestFileTypeRegistryDisableExtensions(t *testing.T) {
registry := createEmptyTestRegistry()
// Add some extensions first
setupRegistryExtensions(registry)
// Verify they work before disabling
verifyExtensionsEnabled(t, registry)
// Disable some extensions
disabledImages := []string{".png"}
disabledBinary := []string{".exe"}
disabledLanguages := []string{".go"}
registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
// Verify disabled and remaining extensions
verifyExtensionsDisabled(t, registry)
verifyRemainingExtensions(t, registry)
}
// TestFileTypeRegistryEmptyValuesHandling tests handling of empty values.
func TestFileTypeRegistryEmptyValuesHandling(t *testing.T) {
registry := createEmptyTestRegistry()
customImages := []string{"", shared.TestExtensionValid, ""}
customBinary := []string{"", shared.TestExtensionValid}
customLanguages := map[string]string{
"": "invalid",
shared.TestExtensionValid: "",
".good": "good",
}
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
verifyEmptyValueHandling(t, registry)
}
// TestFileTypeRegistryCaseInsensitiveHandling tests case insensitive handling.
func TestFileTypeRegistryCaseInsensitiveHandling(t *testing.T) {
registry := createEmptyTestRegistry()
customImages := []string{".WEBP", ".Avif"}
customBinary := []string{".CUSTOM", ".MyBin"}
customLanguages := map[string]string{
".ZIG": zigLang,
".Odin": "odin",
}
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
verifyCaseInsensitiveHandling(t, registry)
}
// createEmptyTestRegistry creates a new empty test registry instance for config testing.
func createEmptyTestRegistry() *FileTypeRegistry {
return &FileTypeRegistry{
imageExts: make(map[string]bool),
binaryExts: make(map[string]bool),
languageMap: make(map[string]string),
extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
}
}
// verifyCustomExtensions verifies that custom extensions are applied correctly.
func verifyCustomExtensions(
t *testing.T,
registry *FileTypeRegistry,
customImages, customBinary []string,
customLanguages map[string]string,
) {
t.Helper()
// Test custom image extensions
for _, ext := range customImages {
if !registry.IsImage("test" + ext) {
@@ -119,99 +42,125 @@ func verifyCustomExtensions(
// Test custom language mappings
for ext, expectedLang := range customLanguages {
if lang := registry.Language("test" + ext); lang != expectedLang {
if lang := registry.GetLanguage("test" + ext); lang != expectedLang {
t.Errorf("Expected %s to map to %s, got %s", ext, expectedLang, lang)
}
}
}
})
// setupRegistryExtensions adds test extensions to the registry.
func setupRegistryExtensions(registry *FileTypeRegistry) {
// Test DisableExtensions
t.Run("DisableExtensions", func(t *testing.T) {
// Add some extensions first
registry.AddImageExtension(".png")
registry.AddImageExtension(".jpg")
registry.AddBinaryExtension(".exe")
registry.AddBinaryExtension(".dll")
registry.AddLanguageMapping(".go", "go")
registry.AddLanguageMapping(".py", "python")
}
// verifyExtensionsEnabled verifies that extensions are enabled before disabling.
func verifyExtensionsEnabled(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
if !registry.IsImage(shared.TestFilePNG) {
// Verify they work
if !registry.IsImage("test.png") {
t.Error("Expected .png to be image before disabling")
}
if !registry.IsBinary(shared.TestFileEXE) {
if !registry.IsBinary("test.exe") {
t.Error("Expected .exe to be binary before disabling")
}
if registry.Language(shared.TestFileGo) != "go" {
if registry.GetLanguage("test.go") != "go" {
t.Error("Expected .go to map to go before disabling")
}
}
// verifyExtensionsDisabled verifies that disabled extensions no longer work.
func verifyExtensionsDisabled(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
// Disable some extensions
disabledImages := []string{".png"}
disabledBinary := []string{".exe"}
disabledLanguages := []string{".go"}
if registry.IsImage(shared.TestFilePNG) {
registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
// Test that disabled extensions no longer work
if registry.IsImage("test.png") {
t.Error("Expected .png to not be image after disabling")
}
if registry.IsBinary(shared.TestFileEXE) {
if registry.IsBinary("test.exe") {
t.Error("Expected .exe to not be binary after disabling")
}
if registry.Language(shared.TestFileGo) != "" {
if registry.GetLanguage("test.go") != "" {
t.Error("Expected .go to not map to language after disabling")
}
}
// verifyRemainingExtensions verifies that non-disabled extensions still work.
func verifyRemainingExtensions(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
if !registry.IsImage(shared.TestFileJPG) {
// Test that non-disabled extensions still work
if !registry.IsImage("test.jpg") {
t.Error("Expected .jpg to still be image after disabling .png")
}
if !registry.IsBinary(shared.TestFileDLL) {
if !registry.IsBinary("test.dll") {
t.Error("Expected .dll to still be binary after disabling .exe")
}
if registry.Language(shared.TestFilePy) != "python" {
if registry.GetLanguage("test.py") != "python" {
t.Error("Expected .py to still map to python after disabling .go")
}
}
})
// verifyEmptyValueHandling verifies handling of empty values.
func verifyEmptyValueHandling(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
// Test empty values handling
t.Run("EmptyValuesHandling", func(t *testing.T) {
registry := &FileTypeRegistry{
imageExts: make(map[string]bool),
binaryExts: make(map[string]bool),
languageMap: make(map[string]string),
}
if registry.IsImage("test") {
// Test with empty values
customImages := []string{"", ".valid", ""}
customBinary := []string{"", ".valid"}
customLanguages := map[string]string{
"": "invalid",
".valid": "",
".good": "good",
}
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
// Only valid entries should be added
if registry.IsImage("test.") {
t.Error("Expected empty extension to not be added as image")
}
if !registry.IsImage(shared.TestFileValid) {
if !registry.IsImage("test.valid") {
t.Error("Expected .valid to be added as image")
}
if registry.IsBinary("test") {
if registry.IsBinary("test.") {
t.Error("Expected empty extension to not be added as binary")
}
if !registry.IsBinary(shared.TestFileValid) {
if !registry.IsBinary("test.valid") {
t.Error("Expected .valid to be added as binary")
}
if registry.Language("test") != "" {
if registry.GetLanguage("test.") != "" {
t.Error("Expected empty extension to not be added as language")
}
if registry.Language(shared.TestFileValid) != "" {
if registry.GetLanguage("test.valid") != "" {
t.Error("Expected .valid with empty language to not be added")
}
if registry.Language("test.good") != "good" {
if registry.GetLanguage("test.good") != "good" {
t.Error("Expected .good to map to good")
}
}
})
// verifyCaseInsensitiveHandling verifies case insensitive handling.
func verifyCaseInsensitiveHandling(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
// Test case insensitive handling
t.Run("CaseInsensitiveHandling", func(t *testing.T) {
registry := &FileTypeRegistry{
imageExts: make(map[string]bool),
binaryExts: make(map[string]bool),
languageMap: make(map[string]string),
}
if !registry.IsImage(shared.TestFileWebP) {
customImages := []string{".WEBP", ".Avif"}
customBinary := []string{".CUSTOM", ".MyBin"}
customLanguages := map[string]string{
".ZIG": "zig",
".Odin": "odin",
}
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
// Test that both upper and lower case work
if !registry.IsImage("test.webp") {
t.Error("Expected .webp (lowercase) to work after adding .WEBP")
}
if !registry.IsImage("test.WEBP") {
@@ -223,23 +172,25 @@ func verifyCaseInsensitiveHandling(t *testing.T, registry *FileTypeRegistry) {
if !registry.IsBinary("test.CUSTOM") {
t.Error("Expected .CUSTOM (uppercase) to work")
}
if registry.Language("test.zig") != zigLang {
if registry.GetLanguage("test.zig") != "zig" {
t.Error("Expected .zig (lowercase) to work after adding .ZIG")
}
if registry.Language("test.ZIG") != zigLang {
if registry.GetLanguage("test.ZIG") != "zig" {
t.Error("Expected .ZIG (uppercase) to work")
}
})
}
// TestConfigureFromSettings tests the global configuration function.
func TestConfigureFromSettings(t *testing.T) {
// Reset registry to ensure clean state
ResetRegistryForTesting()
registryOnce = sync.Once{}
registry = nil
// Test configuration application
customImages := []string{".webp", ".avif"}
customBinary := []string{".custom"}
customLanguages := map[string]string{".zig": zigLang}
customLanguages := map[string]string{".zig": "zig"}
disabledImages := []string{".gif"} // Disable default extension
disabledBinary := []string{".exe"} // Disable default extension
disabledLanguages := []string{".rb"} // Disable default extension
@@ -254,13 +205,13 @@ func TestConfigureFromSettings(t *testing.T) {
)
// Test that custom extensions work
if !IsImage(shared.TestFileWebP) {
if !IsImage("test.webp") {
t.Error("Expected custom image extension .webp to work")
}
if !IsBinary("test.custom") {
t.Error("Expected custom binary extension .custom to work")
}
if Language("test.zig") != zigLang {
if GetLanguage("test.zig") != "zig" {
t.Error("Expected custom language .zig to work")
}
@@ -268,21 +219,21 @@ func TestConfigureFromSettings(t *testing.T) {
if IsImage("test.gif") {
t.Error("Expected disabled image extension .gif to not work")
}
if IsBinary(shared.TestFileEXE) {
if IsBinary("test.exe") {
t.Error("Expected disabled binary extension .exe to not work")
}
if Language("test.rb") != "" {
if GetLanguage("test.rb") != "" {
t.Error("Expected disabled language extension .rb to not work")
}
// Test that non-disabled defaults still work
if !IsImage(shared.TestFilePNG) {
if !IsImage("test.png") {
t.Error("Expected non-disabled image extension .png to still work")
}
if !IsBinary(shared.TestFileDLL) {
if !IsBinary("test.dll") {
t.Error("Expected non-disabled binary extension .dll to still work")
}
if Language(shared.TestFileGo) != "go" {
if GetLanguage("test.go") != "go" {
t.Error("Expected non-disabled language extension .go to still work")
}
@@ -297,14 +248,11 @@ func TestConfigureFromSettings(t *testing.T) {
)
// Previous configuration should still work
if !IsImage(shared.TestFileWebP) {
if !IsImage("test.webp") {
t.Error("Expected previous configuration to persist")
}
// New configuration should also work
if !IsImage("test.extra") {
t.Error("Expected new configuration to be applied")
}
// Reset registry after test to avoid affecting other tests
ResetRegistryForTesting()
}

View File

@@ -2,34 +2,19 @@ package fileproc
import (
"testing"
"github.com/ivuorinen/gibidify/shared"
)
// createTestRegistry creates a fresh FileTypeRegistry instance for testing.
// This helper reduces code duplication and ensures consistent registry initialization.
func createTestRegistry() *FileTypeRegistry {
return &FileTypeRegistry{
imageExts: getImageExtensions(),
binaryExts: getBinaryExtensions(),
languageMap: getLanguageMap(),
extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
}
}
// TestFileTypeRegistry_LanguageDetection tests the language detection functionality.
func TestFileTypeRegistryLanguageDetection(t *testing.T) {
registry := createTestRegistry()
func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
registry := GetDefaultRegistry()
tests := []struct {
filename string
expected string
}{
// Programming languages
{shared.TestFileMainGo, "go"},
{shared.TestFileScriptPy, "python"},
{"main.go", "go"},
{"script.py", "python"},
{"app.js", "javascript"},
{"component.tsx", "typescript"},
{"service.ts", "typescript"},
@@ -99,17 +84,17 @@ func TestFileTypeRegistryLanguageDetection(t *testing.T) {
for _, tt := range tests {
t.Run(tt.filename, func(t *testing.T) {
result := registry.Language(tt.filename)
result := registry.GetLanguage(tt.filename)
if result != tt.expected {
t.Errorf("Language(%q) = %q, expected %q", tt.filename, result, tt.expected)
t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
}
})
}
}
// TestFileTypeRegistry_ImageDetection tests the image detection functionality.
func TestFileTypeRegistryImageDetection(t *testing.T) {
registry := createTestRegistry()
func TestFileTypeRegistry_ImageDetection(t *testing.T) {
registry := GetDefaultRegistry()
tests := []struct {
filename string
@@ -117,7 +102,7 @@ func TestFileTypeRegistryImageDetection(t *testing.T) {
}{
// Common image formats
{"photo.png", true},
{shared.TestFileImageJPG, true},
{"image.jpg", true},
{"picture.jpeg", true},
{"animation.gif", true},
{"bitmap.bmp", true},
@@ -158,8 +143,8 @@ func TestFileTypeRegistryImageDetection(t *testing.T) {
}
// TestFileTypeRegistry_BinaryDetection tests the binary detection functionality.
func TestFileTypeRegistryBinaryDetection(t *testing.T) {
registry := createTestRegistry()
func TestFileTypeRegistry_BinaryDetection(t *testing.T) {
registry := GetDefaultRegistry()
tests := []struct {
filename string
@@ -217,7 +202,7 @@ func TestFileTypeRegistryBinaryDetection(t *testing.T) {
// Non-binary files
{"document.txt", false},
{shared.TestFileScriptPy, false},
{"script.py", false},
{"config.json", false},
{"style.css", false},
{"page.html", false},

View File

@@ -2,13 +2,11 @@ package fileproc
import (
"testing"
"github.com/ivuorinen/gibidify/shared"
)
// TestFileTypeRegistry_EdgeCases tests edge cases and boundary conditions.
func TestFileTypeRegistryEdgeCases(t *testing.T) {
registry := DefaultRegistry()
func TestFileTypeRegistry_EdgeCases(t *testing.T) {
registry := GetDefaultRegistry()
// Test various edge cases for filename handling
edgeCases := []struct {
@@ -33,23 +31,23 @@ func TestFileTypeRegistryEdgeCases(t *testing.T) {
}
for _, tc := range edgeCases {
t.Run(tc.name, func(_ *testing.T) {
t.Run(tc.name, func(t *testing.T) {
// These should not panic
_ = registry.IsImage(tc.filename)
_ = registry.IsBinary(tc.filename)
_ = registry.Language(tc.filename)
_ = registry.GetLanguage(tc.filename)
// Global functions should also not panic
_ = IsImage(tc.filename)
_ = IsBinary(tc.filename)
_ = Language(tc.filename)
_ = GetLanguage(tc.filename)
})
}
}
// TestFileTypeRegistry_MinimumExtensionLength tests the minimum extension length requirement.
func TestFileTypeRegistryMinimumExtensionLength(t *testing.T) {
registry := DefaultRegistry()
func TestFileTypeRegistry_MinimumExtensionLength(t *testing.T) {
registry := GetDefaultRegistry()
tests := []struct {
filename string
@@ -67,18 +65,18 @@ func TestFileTypeRegistryMinimumExtensionLength(t *testing.T) {
for _, tt := range tests {
t.Run(tt.filename, func(t *testing.T) {
result := registry.Language(tt.filename)
result := registry.GetLanguage(tt.filename)
if result != tt.expected {
t.Errorf("Language(%q) = %q, expected %q", tt.filename, result, tt.expected)
t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
}
})
}
}
// Benchmark tests for performance validation.
func BenchmarkFileTypeRegistryIsImage(b *testing.B) {
registry := DefaultRegistry()
filename := shared.TestFilePNG
// Benchmark tests for performance validation
func BenchmarkFileTypeRegistry_IsImage(b *testing.B) {
registry := GetDefaultRegistry()
filename := "test.png"
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -86,9 +84,9 @@ func BenchmarkFileTypeRegistryIsImage(b *testing.B) {
}
}
func BenchmarkFileTypeRegistryIsBinary(b *testing.B) {
registry := DefaultRegistry()
filename := shared.TestFileEXE
func BenchmarkFileTypeRegistry_IsBinary(b *testing.B) {
registry := GetDefaultRegistry()
filename := "test.exe"
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -96,35 +94,35 @@ func BenchmarkFileTypeRegistryIsBinary(b *testing.B) {
}
}
func BenchmarkFileTypeRegistryLanguage(b *testing.B) {
registry := DefaultRegistry()
filename := shared.TestFileGo
func BenchmarkFileTypeRegistry_GetLanguage(b *testing.B) {
registry := GetDefaultRegistry()
filename := "test.go"
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = registry.Language(filename)
_ = registry.GetLanguage(filename)
}
}
func BenchmarkFileTypeRegistryGlobalFunctions(b *testing.B) {
filename := shared.TestFileGo
func BenchmarkFileTypeRegistry_GlobalFunctions(b *testing.B) {
filename := "test.go"
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = IsImage(filename)
_ = IsBinary(filename)
_ = Language(filename)
_ = GetLanguage(filename)
}
}
func BenchmarkFileTypeRegistryConcurrentAccess(b *testing.B) {
filename := shared.TestFileGo
func BenchmarkFileTypeRegistry_ConcurrentAccess(b *testing.B) {
filename := "test.go"
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_ = IsImage(filename)
_ = IsBinary(filename)
_ = Language(filename)
_ = GetLanguage(filename)
}
})
}

View File

@@ -2,254 +2,136 @@ package fileproc
import (
"testing"
"github.com/ivuorinen/gibidify/shared"
)
// TestFileTypeRegistryAddImageExtension tests adding image extensions.
func TestFileTypeRegistryAddImageExtension(t *testing.T) {
registry := createModificationTestRegistry()
testImageExtensionModifications(t, registry)
}
// TestFileTypeRegistryAddBinaryExtension tests adding binary extensions.
func TestFileTypeRegistryAddBinaryExtension(t *testing.T) {
registry := createModificationTestRegistry()
testBinaryExtensionModifications(t, registry)
}
// TestFileTypeRegistryAddLanguageMapping tests adding language mappings.
func TestFileTypeRegistryAddLanguageMapping(t *testing.T) {
registry := createModificationTestRegistry()
testLanguageMappingModifications(t, registry)
}
// createModificationTestRegistry creates a registry for modification tests.
func createModificationTestRegistry() *FileTypeRegistry {
return &FileTypeRegistry{
// TestFileTypeRegistry_ModificationMethods tests the modification methods of FileTypeRegistry.
func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
// Create a new registry instance for testing
registry := &FileTypeRegistry{
imageExts: make(map[string]bool),
binaryExts: make(map[string]bool),
languageMap: make(map[string]string),
extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
}
}
// testImageExtensionModifications tests image extension modifications.
func testImageExtensionModifications(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
// Test AddImageExtension
t.Run("AddImageExtension", func(t *testing.T) {
// Add a new image extension
registry.AddImageExtension(".webp")
verifyImageExtension(t, registry, ".webp", shared.TestFileWebP, true)
if !registry.IsImage("test.webp") {
t.Errorf("Expected .webp to be recognized as image after adding")
}
// Test case-insensitive addition
// Test case insensitive addition
registry.AddImageExtension(".AVIF")
verifyImageExtension(t, registry, ".AVIF", "test.avif", true)
verifyImageExtension(t, registry, ".AVIF", "test.AVIF", true)
if !registry.IsImage("test.avif") {
t.Errorf("Expected .avif to be recognized as image after adding .AVIF")
}
if !registry.IsImage("test.AVIF") {
t.Errorf("Expected .AVIF to be recognized as image")
}
// Test with dot prefix
registry.AddImageExtension("heic")
verifyImageExtension(t, registry, "heic", "test.heic", false)
if registry.IsImage("test.heic") {
t.Errorf("Expected extension without dot to not work")
}
// Test with proper dot prefix
registry.AddImageExtension(".heic")
verifyImageExtension(t, registry, ".heic", "test.heic", true)
}
// testBinaryExtensionModifications tests binary extension modifications.
func testBinaryExtensionModifications(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
if !registry.IsImage("test.heic") {
t.Errorf("Expected .heic to be recognized as image")
}
})
// Test AddBinaryExtension
t.Run("AddBinaryExtension", func(t *testing.T) {
// Add a new binary extension
registry.AddBinaryExtension(".custom")
verifyBinaryExtension(t, registry, ".custom", "file.custom", true)
if !registry.IsBinary("file.custom") {
t.Errorf("Expected .custom to be recognized as binary after adding")
}
// Test case-insensitive addition
registry.AddBinaryExtension(shared.TestExtensionSpecial)
verifyBinaryExtension(t, registry, shared.TestExtensionSpecial, "file.special", true)
verifyBinaryExtension(t, registry, shared.TestExtensionSpecial, "file.SPECIAL", true)
// Test case insensitive addition
registry.AddBinaryExtension(".SPECIAL")
if !registry.IsBinary("file.special") {
t.Errorf("Expected .special to be recognized as binary after adding .SPECIAL")
}
if !registry.IsBinary("file.SPECIAL") {
t.Errorf("Expected .SPECIAL to be recognized as binary")
}
// Test with dot prefix
registry.AddBinaryExtension("bin")
verifyBinaryExtension(t, registry, "bin", "file.bin", false)
if registry.IsBinary("file.bin") {
t.Errorf("Expected extension without dot to not work")
}
// Test with proper dot prefix
registry.AddBinaryExtension(".bin")
verifyBinaryExtension(t, registry, ".bin", "file.bin", true)
}
// testLanguageMappingModifications tests language mapping modifications.
func testLanguageMappingModifications(t *testing.T, registry *FileTypeRegistry) {
t.Helper()
if !registry.IsBinary("file.bin") {
t.Errorf("Expected .bin to be recognized as binary")
}
})
// Test AddLanguageMapping
t.Run("AddLanguageMapping", func(t *testing.T) {
// Add a new language mapping
registry.AddLanguageMapping(".xyz", "CustomLang")
verifyLanguageMapping(t, registry, "file.xyz", "CustomLang")
if lang := registry.GetLanguage("file.xyz"); lang != "CustomLang" {
t.Errorf("Expected CustomLang, got %s", lang)
}
// Test case-insensitive addition
// Test case insensitive addition
registry.AddLanguageMapping(".ABC", "UpperLang")
verifyLanguageMapping(t, registry, "file.abc", "UpperLang")
verifyLanguageMapping(t, registry, "file.ABC", "UpperLang")
if lang := registry.GetLanguage("file.abc"); lang != "UpperLang" {
t.Errorf("Expected UpperLang, got %s", lang)
}
if lang := registry.GetLanguage("file.ABC"); lang != "UpperLang" {
t.Errorf("Expected UpperLang for uppercase, got %s", lang)
}
// Test with dot prefix (should not work)
// Test with dot prefix
registry.AddLanguageMapping("nolang", "NoLang")
verifyLanguageMappingAbsent(t, registry, "nolang", "file.nolang")
if lang := registry.GetLanguage("file.nolang"); lang == "NoLang" {
t.Errorf("Expected extension without dot to not work")
}
// Test with proper dot prefix
registry.AddLanguageMapping(".nolang", "NoLang")
verifyLanguageMapping(t, registry, "file.nolang", "NoLang")
if lang := registry.GetLanguage("file.nolang"); lang != "NoLang" {
t.Errorf("Expected NoLang, got %s", lang)
}
// Test overriding existing mapping
registry.AddLanguageMapping(".xyz", "NewCustomLang")
verifyLanguageMapping(t, registry, "file.xyz", "NewCustomLang")
if lang := registry.GetLanguage("file.xyz"); lang != "NewCustomLang" {
t.Errorf("Expected NewCustomLang after override, got %s", lang)
}
})
}
// verifyImageExtension asserts that IsImage(filename) reports the expected
// result after ext was registered, failing the test otherwise.
func verifyImageExtension(t *testing.T, registry *FileTypeRegistry, ext, filename string, expected bool) {
	t.Helper()
	got := registry.IsImage(filename)
	if got == expected {
		return
	}
	if expected {
		t.Errorf("Expected %s to be recognized as image after adding %s", filename, ext)
		return
	}
	t.Errorf(shared.TestMsgExpectedExtensionWithoutDot)
}
// verifyBinaryExtension asserts that IsBinary(filename) reports the expected
// result after ext was registered, failing the test otherwise.
func verifyBinaryExtension(t *testing.T, registry *FileTypeRegistry, ext, filename string, expected bool) {
	t.Helper()
	got := registry.IsBinary(filename)
	if got == expected {
		return
	}
	if expected {
		t.Errorf("Expected %s to be recognized as binary after adding %s", filename, ext)
		return
	}
	t.Errorf(shared.TestMsgExpectedExtensionWithoutDot)
}
// verifyLanguageMapping asserts that Language(filename) resolves to
// expectedLang, failing the test with the mismatch otherwise.
func verifyLanguageMapping(t *testing.T, registry *FileTypeRegistry, filename, expectedLang string) {
	t.Helper()
	if got := registry.Language(filename); got != expectedLang {
		t.Errorf("Expected %s, got %s for %s", expectedLang, got, filename)
	}
}
// verifyLanguageMappingAbsent asserts that no language is mapped for
// filename (Language returns ""), failing the test otherwise.
// The second parameter (the extension) is unused and kept only so call
// sites mirror the other verify helpers.
func verifyLanguageMappingAbsent(t *testing.T, registry *FileTypeRegistry, _ string, filename string) {
	t.Helper()
	if got := registry.Language(filename); got != "" {
		t.Errorf(shared.TestMsgExpectedExtensionWithoutDot+", but got %s", got)
	}
}
// TestFileTypeRegistryDefaultRegistryConsistency tests default registry behavior.
func TestFileTypeRegistryDefaultRegistryConsistency(t *testing.T) {
registry := DefaultRegistry()
// TestFileTypeRegistry_DefaultRegistryConsistency tests default registry behavior.
func TestFileTypeRegistry_DefaultRegistryConsistency(t *testing.T) {
registry := GetDefaultRegistry()
// Test that registry methods work consistently
if !registry.IsImage(shared.TestFilePNG) {
if !registry.IsImage("test.png") {
t.Error("Expected .png to be recognized as image")
}
if !registry.IsBinary(shared.TestFileEXE) {
if !registry.IsBinary("test.exe") {
t.Error("Expected .exe to be recognized as binary")
}
if lang := registry.Language(shared.TestFileGo); lang != "go" {
if lang := registry.GetLanguage("test.go"); lang != "go" {
t.Errorf("Expected go, got %s", lang)
}
// Test that multiple calls return consistent results
for i := 0; i < 5; i++ {
if !registry.IsImage(shared.TestFileJPG) {
if !registry.IsImage("test.jpg") {
t.Errorf("Iteration %d: Expected .jpg to be recognized as image", i)
}
if registry.IsBinary(shared.TestFileTXT) {
if registry.IsBinary("test.txt") {
t.Errorf("Iteration %d: Expected .txt to not be recognized as binary", i)
}
}
}
// TestFileTypeRegistryGetStats tests the GetStats method.
//
// It resets the global registry for isolation, performs each of the three
// lookup kinds twice (first pass misses, second pass should hit the cache),
// and then checks the stats invariants: lookup count, hits+misses == total,
// and at least one cache hit.
func TestFileTypeRegistryGetStats(t *testing.T) {
	// Ensure clean, isolated state
	ResetRegistryForTesting()
	t.Cleanup(ResetRegistryForTesting)

	registry := DefaultRegistry()

	// Issue each lookup twice: the first round populates the cache, the
	// second round should be served from it.
	for range [2]struct{}{} {
		registry.IsImage(shared.TestFilePNG)
		registry.IsBinary(shared.TestFileEXE)
		registry.Language(shared.TestFileGo)
	}

	stats := registry.Stats()

	// All stat fields are uint64 and therefore non-negative by definition;
	// verify the values reflect the calls made above.
	if stats.TotalLookups < 6 { // We made at least 6 calls above
		t.Errorf("Expected at least 6 total lookups, got %d", stats.TotalLookups)
	}
	// Total lookups should equal hits + misses
	if stats.TotalLookups != stats.CacheHits+stats.CacheMisses {
		t.Errorf("Total lookups (%d) should equal hits (%d) + misses (%d)",
			stats.TotalLookups, stats.CacheHits, stats.CacheMisses)
	}
	// With repeated lookups we should see some cache hits
	if stats.CacheHits == 0 {
		t.Error("Expected some cache hits after repeated lookups")
	}
}
// TestFileTypeRegistryGetCacheInfo tests the GetCacheInfo method.
//
// It resets the global registry for isolation, performs five distinct
// lookups to populate the caches, and verifies the sizes reported by
// CacheInfo: individual sizes non-negative, a positive maximum, and a
// non-empty combined cache after the lookups.
func TestFileTypeRegistryGetCacheInfo(t *testing.T) {
	// Ensure clean, isolated state
	ResetRegistryForTesting()
	t.Cleanup(ResetRegistryForTesting)

	registry := DefaultRegistry()

	// Populate the caches with a handful of distinct lookups.
	registry.IsImage("test1.png")
	registry.IsBinary("test2.exe")
	registry.Language("test3.go")
	registry.IsImage("test4.jpg")
	registry.IsBinary("test5.dll")

	extSize, resultSize, maxSize := registry.CacheInfo()

	// Verify cache info
	if extSize < 0 {
		t.Error("Expected non-negative extension cache size")
	}
	if resultSize < 0 {
		t.Error("Expected non-negative result cache size")
	}
	if maxSize <= 0 {
		t.Error("Expected positive max cache size")
	}
	// We should have some cache entries from our calls
	if extSize+resultSize == 0 {
		t.Error("Expected some cache entries after multiple calls")
	}
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
// FileData represents a single file's path and content.
@@ -24,7 +23,6 @@ type FormatWriter interface {
// detectLanguage tries to infer the code block language from the file extension.
func detectLanguage(filePath string) string {
registry := DefaultRegistry()
return registry.Language(filePath)
registry := GetDefaultRegistry()
return registry.GetLanguage(filePath)
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -35,7 +34,6 @@ func loadIgnoreRules(currentDir string, parentRules []ignoreRule) []ignoreRule {
func tryLoadIgnoreFile(dir, fileName string) *ignoreRule {
ignorePath := filepath.Join(dir, fileName)
if info, err := os.Stat(ignorePath); err == nil && !info.IsDir() {
//nolint:errcheck // Regex compile error handled by validation, safe to ignore here
if gi, err := ignore.CompileIgnoreFile(ignorePath); err == nil {
return &ignoreRule{
base: dir,
@@ -43,7 +41,6 @@ func tryLoadIgnoreFile(dir, fileName string) *ignoreRule {
}
}
}
return nil
}
@@ -54,7 +51,6 @@ func matchesIgnoreRules(fullPath string, rules []ignoreRule) bool {
return true
}
}
return false
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -7,7 +6,7 @@ import (
"io"
"os"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// JSONWriter handles JSON format output with streaming support.
@@ -28,27 +27,27 @@ func NewJSONWriter(outFile *os.File) *JSONWriter {
func (w *JSONWriter) Start(prefix, suffix string) error {
// Start JSON structure
if _, err := w.outFile.WriteString(`{"prefix":"`); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON start")
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON start")
}
// Write escaped prefix
escapedPrefix := shared.EscapeForJSON(prefix)
if err := shared.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
return fmt.Errorf("writing JSON prefix: %w", err)
escapedPrefix := utils.EscapeForJSON(prefix)
if err := utils.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
return err
}
if _, err := w.outFile.WriteString(`","suffix":"`); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON middle")
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON middle")
}
// Write escaped suffix
escapedSuffix := shared.EscapeForJSON(suffix)
if err := shared.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
return fmt.Errorf("writing JSON suffix: %w", err)
escapedSuffix := utils.EscapeForJSON(suffix)
if err := utils.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
return err
}
if _, err := w.outFile.WriteString(`","files":[`); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON files start")
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON files start")
}
return nil
@@ -58,7 +57,7 @@ func (w *JSONWriter) Start(prefix, suffix string) error {
func (w *JSONWriter) WriteFile(req WriteRequest) error {
if !w.firstFile {
if _, err := w.outFile.WriteString(","); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON separator")
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON separator")
}
}
w.firstFile = false
@@ -66,7 +65,6 @@ func (w *JSONWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
}
return w.writeInline(req)
}
@@ -74,27 +72,21 @@ func (w *JSONWriter) WriteFile(req WriteRequest) error {
func (w *JSONWriter) Close() error {
// Close JSON structure
if _, err := w.outFile.WriteString("]}"); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write JSON end")
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON end")
}
return nil
}
// writeStreaming writes a large file as JSON in streaming chunks.
func (w *JSONWriter) writeStreaming(req WriteRequest) error {
defer shared.SafeCloseReader(req.Reader, req.Path)
defer utils.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write file start
escapedPath := shared.EscapeForJSON(req.Path)
escapedPath := utils.EscapeForJSON(req.Path)
if _, err := fmt.Fprintf(w.outFile, `{"path":"%s","language":"%s","content":"`, escapedPath, language); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write JSON file start",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file start").WithFilePath(req.Path)
}
// Stream content with JSON escaping
@@ -104,12 +96,7 @@ func (w *JSONWriter) writeStreaming(req WriteRequest) error {
// Write file end
if _, err := w.outFile.WriteString(`"}`); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write JSON file end",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file end").WithFilePath(req.Path)
}
return nil
@@ -126,44 +113,46 @@ func (w *JSONWriter) writeInline(req WriteRequest) error {
encoded, err := json.Marshal(fileData)
if err != nil {
return shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingEncode,
"failed to marshal JSON",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingEncode, "failed to marshal JSON").WithFilePath(req.Path)
}
if _, err := w.outFile.Write(encoded); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write JSON file",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file").WithFilePath(req.Path)
}
return nil
}
// streamJSONContent streams content with JSON escaping.
func (w *JSONWriter) streamJSONContent(reader io.Reader, path string) error {
if err := shared.StreamContent(
reader, w.outFile, shared.FileProcessingStreamChunkSize, path, func(chunk []byte) []byte {
escaped := shared.EscapeForJSON(string(chunk))
return utils.StreamContent(reader, w.outFile, StreamChunkSize, path, func(chunk []byte) []byte {
escaped := utils.EscapeForJSON(string(chunk))
return []byte(escaped)
},
); err != nil {
return fmt.Errorf("streaming JSON content: %w", err)
}
return nil
})
}
// startJSONWriter handles JSON format output with streaming support.
func startJSONWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
startFormatWriter(outFile, writeCh, done, prefix, suffix, func(f *os.File) FormatWriter {
return NewJSONWriter(f)
})
defer close(done)
writer := NewJSONWriter(outFile)
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
utils.LogError("Failed to write JSON start", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
utils.LogError("Failed to write JSON file", err)
}
}
// Close writer
if err := writer.Close(); err != nil {
utils.LogError("Failed to write JSON end", err)
}
}

View File

@@ -1,17 +1,16 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"fmt"
"io"
"os"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// MarkdownWriter handles Markdown format output with streaming support.
// MarkdownWriter handles markdown format output with streaming support.
type MarkdownWriter struct {
outFile *os.File
suffix string
}
// NewMarkdownWriter creates a new markdown writer.
@@ -19,70 +18,53 @@ func NewMarkdownWriter(outFile *os.File) *MarkdownWriter {
return &MarkdownWriter{outFile: outFile}
}
// Start writes the markdown header and stores the suffix for later use.
// Start writes the markdown header.
func (w *MarkdownWriter) Start(prefix, suffix string) error {
// Store suffix for use in Close method
w.suffix = suffix
if prefix != "" {
if _, err := fmt.Fprintf(w.outFile, "# %s\n\n", prefix); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write prefix")
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write prefix")
}
}
return nil
}
// WriteFile writes a file entry in Markdown format.
// WriteFile writes a file entry in markdown format.
func (w *MarkdownWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
}
return w.writeInline(req)
}
// Close writes the markdown footer using the suffix stored in Start.
func (w *MarkdownWriter) Close() error {
if w.suffix != "" {
if _, err := fmt.Fprintf(w.outFile, "\n# %s\n", w.suffix); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write suffix")
// Close writes the markdown footer.
func (w *MarkdownWriter) Close(suffix string) error {
if suffix != "" {
if _, err := fmt.Fprintf(w.outFile, "\n# %s\n", suffix); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write suffix")
}
}
return nil
}
// writeStreaming writes a large file in streaming chunks.
func (w *MarkdownWriter) writeStreaming(req WriteRequest) error {
defer shared.SafeCloseReader(req.Reader, req.Path)
defer w.closeReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write file header
if _, err := fmt.Fprintf(w.outFile, "## File: `%s`\n```%s\n", req.Path, language); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write file header",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write file header").WithFilePath(req.Path)
}
// Stream file content in chunks
chunkSize := shared.FileProcessingStreamChunkSize
if err := shared.StreamContent(req.Reader, w.outFile, chunkSize, req.Path, nil); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "streaming content for markdown file")
if err := w.streamContent(req.Reader, req.Path); err != nil {
return err
}
// Write file footer
if _, err := w.outFile.WriteString("\n```\n\n"); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write file footer",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write file footer").WithFilePath(req.Path)
}
return nil
@@ -94,20 +76,64 @@ func (w *MarkdownWriter) writeInline(req WriteRequest) error {
formatted := fmt.Sprintf("## File: `%s`\n```%s\n%s\n```\n\n", req.Path, language, req.Content)
if _, err := w.outFile.WriteString(formatted); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write inline content",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write inline content").WithFilePath(req.Path)
}
return nil
}
// startMarkdownWriter handles Markdown format output with streaming support.
func startMarkdownWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
startFormatWriter(outFile, writeCh, done, prefix, suffix, func(f *os.File) FormatWriter {
return NewMarkdownWriter(f)
})
// streamContent streams file content in chunks.
func (w *MarkdownWriter) streamContent(reader io.Reader, path string) error {
buf := make([]byte, StreamChunkSize)
for {
n, err := reader.Read(buf)
if n > 0 {
if _, writeErr := w.outFile.Write(buf[:n]); writeErr != nil {
return utils.WrapError(writeErr, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write chunk").WithFilePath(path)
}
}
if err == io.EOF {
break
}
if err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIORead, "failed to read chunk").WithFilePath(path)
}
}
return nil
}
// closeReader safely closes a reader if it implements io.Closer.
func (w *MarkdownWriter) closeReader(reader io.Reader, path string) {
if closer, ok := reader.(io.Closer); ok {
if err := closer.Close(); err != nil {
utils.LogError(
"Failed to close file reader",
utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOClose, "failed to close file reader").WithFilePath(path),
)
}
}
}
// startMarkdownWriter handles markdown format output with streaming support.
func startMarkdownWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
defer close(done)
writer := NewMarkdownWriter(outFile)
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
utils.LogError("Failed to write markdown prefix", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
utils.LogError("Failed to write markdown file", err)
}
}
// Close writer
if err := writer.Close(suffix); err != nil {
utils.LogError("Failed to write markdown suffix", err)
}
}

View File

@@ -3,17 +3,26 @@ package fileproc
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
const (
// StreamChunkSize is the size of chunks when streaming large files (64KB).
StreamChunkSize = 65536
// StreamThreshold is the file size above which we use streaming (1MB).
StreamThreshold = 1048576
// MaxMemoryBuffer is the maximum memory to use for buffering content (10MB).
MaxMemoryBuffer = 10485760
)
// WriteRequest represents the content to be written.
@@ -22,7 +31,6 @@ type WriteRequest struct {
Content string
IsStream bool
Reader io.Reader
Size int64 // File size for streaming files
}
// FileProcessor handles file processing operations.
@@ -36,7 +44,7 @@ type FileProcessor struct {
func NewFileProcessor(rootPath string) *FileProcessor {
return &FileProcessor{
rootPath: rootPath,
sizeLimit: config.FileSizeLimit(),
sizeLimit: config.GetFileSizeLimit(),
resourceMonitor: NewResourceMonitor(),
}
}
@@ -45,7 +53,7 @@ func NewFileProcessor(rootPath string) *FileProcessor {
func NewFileProcessorWithMonitor(rootPath string, monitor *ResourceMonitor) *FileProcessor {
return &FileProcessor{
rootPath: rootPath,
sizeLimit: config.FileSizeLimit(),
sizeLimit: config.GetFileSizeLimit(),
resourceMonitor: monitor,
}
}
@@ -55,89 +63,60 @@ func NewFileProcessorWithMonitor(rootPath string, monitor *ResourceMonitor) *Fil
func ProcessFile(filePath string, outCh chan<- WriteRequest, rootPath string) {
processor := NewFileProcessor(rootPath)
ctx := context.Background()
if err := processor.ProcessWithContext(ctx, filePath, outCh); err != nil {
shared.LogErrorf(err, shared.FileProcessingMsgFailedToProcess, filePath)
}
processor.ProcessWithContext(ctx, filePath, outCh)
}
// ProcessFileWithMonitor processes a file using a shared resource monitor.
func ProcessFileWithMonitor(
ctx context.Context,
filePath string,
outCh chan<- WriteRequest,
rootPath string,
monitor *ResourceMonitor,
) error {
if monitor == nil {
monitor = NewResourceMonitor()
}
func ProcessFileWithMonitor(ctx context.Context, filePath string, outCh chan<- WriteRequest, rootPath string, monitor *ResourceMonitor) {
processor := NewFileProcessorWithMonitor(rootPath, monitor)
return processor.ProcessWithContext(ctx, filePath, outCh)
processor.ProcessWithContext(ctx, filePath, outCh)
}
// Process handles file processing with the configured settings.
func (p *FileProcessor) Process(filePath string, outCh chan<- WriteRequest) {
ctx := context.Background()
if err := p.ProcessWithContext(ctx, filePath, outCh); err != nil {
shared.LogErrorf(err, shared.FileProcessingMsgFailedToProcess, filePath)
}
p.ProcessWithContext(ctx, filePath, outCh)
}
// ProcessWithContext handles file processing with context and resource monitoring.
func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string, outCh chan<- WriteRequest) error {
func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string, outCh chan<- WriteRequest) {
// Create file processing context with timeout
fileCtx, fileCancel := p.resourceMonitor.CreateFileProcessingContext(ctx)
defer fileCancel()
// Wait for rate limiting
if err := p.resourceMonitor.WaitForRateLimit(fileCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"file processing timeout during rate limiting",
filePath,
nil,
if err == context.DeadlineExceeded {
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing timeout during rate limiting", filePath, nil),
"File processing timeout during rate limiting: %s", filePath,
)
shared.LogErrorf(structErr, "File processing timeout during rate limiting: %s", filePath)
return structErr
}
return err
return
}
// Validate file and check resource limits
fileInfo, err := p.validateFileWithLimits(fileCtx, filePath)
if err != nil {
return err // Error already logged
return // Error already logged
}
// Acquire read slot for concurrent processing
if err := p.resourceMonitor.AcquireReadSlot(fileCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"file processing timeout waiting for read slot",
filePath,
nil,
if err == context.DeadlineExceeded {
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing timeout waiting for read slot", filePath, nil),
"File processing timeout waiting for read slot: %s", filePath,
)
shared.LogErrorf(structErr, "File processing timeout waiting for read slot: %s", filePath)
return structErr
}
return err
return
}
defer p.resourceMonitor.ReleaseReadSlot()
// Check hard memory limits before processing
if err := p.resourceMonitor.CheckHardMemoryLimit(); err != nil {
shared.LogErrorf(err, "Hard memory limit check failed for file: %s", filePath)
return err
utils.LogErrorf(err, "Hard memory limit check failed for file: %s", filePath)
return
}
// Get relative path
@@ -145,69 +124,59 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
// Process file with timeout
processStart := time.Now()
defer func() {
// Record successful processing
p.resourceMonitor.RecordFileProcessed(fileInfo.Size())
logrus.Debugf("File processed in %v: %s", time.Since(processStart), filePath)
}()
// Choose processing strategy based on file size
if fileInfo.Size() <= shared.FileProcessingStreamThreshold {
err = p.processInMemoryWithContext(fileCtx, filePath, relPath, outCh)
if fileInfo.Size() <= StreamThreshold {
p.processInMemoryWithContext(fileCtx, filePath, relPath, outCh)
} else {
err = p.processStreamingWithContext(fileCtx, filePath, relPath, outCh, fileInfo.Size())
p.processStreamingWithContext(fileCtx, filePath, relPath, outCh)
}
// Only record success if processing completed without error
if err != nil {
return err
}
// Record successful processing only on success path
p.resourceMonitor.RecordFileProcessed(fileInfo.Size())
logger := shared.GetLogger()
logger.Debugf("File processed in %v: %s", time.Since(processStart), filePath)
return nil
}
// validateFileWithLimits checks if the file can be processed with resource limits.
func (p *FileProcessor) validateFileWithLimits(ctx context.Context, filePath string) (os.FileInfo, error) {
// Check context cancellation
if err := shared.CheckContextCancellation(ctx, "file validation"); err != nil {
return nil, fmt.Errorf("context check during file validation: %w", err)
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
fileInfo, err := os.Stat(filePath)
if err != nil {
structErr := shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
"failed to stat file",
).WithFilePath(filePath)
shared.LogErrorf(structErr, "Failed to stat file %s", filePath)
return nil, structErr
structErr := utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to stat file").WithFilePath(filePath)
utils.LogErrorf(structErr, "Failed to stat file %s", filePath)
return nil, err
}
// Check traditional size limit
if fileInfo.Size() > p.sizeLimit {
c := map[string]any{
context := map[string]interface{}{
"file_size": fileInfo.Size(),
"size_limit": p.sizeLimit,
}
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeValidationSize,
fmt.Sprintf(shared.FileProcessingMsgSizeExceeds, fileInfo.Size(), p.sizeLimit),
utils.LogErrorf(
utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationSize,
fmt.Sprintf("file size (%d bytes) exceeds limit (%d bytes)", fileInfo.Size(), p.sizeLimit),
filePath,
c,
context,
),
"Skipping large file %s", filePath,
)
shared.LogErrorf(structErr, "Skipping large file %s", filePath)
return nil, structErr
return nil, fmt.Errorf("file too large")
}
// Check resource limits
if err := p.resourceMonitor.ValidateFileProcessing(filePath, fileInfo.Size()); err != nil {
shared.LogErrorf(err, "Resource limit validation failed for file: %s", filePath)
utils.LogErrorf(err, "Resource limit validation failed for file: %s", filePath)
return nil, err
}
@@ -220,149 +189,96 @@ func (p *FileProcessor) getRelativePath(filePath string) string {
if err != nil {
return filePath // Fallback
}
return relPath
}
// processInMemoryWithContext loads the entire file into memory with context awareness.
func (p *FileProcessor) processInMemoryWithContext(
ctx context.Context,
filePath, relPath string,
outCh chan<- WriteRequest,
) error {
func (p *FileProcessor) processInMemoryWithContext(ctx context.Context, filePath, relPath string, outCh chan<- WriteRequest) {
// Check context before reading
select {
case <-ctx.Done():
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"file processing canceled",
filePath,
nil,
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing cancelled", filePath, nil),
"File processing cancelled: %s", filePath,
)
shared.LogErrorf(structErr, "File processing canceled: %s", filePath)
return structErr
return
default:
}
content, err := os.ReadFile(filePath) // #nosec G304 - filePath is validated by walker
if err != nil {
structErr := shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingFileRead,
"failed to read file",
).WithFilePath(filePath)
shared.LogErrorf(structErr, "Failed to read file %s", filePath)
return structErr
structErr := utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingFileRead, "failed to read file").WithFilePath(filePath)
utils.LogErrorf(structErr, "Failed to read file %s", filePath)
return
}
// Check context again after reading
select {
case <-ctx.Done():
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"file processing canceled after read",
filePath,
nil,
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing cancelled after read", filePath, nil),
"File processing cancelled after read: %s", filePath,
)
shared.LogErrorf(structErr, "File processing canceled after read: %s", filePath)
return structErr
return
default:
}
// Try to send the result, but respect context cancellation
select {
case <-ctx.Done():
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"file processing canceled before output",
filePath,
nil,
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing cancelled before output", filePath, nil),
"File processing cancelled before output: %s", filePath,
)
shared.LogErrorf(structErr, "File processing canceled before output: %s", filePath)
return structErr
return
case outCh <- WriteRequest{
Path: relPath,
Content: p.formatContent(relPath, string(content)),
IsStream: false,
Size: int64(len(content)),
}:
}
return nil
}
// processStreamingWithContext creates a streaming reader for large files with context awareness.
func (p *FileProcessor) processStreamingWithContext(
ctx context.Context,
filePath, relPath string,
outCh chan<- WriteRequest,
size int64,
) error {
func (p *FileProcessor) processStreamingWithContext(ctx context.Context, filePath, relPath string, outCh chan<- WriteRequest) {
// Check context before creating reader
select {
case <-ctx.Done():
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"streaming processing canceled",
filePath,
nil,
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "streaming processing cancelled", filePath, nil),
"Streaming processing cancelled: %s", filePath,
)
shared.LogErrorf(structErr, "Streaming processing canceled: %s", filePath)
return structErr
return
default:
}
reader := p.createStreamReaderWithContext(ctx, filePath, relPath)
if reader == nil {
// Error already logged, create and return error
return shared.NewStructuredError(
shared.ErrorTypeProcessing,
shared.CodeProcessingFileRead,
"failed to create stream reader",
filePath,
nil,
)
return // Error already logged
}
// Try to send the result, but respect context cancellation
select {
case <-ctx.Done():
structErr := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
"streaming processing canceled before output",
filePath,
nil,
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "streaming processing cancelled before output", filePath, nil),
"Streaming processing cancelled before output: %s", filePath,
)
shared.LogErrorf(structErr, "Streaming processing canceled before output: %s", filePath)
return structErr
return
case outCh <- WriteRequest{
Path: relPath,
Content: "", // Empty since content is in Reader
IsStream: true,
Reader: reader,
Size: size,
}:
}
return nil
}
// createStreamReaderWithContext creates a reader that combines header and file content with context awareness.
func (p *FileProcessor) createStreamReaderWithContext(
ctx context.Context, filePath, relPath string,
) io.Reader {
func (p *FileProcessor) createStreamReaderWithContext(ctx context.Context, filePath, relPath string) io.Reader {
// Check context before opening file
select {
case <-ctx.Done():
@@ -372,19 +288,14 @@ func (p *FileProcessor) createStreamReaderWithContext(
file, err := os.Open(filePath) // #nosec G304 - filePath is validated by walker
if err != nil {
structErr := shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingFileRead,
"failed to open file for streaming",
).WithFilePath(filePath)
shared.LogErrorf(structErr, "Failed to open file for streaming %s", filePath)
structErr := utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingFileRead, "failed to open file for streaming").WithFilePath(filePath)
utils.LogErrorf(structErr, "Failed to open file for streaming %s", filePath)
return nil
}
header := p.formatHeader(relPath)
// Note: file will be closed by the writer
return newHeaderFileReader(header, file)
header := p.formatHeader(relPath)
return io.MultiReader(header, file)
}
// formatContent formats the file content with header.
@@ -396,66 +307,3 @@ func (p *FileProcessor) formatContent(relPath, content string) string {
func (p *FileProcessor) formatHeader(relPath string) io.Reader {
return strings.NewReader(fmt.Sprintf("\n---\n%s\n", relPath))
}
// headerFileReader wraps a MultiReader and closes the file when EOF is reached.
// It also implements io.Closer so consumers that stop reading before EOF can
// release the underlying file descriptor explicitly.
type headerFileReader struct {
	reader io.Reader  // header bytes followed by file contents (built via io.MultiReader)
	file   *os.File   // underlying file; closed at most once
	mu     sync.Mutex // guards closed and the close operation itself
	closed bool       // true once file has been closed
}
// newHeaderFileReader builds a headerFileReader whose stream yields the
// header bytes first and then the contents of file.
func newHeaderFileReader(header io.Reader, file *os.File) *headerFileReader {
	combined := io.MultiReader(header, file)
	return &headerFileReader{
		reader: combined,
		file:   file,
	}
}
// Read implements io.Reader by delegating to the wrapped MultiReader.
// The underlying file is closed as soon as the stream reaches a terminal
// state: on io.EOF (normal completion) and also on any other read error,
// so the descriptor cannot leak when a consumer stops reading after an
// error without calling Close.
func (r *headerFileReader) Read(p []byte) (n int, err error) {
	n, err = r.reader.Read(p)
	if err == io.EOF {
		r.closeFile()
		// EOF is a sentinel value that must be passed through unchanged for io.Reader interface
		return n, err //nolint:wrapcheck // EOF must not be wrapped
	}
	if err != nil {
		// A non-EOF error also terminates the stream; release the file now
		// rather than relying on the consumer to call Close explicitly.
		r.closeFile()
		return n, shared.WrapError(
			err, shared.ErrorTypeIO, shared.CodeIORead,
			"failed to read from header file reader",
		)
	}
	return n, nil
}
// closeFile closes the underlying file exactly once; later calls are no-ops.
func (r *headerFileReader) closeFile() {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Nothing to do if already closed or there is no file to close.
	if r.closed || r.file == nil {
		return
	}
	r.closed = true
	if err := r.file.Close(); err != nil {
		shared.LogError("Failed to close file", err)
	}
}
// Close implements io.Closer and ensures the underlying file is closed.
// This allows explicit cleanup when consumers stop reading before EOF.
// It is idempotent: once the file has been closed (here or via Read
// reaching EOF), further calls return nil.
func (r *headerFileReader) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.closed || r.file == nil {
		return nil
	}
	r.closed = true
	closeErr := r.file.Close()
	if closeErr != nil {
		shared.LogError("Failed to close file", closeErr)
	}
	return closeErr
}

View File

@@ -1,36 +1,15 @@
package fileproc_test
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
// writeTempConfig creates a temporary config file with the given YAML content
// and returns the directory path containing the config file.
func writeTempConfig(t *testing.T, content string) string {
t.Helper()
dir := t.TempDir()
configPath := filepath.Join(dir, "config.yaml")
if err := os.WriteFile(configPath, []byte(content), 0o600); err != nil {
t.Fatalf("Failed to create temp config: %v", err)
}
return dir
}
func TestProcessFile(t *testing.T) {
// Reset and load default config to ensure proper file size limits
testutil.ResetViperConfig(t, "")
@@ -53,20 +32,23 @@ func TestProcessFile(t *testing.T) {
errTmpFile := tmpFile.Close()
if errTmpFile != nil {
t.Fatal(errTmpFile)
return
}
ch := make(chan fileproc.WriteRequest, 1)
var wg sync.WaitGroup
wg.Go(func() {
defer close(ch)
wg.Add(1)
go func() {
defer wg.Done()
fileproc.ProcessFile(tmpFile.Name(), ch, "")
})
}()
wg.Wait()
close(ch)
var result string
for req := range ch {
result = req.Content
}
wg.Wait()
if !strings.Contains(result, tmpFile.Name()) {
t.Errorf("Output does not contain file path: %s", tmpFile.Name())
@@ -75,686 +57,3 @@ func TestProcessFile(t *testing.T) {
t.Errorf("Output does not contain file content: %s", content)
}
}
// TestNewFileProcessorWithMonitor tests processor creation with resource monitor.
// It also runs one file through ProcessWithContext to confirm the
// monitor-backed processor actually emits write requests.
func TestNewFileProcessorWithMonitor(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Create a resource monitor
	monitor := fileproc.NewResourceMonitor()
	defer monitor.Close()

	processor := fileproc.NewFileProcessorWithMonitor("test_source", monitor)
	if processor == nil {
		t.Error("Expected processor but got nil")
	}

	// Exercise the processor to verify monitor integration
	tmpFile, err := os.CreateTemp(t.TempDir(), "monitor_test")
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): t.TempDir is removed automatically at test end, so this
	// explicit removal is redundant but harmless.
	defer os.Remove(tmpFile.Name())
	if _, err := tmpFile.WriteString("test content"); err != nil {
		t.Fatal(err)
	}
	if err := tmpFile.Close(); err != nil {
		t.Fatal(err)
	}

	ctx := context.Background()
	writeCh := make(chan fileproc.WriteRequest, 1)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(writeCh)
		if err := processor.ProcessWithContext(ctx, tmpFile.Name(), writeCh); err != nil {
			t.Errorf("ProcessWithContext failed: %v", err)
		}
	})

	// Drain channel first to avoid deadlock if producer sends multiple requests
	requestCount := 0
	for range writeCh {
		requestCount++
	}
	// Wait for goroutine to finish after channel is drained
	wg.Wait()

	if requestCount == 0 {
		t.Error("Expected at least one write request from processor")
	}
}
// TestProcessFileWithMonitor tests file processing with resource monitoring.
// The reader goroutine is started before ProcessFileWithMonitor runs so a
// full channel can never block the producer.
func TestProcessFileWithMonitor(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Create temporary file
	tmpFile, err := os.CreateTemp(t.TempDir(), "testfile_monitor_*")
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	// NOTE(review): t.TempDir cleanup already removes this file; the explicit
	// removal is redundant but harmless.
	defer func() {
		if err := os.Remove(tmpFile.Name()); err != nil {
			t.Logf("Failed to remove temp file: %v", err)
		}
	}()

	content := "Test content with monitor"
	if _, err := tmpFile.WriteString(content); err != nil {
		t.Fatalf(shared.TestMsgFailedToWriteContent, err)
	}
	if err := tmpFile.Close(); err != nil {
		t.Fatalf(shared.TestMsgFailedToCloseFile, err)
	}

	// Create resource monitor
	monitor := fileproc.NewResourceMonitor()
	defer monitor.Close()

	ch := make(chan fileproc.WriteRequest, 1)
	ctx := context.Background()

	// Test ProcessFileWithMonitor
	var wg sync.WaitGroup
	var result string

	// Start reader goroutine first to prevent deadlock.
	// result is written only by this goroutine and read only after wg.Wait,
	// so the access is ordered by WaitGroup synchronization.
	wg.Go(func() {
		for req := range ch {
			result = req.Content
		}
	})

	// Process the file
	err = fileproc.ProcessFileWithMonitor(ctx, tmpFile.Name(), ch, "", monitor)
	close(ch)
	if err != nil {
		t.Fatalf("ProcessFileWithMonitor failed: %v", err)
	}

	// Wait for reader to finish
	wg.Wait()

	if !strings.Contains(result, content) {
		t.Error("Expected content not found in processed result")
	}
}
// testContent is a minimal valid Go source snippet used as fixture file
// content throughout these tests.
const testContent = "package main\nfunc main() {}\n"
// TestProcess verifies that Process emits a write request whose path and
// content correspond to the processed file.
func TestProcess(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Set up a temporary directory holding one Go source file.
	tmpDir := t.TempDir()
	testFile := filepath.Join(tmpDir, "test.go")
	content := testContent
	if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
	}

	processor := fileproc.NewFileProcessor(tmpDir)
	ch := make(chan fileproc.WriteRequest, 10)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		// Process the specific file, not the directory.
		processor.Process(testFile, ch)
	})

	// Drain everything the processor emitted.
	collected := make([]fileproc.WriteRequest, 0, 1)
	for req := range ch {
		collected = append(collected, req)
	}
	wg.Wait()

	if len(collected) == 0 {
		t.Error("Expected at least one processed file")
		return
	}

	// Look for our test file among the emitted requests.
	var found bool
	for _, req := range collected {
		matchesPath := strings.Contains(req.Path, shared.TestFileGo)
		matchesBody := strings.Contains(req.Content, content)
		if matchesPath && matchesBody {
			found = true
			break
		}
	}
	if !found {
		t.Error("Test file not found in processed results")
	}
}
// createLargeTestFile writes a temp file whose size comfortably exceeds the
// 1MB streaming threshold and returns the (already closed) file handle,
// which callers use only for its Name().
func createLargeTestFile(t *testing.T) *os.File {
	t.Helper()
	tmpFile, err := os.CreateTemp(t.TempDir(), "large_file_*.go")
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	line := "// Repeated comment line to exceed streaming threshold\n"
	// Enough repetitions to pass 1 MiB with generous margin.
	repeats := (1048576 / len(line)) + 1000
	payload := strings.Repeat(line, repeats)
	if _, err := tmpFile.WriteString(payload); err != nil {
		t.Fatalf(shared.TestMsgFailedToWriteContent, err)
	}
	if err := tmpFile.Close(); err != nil {
		t.Fatalf(shared.TestMsgFailedToCloseFile, err)
	}
	t.Logf("Created test file size: %d bytes", len(payload))
	return tmpFile
}
// processFileForStreaming runs ProcessFile on filePath and returns the
// streaming and/or inline write requests it produced. Either return value
// may be nil when no request of that kind was emitted.
func processFileForStreaming(t *testing.T, filePath string) (streamingReq, inlineReq *fileproc.WriteRequest) {
	t.Helper()
	ch := make(chan fileproc.WriteRequest, 1)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		fileproc.ProcessFile(filePath, ch, "")
	})
	for req := range ch {
		// Since Go 1.22 the range variable is per-iteration, so taking its
		// address is safe without an explicit copy.
		if req.IsStream {
			streamingReq = &req
		} else {
			inlineReq = &req
		}
	}
	wg.Wait()
	return streamingReq, inlineReq
}
// validateStreamingRequest validates a streaming request: it must carry a
// reader and no inline content, and the streamed header must mention the
// source file path.
func validateStreamingRequest(t *testing.T, streamingRequest *fileproc.WriteRequest, tmpFile *os.File) {
	t.Helper()
	if streamingRequest.Reader == nil {
		// Fatal rather than Error: continuing would dereference a nil
		// reader in the Read call below and panic.
		t.Fatal("Expected reader in streaming request")
	}
	if streamingRequest.Content != "" {
		t.Error("Expected empty content for streaming request")
	}
	// Release the reader's underlying file descriptor even though this test
	// stops reading before EOF.
	if closer, ok := streamingRequest.Reader.(io.Closer); ok {
		defer func() {
			if cerr := closer.Close(); cerr != nil {
				t.Logf("Failed to close streaming reader: %v", cerr)
			}
		}()
	}
	buffer := make([]byte, 1024)
	n, err := streamingRequest.Reader.Read(buffer)
	if err != nil && err != io.EOF {
		t.Errorf("Failed to read from streaming request: %v", err)
	}
	content := string(buffer[:n])
	if !strings.Contains(content, tmpFile.Name()) {
		t.Error("Expected file path in streamed header content")
	}
	t.Log("Successfully triggered streaming for large file and tested reader")
}
// TestProcessorStreamingIntegration tests streaming functionality in processor.
// Tiny thresholds are injected via a temp config so the ~1MB fixture file
// exceeds the streaming threshold.
func TestProcessorStreamingIntegration(t *testing.T) {
	configDir := writeTempConfig(t, `
max_file_size_mb: 0.001
streaming_threshold_mb: 0.0001
`)
	testutil.ResetViperConfig(t, configDir)

	tmpFile := createLargeTestFile(t)
	// NOTE(review): t.TempDir cleanup already removes this file; the explicit
	// removal is redundant but harmless.
	defer func() {
		if err := os.Remove(tmpFile.Name()); err != nil {
			t.Logf("Failed to remove temp file: %v", err)
		}
	}()

	streamingRequest, inlineRequest := processFileForStreaming(t, tmpFile.Name())

	if streamingRequest == nil && inlineRequest == nil {
		t.Error("Expected either streaming or inline request but got none")
	}
	if streamingRequest != nil {
		validateStreamingRequest(t, streamingRequest, tmpFile)
	} else {
		// The processor is free to fall back to inline handling; log it
		// instead of failing.
		t.Log("File processed inline instead of streaming")
	}
}
// TestProcessorContextCancellation tests context cancellation during
// processing: with an already-canceled context, ProcessWithContext should
// return promptly and emit few or no write requests.
func TestProcessorContextCancellation(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Create temporary directory with files
	tmpDir := t.TempDir()

	// Create multiple test files (range-over-int, Go 1.22+).
	for i := range 5 {
		testFile := filepath.Join(tmpDir, fmt.Sprintf("test%d.go", i))
		content := testContent
		if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
			t.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
		}
	}

	processor := fileproc.NewFileProcessor("test_source")
	ch := make(chan fileproc.WriteRequest, 10)

	// Use ProcessWithContext with immediate cancellation
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // Cancel immediately

	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		// Error is expected due to cancellation
		if err := processor.ProcessWithContext(ctx, tmpDir, ch); err != nil {
			// Log error for debugging, but don't fail test since cancellation is expected
			t.Logf("Expected error due to cancellation: %v", err)
		}
	})

	// Collect results - should be minimal due to cancellation
	results := make([]fileproc.WriteRequest, 0, 1) // Pre-allocate with expected capacity
	for req := range ch {
		results = append(results, req)
	}
	wg.Wait()

	// With immediate cancellation, we might get 0 results.
	// This tests that cancellation is respected.
	t.Logf("Processed %d files with immediate cancellation", len(results))
}
// TestProcessorValidationEdgeCases tests edge cases in file validation:
// a non-existent path and a file exceeding the configured size limit.
func TestProcessorValidationEdgeCases(t *testing.T) {
	// 1KB max file size so the "large" file below exceeds the limit.
	configDir := writeTempConfig(t, `
max_file_size_mb: 0.001 # 1KB limit for testing
`)
	testutil.ResetViperConfig(t, configDir)

	tmpDir := t.TempDir()

	// Test case 1: Non-existent file
	nonExistentFile := filepath.Join(tmpDir, "does-not-exist.go")
	processor := fileproc.NewFileProcessor(tmpDir)
	ch := make(chan fileproc.WriteRequest, 1)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		processor.Process(nonExistentFile, ch)
	})
	results := make([]fileproc.WriteRequest, 0)
	for req := range ch {
		results = append(results, req)
	}
	wg.Wait()
	// Should get no results due to file not existing
	if len(results) > 0 {
		t.Error("Expected no results for non-existent file")
	}

	// Test case 2: File that exceeds size limit
	largeFile := filepath.Join(tmpDir, "large.go")
	largeContent := strings.Repeat("// Large file content\n", 100) // > 1KB
	if err := os.WriteFile(largeFile, []byte(largeContent), 0o600); err != nil {
		t.Fatalf("Failed to create large file: %v", err)
	}
	ch2 := make(chan fileproc.WriteRequest, 1)
	// Reusing wg is safe: the previous Wait has already returned.
	wg.Go(func() {
		defer close(ch2)
		processor.Process(largeFile, ch2)
	})
	results2 := make([]fileproc.WriteRequest, 0)
	for req := range ch2 {
		results2 = append(results2, req)
	}
	wg.Wait()
	// Should get results because even large files are processed (just different strategy)
	t.Logf("Large file processing results: %d", len(results2))
}
// TestProcessorContextCancellationDuringValidation tests context cancellation during file validation.
// The context's 1ns deadline is allowed to expire before processing starts,
// so ProcessWithContext should bail out before emitting results.
func TestProcessorContextCancellationDuringValidation(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	tmpDir := t.TempDir()
	testFile := filepath.Join(tmpDir, "test.go")
	content := testContent
	if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
	}

	processor := fileproc.NewFileProcessor(tmpDir)

	// Create context that we'll cancel during processing
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	defer cancel()

	// Let context expire
	time.Sleep(1 * time.Millisecond)

	ch := make(chan fileproc.WriteRequest, 1)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		if err := processor.ProcessWithContext(ctx, testFile, ch); err != nil {
			t.Logf("ProcessWithContext error (may be expected): %v", err)
		}
	})

	results := make([]fileproc.WriteRequest, 0)
	for req := range ch {
		results = append(results, req)
	}
	wg.Wait()

	// Should get no results due to context cancellation; logged rather than
	// asserted since exact timing is not guaranteed.
	t.Logf("Results with canceled context: %d", len(results))
}
// TestProcessorInMemoryProcessingEdgeCases tests edge cases in in-memory processing.
// An empty file must still yield exactly one write request with a path.
func TestProcessorInMemoryProcessingEdgeCases(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	tmpDir := t.TempDir()

	// Test with empty file
	emptyFile := filepath.Join(tmpDir, "empty.go")
	if err := os.WriteFile(emptyFile, []byte(""), 0o600); err != nil {
		t.Fatalf("Failed to create empty file: %v", err)
	}

	processor := fileproc.NewFileProcessor(tmpDir)
	ch := make(chan fileproc.WriteRequest, 1)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		processor.Process(emptyFile, ch)
	})

	results := make([]fileproc.WriteRequest, 0)
	for req := range ch {
		results = append(results, req)
	}
	wg.Wait()

	if len(results) != 1 {
		t.Errorf("Expected 1 result for empty file, got %d", len(results))
	}
	if len(results) > 0 {
		result := results[0]
		if result.Path == "" {
			t.Error("Expected path in result for empty file")
		}
		// Empty file should still be processed
	}
}
// TestProcessorStreamingEdgeCases tests edge cases in streaming processing.
// The context is canceled shortly after processing starts; whether the
// cancellation lands before or after the request is emitted is timing
// dependent, which is why outcomes are logged rather than asserted.
func TestProcessorStreamingEdgeCases(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	tmpDir := t.TempDir()

	// Create a file larger than streaming threshold but test error conditions
	largeFile := filepath.Join(tmpDir, "large_stream.go")
	largeContent := strings.Repeat("// Large streaming file content line\n", 50000) // > 1MB
	if err := os.WriteFile(largeFile, []byte(largeContent), 0o600); err != nil {
		t.Fatalf("Failed to create large file: %v", err)
	}

	processor := fileproc.NewFileProcessor(tmpDir)

	// Test with context that gets canceled during streaming
	ctx, cancel := context.WithCancel(context.Background())

	ch := make(chan fileproc.WriteRequest, 1)
	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(ch)
		// Start processing
		// Error is expected due to cancellation
		if err := processor.ProcessWithContext(ctx, largeFile, ch); err != nil {
			// Log error for debugging, but don't fail test since cancellation is expected
			t.Logf("Expected error due to cancellation: %v", err)
		}
	})

	// Cancel context after a very short time
	go func() {
		time.Sleep(1 * time.Millisecond)
		cancel()
	}()

	results := make([]fileproc.WriteRequest, 0)
	for req := range ch {
		results = append(results, req)
		// If we get a streaming request, try to read from it with canceled context
		if req.IsStream && req.Reader != nil {
			buffer := make([]byte, 1024)
			_, err := req.Reader.Read(buffer)
			if err != nil && err != io.EOF {
				t.Logf("Expected error reading from canceled stream: %v", err)
			}
		}
	}
	wg.Wait()

	t.Logf("Results with streaming context cancellation: %d", len(results))
}
// Benchmarks for processor hot paths

// BenchmarkProcessFileInline benchmarks inline file processing for small files.
func BenchmarkProcessFileInline(b *testing.B) {
	// Initialize config for file processing
	viper.Reset()
	config.LoadConfig()

	// Create a small test file, well under the streaming threshold.
	tmpFile, err := os.CreateTemp(b.TempDir(), "bench_inline_*.go")
	if err != nil {
		b.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	content := strings.Repeat("// Inline benchmark content\n", 100) // ~2.6KB
	if _, err := tmpFile.WriteString(content); err != nil {
		b.Fatalf(shared.TestMsgFailedToWriteContent, err)
	}
	if err := tmpFile.Close(); err != nil {
		b.Fatalf(shared.TestMsgFailedToCloseFile, err)
	}

	// b.Loop resets the timer on its first call, so the setup above is not
	// measured and an explicit b.ResetTimer is unnecessary.
	for b.Loop() {
		ch := make(chan fileproc.WriteRequest, 1)
		var wg sync.WaitGroup
		wg.Go(func() {
			defer close(ch)
			fileproc.ProcessFile(tmpFile.Name(), ch, "")
		})
		for req := range ch {
			_ = req // Drain channel
		}
		wg.Wait()
	}
}
// BenchmarkProcessFileStreaming benchmarks streaming file processing for large files.
func BenchmarkProcessFileStreaming(b *testing.B) {
	// Initialize config for file processing
	viper.Reset()
	config.LoadConfig()

	// Create a large test file that triggers streaming
	tmpFile, err := os.CreateTemp(b.TempDir(), "bench_streaming_*.go")
	if err != nil {
		b.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	// Create content larger than streaming threshold (1MB)
	lineContent := "// Streaming benchmark content line that will be repeated\n"
	repeatCount := (1048576 / len(lineContent)) + 1000
	content := strings.Repeat(lineContent, repeatCount)
	if _, err := tmpFile.WriteString(content); err != nil {
		b.Fatalf(shared.TestMsgFailedToWriteContent, err)
	}
	if err := tmpFile.Close(); err != nil {
		b.Fatalf(shared.TestMsgFailedToCloseFile, err)
	}

	// b.Loop resets the timer on its first call, so the setup above is not
	// measured and an explicit b.ResetTimer is unnecessary.
	for b.Loop() {
		ch := make(chan fileproc.WriteRequest, 1)
		var wg sync.WaitGroup
		wg.Go(func() {
			defer close(ch)
			fileproc.ProcessFile(tmpFile.Name(), ch, "")
		})
		for req := range ch {
			// If streaming, read some content to exercise the reader
			if req.IsStream && req.Reader != nil {
				buffer := make([]byte, 4096)
				for {
					if _, err := req.Reader.Read(buffer); err != nil {
						break
					}
				}
			}
		}
		wg.Wait()
	}
}
// BenchmarkProcessorWithContext benchmarks ProcessWithContext for a single file.
func BenchmarkProcessorWithContext(b *testing.B) {
	tmpDir := b.TempDir()
	testFile := filepath.Join(tmpDir, "bench_context.go")
	content := strings.Repeat("// Benchmark file content\n", 50)
	if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
		b.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
	}

	processor := fileproc.NewFileProcessor(tmpDir)
	ctx := context.Background()

	// b.Loop excludes setup before its first call; no b.ResetTimer needed.
	for b.Loop() {
		ch := make(chan fileproc.WriteRequest, 1)
		var wg sync.WaitGroup
		wg.Go(func() {
			defer close(ch)
			_ = processor.ProcessWithContext(ctx, testFile, ch)
		})
		for req := range ch {
			_ = req // Drain channel
		}
		wg.Wait()
	}
}
// BenchmarkProcessorWithMonitor benchmarks processing with resource monitoring.
func BenchmarkProcessorWithMonitor(b *testing.B) {
	tmpDir := b.TempDir()
	testFile := filepath.Join(tmpDir, "bench_monitor.go")
	content := strings.Repeat("// Benchmark file content with monitor\n", 50)
	if err := os.WriteFile(testFile, []byte(content), 0o600); err != nil {
		b.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
	}

	monitor := fileproc.NewResourceMonitor()
	defer monitor.Close()
	processor := fileproc.NewFileProcessorWithMonitor(tmpDir, monitor)
	ctx := context.Background()

	// b.Loop excludes setup before its first call; no b.ResetTimer needed.
	for b.Loop() {
		ch := make(chan fileproc.WriteRequest, 1)
		var wg sync.WaitGroup
		wg.Go(func() {
			defer close(ch)
			_ = processor.ProcessWithContext(ctx, testFile, ch)
		})
		for req := range ch {
			_ = req // Drain channel
		}
		wg.Wait()
	}
}
// BenchmarkProcessorConcurrent benchmarks concurrent file processing.
func BenchmarkProcessorConcurrent(b *testing.B) {
tmpDir := b.TempDir()
// Create multiple test files
testFiles := make([]string, 10)
for i := 0; i < 10; i++ {
testFiles[i] = filepath.Join(tmpDir, fmt.Sprintf("bench_concurrent_%d.go", i))
content := strings.Repeat(fmt.Sprintf("// Concurrent file %d content\n", i), 50)
if err := os.WriteFile(testFiles[i], []byte(content), 0o600); err != nil {
b.Fatalf(shared.TestMsgFailedToCreateTestFile, err)
}
}
processor := fileproc.NewFileProcessor(tmpDir)
ctx := context.Background()
fileCount := len(testFiles)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
testFile := testFiles[i%fileCount]
ch := make(chan fileproc.WriteRequest, 1)
var wg sync.WaitGroup
wg.Go(func() {
defer close(ch)
_ = processor.ProcessWithContext(ctx, testFile, ch)
})
for req := range ch {
_ = req // Drain channel
}
wg.Wait()
i++
}
})
}

View File

@@ -5,8 +5,6 @@ import (
"path/filepath"
"strings"
"sync"
"github.com/ivuorinen/gibidify/shared"
)
const minExtensionLength = 2
@@ -54,9 +52,9 @@ func initRegistry() *FileTypeRegistry {
imageExts: getImageExtensions(),
binaryExts: getBinaryExtensions(),
languageMap: getLanguageMap(),
extCache: make(map[string]string, shared.FileTypeRegistryMaxCacheSize),
resultCache: make(map[string]FileTypeResult, shared.FileTypeRegistryMaxCacheSize),
maxCacheSize: shared.FileTypeRegistryMaxCacheSize,
extCache: make(map[string]string, 1000), // Cache for extension normalization
resultCache: make(map[string]FileTypeResult, 500), // Cache for type results
maxCacheSize: 500,
}
}
@@ -65,28 +63,25 @@ func getRegistry() *FileTypeRegistry {
registryOnce.Do(func() {
registry = initRegistry()
})
return registry
}
// DefaultRegistry returns the default file type registry.
func DefaultRegistry() *FileTypeRegistry {
// GetDefaultRegistry returns the default file type registry.
func GetDefaultRegistry() *FileTypeRegistry {
return getRegistry()
}
// Stats returns a copy of the current registry statistics.
func (r *FileTypeRegistry) Stats() RegistryStats {
// GetStats returns a copy of the current registry statistics.
func (r *FileTypeRegistry) GetStats() RegistryStats {
r.cacheMutex.RLock()
defer r.cacheMutex.RUnlock()
return r.stats
}
// CacheInfo returns current cache size information.
func (r *FileTypeRegistry) CacheInfo() (extCacheSize, resultCacheSize, maxCacheSize int) {
// GetCacheInfo returns current cache size information.
func (r *FileTypeRegistry) GetCacheInfo() (extCacheSize, resultCacheSize, maxCacheSize int) {
r.cacheMutex.RLock()
defer r.cacheMutex.RUnlock()
return len(r.extCache), len(r.resultCache), r.maxCacheSize
}
@@ -106,9 +101,7 @@ func normalizeExtension(filename string) string {
func isSpecialFile(filename string, extensions map[string]bool) bool {
if filepath.Ext(filename) == "" {
basename := strings.ToLower(filepath.Base(filename))
return extensions[basename]
}
return false
}

View File

@@ -1,9 +1,7 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"context"
"fmt"
"sync/atomic"
"time"
)
@@ -28,7 +26,7 @@ func (rm *ResourceMonitor) AcquireReadSlot(ctx context.Context) error {
// Wait and retry
select {
case <-ctx.Done():
return fmt.Errorf("context canceled while waiting for read slot: %w", ctx.Err())
return ctx.Err()
case <-time.After(time.Millisecond):
// Continue loop
}
@@ -47,22 +45,15 @@ func (rm *ResourceMonitor) ReleaseReadSlot() {
// CreateFileProcessingContext creates a context with file processing timeout.
func (rm *ResourceMonitor) CreateFileProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
if !rm.enabled || rm.fileProcessingTimeout <= 0 {
// No-op cancel function - monitoring disabled or no timeout configured
return parent, func() {}
}
return context.WithTimeout(parent, rm.fileProcessingTimeout)
}
// CreateOverallProcessingContext creates a context with overall processing timeout.
func (rm *ResourceMonitor) CreateOverallProcessingContext(parent context.Context) (
context.Context,
context.CancelFunc,
) {
func (rm *ResourceMonitor) CreateOverallProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
if !rm.enabled || rm.overallTimeout <= 0 {
// No-op cancel function - monitoring disabled or no timeout configured
return parent, func() {}
}
return context.WithTimeout(parent, rm.overallTimeout)
}

View File

@@ -10,7 +10,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
func TestResourceMonitorConcurrentReadsLimit(t *testing.T) {
func TestResourceMonitor_ConcurrentReadsLimit(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set a low concurrent reads limit for testing
@@ -35,7 +35,7 @@ func TestResourceMonitorConcurrentReadsLimit(t *testing.T) {
t.Errorf("Expected no error for second read slot, got %v", err)
}
// Third read slot should time out (context deadline exceeded)
// Third read slot should timeout (context deadline exceeded)
err = rm.AcquireReadSlot(ctx)
if err == nil {
t.Error("Expected timeout error for third read slot, got nil")
@@ -58,7 +58,7 @@ func TestResourceMonitorConcurrentReadsLimit(t *testing.T) {
rm.ReleaseReadSlot()
}
func TestResourceMonitorTimeoutContexts(t *testing.T) {
func TestResourceMonitor_TimeoutContexts(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set short timeouts for testing

View File

@@ -11,7 +11,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
func TestResourceMonitorIntegration(t *testing.T) {
func TestResourceMonitor_Integration(t *testing.T) {
// Create temporary test directory
tempDir := t.TempDir()
@@ -47,7 +47,6 @@ func TestResourceMonitorIntegration(t *testing.T) {
err = rm.ValidateFileProcessing(filePath, fileInfo.Size())
if err != nil {
t.Errorf("Failed to validate file %s: %v", filePath, err)
continue
}
@@ -55,7 +54,6 @@ func TestResourceMonitorIntegration(t *testing.T) {
err = rm.AcquireReadSlot(ctx)
if err != nil {
t.Errorf("Failed to acquire read slot for %s: %v", filePath, err)
continue
}
@@ -73,7 +71,7 @@ func TestResourceMonitorIntegration(t *testing.T) {
}
// Verify final metrics
metrics := rm.Metrics()
metrics := rm.GetMetrics()
if metrics.FilesProcessed != int64(len(testFiles)) {
t.Errorf("Expected %d files processed, got %d", len(testFiles), metrics.FilesProcessed)
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -6,7 +5,7 @@ import (
"sync/atomic"
"time"
"github.com/ivuorinen/gibidify/shared"
"github.com/sirupsen/logrus"
)
// RecordFileProcessed records that a file has been successfully processed.
@@ -17,8 +16,8 @@ func (rm *ResourceMonitor) RecordFileProcessed(fileSize int64) {
}
}
// Metrics returns current resource usage metrics.
func (rm *ResourceMonitor) Metrics() ResourceMetrics {
// GetMetrics returns current resource usage metrics.
func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
if !rm.enableResourceMon {
return ResourceMetrics{}
}
@@ -53,11 +52,10 @@ func (rm *ResourceMonitor) Metrics() ResourceMetrics {
FilesProcessed: filesProcessed,
TotalSizeProcessed: totalSize,
ConcurrentReads: atomic.LoadInt64(&rm.concurrentReads),
MaxConcurrentReads: int64(rm.maxConcurrentReads),
ProcessingDuration: duration,
AverageFileSize: avgFileSize,
ProcessingRate: processingRate,
MemoryUsageMB: shared.BytesToMB(m.Alloc),
MemoryUsageMB: int64(m.Alloc) / 1024 / 1024,
MaxMemoryUsageMB: int64(rm.hardMemoryLimitMB),
ViolationsDetected: violations,
DegradationActive: rm.degradationActive,
@@ -68,16 +66,14 @@ func (rm *ResourceMonitor) Metrics() ResourceMetrics {
// LogResourceInfo logs current resource limit configuration.
func (rm *ResourceMonitor) LogResourceInfo() {
logger := shared.GetLogger()
if rm.enabled {
logger.Infof("Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
rm.maxFiles, rm.maxTotalSize/int64(shared.BytesPerMB), int(rm.fileProcessingTimeout.Seconds()),
int(rm.overallTimeout.Seconds()))
logger.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
logrus.Infof("Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
rm.maxFiles, rm.maxTotalSize/1024/1024, int(rm.fileProcessingTimeout.Seconds()), int(rm.overallTimeout.Seconds()))
logrus.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
rm.maxConcurrentReads, rm.rateLimitFilesPerSec, rm.hardMemoryLimitMB)
logger.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",
logrus.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",
rm.enableGracefulDegr, rm.enableResourceMon)
} else {
logger.Info("Resource limits disabled")
logrus.Info("Resource limits disabled")
}
}

View File

@@ -9,7 +9,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
func TestResourceMonitorMetrics(t *testing.T) {
func TestResourceMonitor_Metrics(t *testing.T) {
testutil.ResetViperConfig(t, "")
viper.Set("resourceLimits.enabled", true)
@@ -23,7 +23,7 @@ func TestResourceMonitorMetrics(t *testing.T) {
rm.RecordFileProcessed(2000)
rm.RecordFileProcessed(500)
metrics := rm.Metrics()
metrics := rm.GetMetrics()
// Verify metrics
if metrics.FilesProcessed != 3 {

View File

@@ -1,12 +1,10 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"context"
"fmt"
"time"
"github.com/ivuorinen/gibidify/shared"
"github.com/sirupsen/logrus"
)
// WaitForRateLimit waits for rate limiting if enabled.
@@ -17,29 +15,22 @@ func (rm *ResourceMonitor) WaitForRateLimit(ctx context.Context) error {
select {
case <-ctx.Done():
return fmt.Errorf("context canceled while waiting for rate limit: %w", ctx.Err())
return ctx.Err()
case <-rm.rateLimitChan:
return nil
case <-time.After(time.Second): // Fallback timeout
logger := shared.GetLogger()
logger.Warn("Rate limiting timeout exceeded, continuing without rate limit")
logrus.Warn("Rate limiting timeout exceeded, continuing without rate limit")
return nil
}
}
// rateLimiterRefill refills the rate limiting channel periodically.
func (rm *ResourceMonitor) rateLimiterRefill() {
for {
select {
case <-rm.done:
return
case <-rm.rateLimiter.C:
for range rm.rateLimiter.C {
select {
case rm.rateLimitChan <- struct{}{}:
default:
// Channel is full, skip
}
}
}
}

View File

@@ -10,7 +10,7 @@ import (
"github.com/ivuorinen/gibidify/testutil"
)
func TestResourceMonitorRateLimiting(t *testing.T) {
func TestResourceMonitor_RateLimiting(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Enable rate limiting with a low rate for testing

View File

@@ -1,11 +1,9 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
// IsEmergencyStopActive returns whether emergency stop is active.
func (rm *ResourceMonitor) IsEmergencyStopActive() bool {
rm.mu.RLock()
defer rm.mu.RUnlock()
return rm.emergencyStopRequested
}
@@ -13,27 +11,11 @@ func (rm *ResourceMonitor) IsEmergencyStopActive() bool {
func (rm *ResourceMonitor) IsDegradationActive() bool {
rm.mu.RLock()
defer rm.mu.RUnlock()
return rm.degradationActive
}
// Close cleans up the resource monitor.
func (rm *ResourceMonitor) Close() {
rm.mu.Lock()
defer rm.mu.Unlock()
// Prevent multiple closes
if rm.closed {
return
}
rm.closed = true
// Signal goroutines to stop
if rm.done != nil {
close(rm.done)
}
// Stop the ticker
if rm.rateLimiter != nil {
rm.rateLimiter.Stop()
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -6,7 +5,6 @@ import (
"time"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
)
// ResourceMonitor monitors resource usage and enforces limits to prevent DoS attacks.
@@ -33,14 +31,12 @@ type ResourceMonitor struct {
// Rate limiting
rateLimiter *time.Ticker
rateLimitChan chan struct{}
done chan struct{} // Signal to stop goroutines
// Synchronization
mu sync.RWMutex
violationLogged map[string]bool
degradationActive bool
emergencyStopRequested bool
closed bool
}
// ResourceMetrics holds comprehensive resource usage metrics.
@@ -48,7 +44,6 @@ type ResourceMetrics struct {
FilesProcessed int64 `json:"files_processed"`
TotalSizeProcessed int64 `json:"total_size_processed"`
ConcurrentReads int64 `json:"concurrent_reads"`
MaxConcurrentReads int64 `json:"max_concurrent_reads"`
ProcessingDuration time.Duration `json:"processing_duration"`
AverageFileSize float64 `json:"average_file_size"`
ProcessingRate float64 `json:"processing_rate_files_per_sec"`
@@ -64,30 +59,29 @@ type ResourceMetrics struct {
type ResourceViolation struct {
Type string `json:"type"`
Message string `json:"message"`
Current any `json:"current"`
Limit any `json:"limit"`
Current interface{} `json:"current"`
Limit interface{} `json:"limit"`
Timestamp time.Time `json:"timestamp"`
Context map[string]any `json:"context"`
Context map[string]interface{} `json:"context"`
}
// NewResourceMonitor creates a new resource monitor with configuration.
func NewResourceMonitor() *ResourceMonitor {
rm := &ResourceMonitor{
enabled: config.ResourceLimitsEnabled(),
maxFiles: config.MaxFiles(),
maxTotalSize: config.MaxTotalSize(),
fileProcessingTimeout: time.Duration(config.FileProcessingTimeoutSec()) * time.Second,
overallTimeout: time.Duration(config.OverallTimeoutSec()) * time.Second,
maxConcurrentReads: config.MaxConcurrentReads(),
rateLimitFilesPerSec: config.RateLimitFilesPerSec(),
hardMemoryLimitMB: config.HardMemoryLimitMB(),
enableGracefulDegr: config.EnableGracefulDegradation(),
enableResourceMon: config.EnableResourceMonitoring(),
enabled: config.GetResourceLimitsEnabled(),
maxFiles: config.GetMaxFiles(),
maxTotalSize: config.GetMaxTotalSize(),
fileProcessingTimeout: time.Duration(config.GetFileProcessingTimeoutSec()) * time.Second,
overallTimeout: time.Duration(config.GetOverallTimeoutSec()) * time.Second,
maxConcurrentReads: config.GetMaxConcurrentReads(),
rateLimitFilesPerSec: config.GetRateLimitFilesPerSec(),
hardMemoryLimitMB: config.GetHardMemoryLimitMB(),
enableGracefulDegr: config.GetEnableGracefulDegradation(),
enableResourceMon: config.GetEnableResourceMonitoring(),
startTime: time.Now(),
lastRateLimitCheck: time.Now(),
violationLogged: make(map[string]bool),
hardMemoryLimitBytes: int64(config.HardMemoryLimitMB()) * int64(shared.BytesPerMB),
done: make(chan struct{}),
hardMemoryLimitBytes: int64(config.GetHardMemoryLimitMB()) * 1024 * 1024,
}
// Initialize rate limiter if rate limiting is enabled

View File

@@ -7,11 +7,11 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/testutil"
)
func TestResourceMonitorNewResourceMonitor(t *testing.T) {
func TestResourceMonitor_NewResourceMonitor(t *testing.T) {
// Reset viper for clean test state
testutil.ResetViperConfig(t, "")
@@ -25,24 +25,24 @@ func TestResourceMonitorNewResourceMonitor(t *testing.T) {
t.Error("Expected resource monitor to be enabled by default")
}
if rm.maxFiles != shared.ConfigMaxFilesDefault {
t.Errorf("Expected maxFiles to be %d, got %d", shared.ConfigMaxFilesDefault, rm.maxFiles)
if rm.maxFiles != config.DefaultMaxFiles {
t.Errorf("Expected maxFiles to be %d, got %d", config.DefaultMaxFiles, rm.maxFiles)
}
if rm.maxTotalSize != shared.ConfigMaxTotalSizeDefault {
t.Errorf("Expected maxTotalSize to be %d, got %d", shared.ConfigMaxTotalSizeDefault, rm.maxTotalSize)
if rm.maxTotalSize != config.DefaultMaxTotalSize {
t.Errorf("Expected maxTotalSize to be %d, got %d", config.DefaultMaxTotalSize, rm.maxTotalSize)
}
if rm.fileProcessingTimeout != time.Duration(shared.ConfigFileProcessingTimeoutSecDefault)*time.Second {
if rm.fileProcessingTimeout != time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second {
t.Errorf("Expected fileProcessingTimeout to be %v, got %v",
time.Duration(shared.ConfigFileProcessingTimeoutSecDefault)*time.Second, rm.fileProcessingTimeout)
time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second, rm.fileProcessingTimeout)
}
// Clean up
rm.Close()
}
func TestResourceMonitorDisabledResourceLimits(t *testing.T) {
func TestResourceMonitor_DisabledResourceLimits(t *testing.T) {
// Reset viper for clean test state
testutil.ResetViperConfig(t, "")
@@ -72,77 +72,3 @@ func TestResourceMonitorDisabledResourceLimits(t *testing.T) {
t.Errorf("Expected no error when rate limiting disabled, got %v", err)
}
}
// TestResourceMonitorStateQueries tests state query functions.
func TestResourceMonitorStateQueries(t *testing.T) {
testutil.ResetViperConfig(t, "")
rm := NewResourceMonitor()
defer rm.Close()
// Test IsEmergencyStopActive - should be false initially
if rm.IsEmergencyStopActive() {
t.Error("Expected emergency stop to be inactive initially")
}
// Test IsDegradationActive - should be false initially
if rm.IsDegradationActive() {
t.Error("Expected degradation mode to be inactive initially")
}
}
// TestResourceMonitorIsEmergencyStopActive tests the IsEmergencyStopActive method.
func TestResourceMonitorIsEmergencyStopActive(t *testing.T) {
testutil.ResetViperConfig(t, "")
rm := NewResourceMonitor()
defer rm.Close()
// Test initial state
active := rm.IsEmergencyStopActive()
if active {
t.Error("Expected emergency stop to be inactive initially")
}
// The method should return a consistent value on multiple calls
for i := 0; i < 5; i++ {
if rm.IsEmergencyStopActive() != active {
t.Error("IsEmergencyStopActive should return consistent values")
}
}
}
// TestResourceMonitorIsDegradationActive tests the IsDegradationActive method.
func TestResourceMonitorIsDegradationActive(t *testing.T) {
testutil.ResetViperConfig(t, "")
rm := NewResourceMonitor()
defer rm.Close()
// Test initial state
active := rm.IsDegradationActive()
if active {
t.Error("Expected degradation mode to be inactive initially")
}
// The method should return a consistent value on multiple calls
for i := 0; i < 5; i++ {
if rm.IsDegradationActive() != active {
t.Error("IsDegradationActive should return consistent values")
}
}
}
// TestResourceMonitorClose tests the Close method.
func TestResourceMonitorClose(t *testing.T) {
testutil.ResetViperConfig(t, "")
rm := NewResourceMonitor()
// Close should not panic
rm.Close()
// Multiple closes should be safe
rm.Close()
rm.Close()
}

View File

@@ -1,4 +1,3 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
@@ -6,7 +5,9 @@ import (
"sync/atomic"
"time"
"github.com/ivuorinen/gibidify/shared"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/utils"
)
// ValidateFileProcessing checks if a file can be processed based on resource limits.
@@ -20,12 +21,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check if emergency stop is active
if rm.emergencyStopRequested {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitMemory,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitMemory,
"processing stopped due to emergency memory condition",
filePath,
map[string]any{
map[string]interface{}{
"emergency_stop_active": true,
},
)
@@ -34,12 +35,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check file count limit
currentFiles := atomic.LoadInt64(&rm.filesProcessed)
if int(currentFiles) >= rm.maxFiles {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitFiles,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitFiles,
"maximum file count limit exceeded",
filePath,
map[string]any{
map[string]interface{}{
"current_files": currentFiles,
"max_files": rm.maxFiles,
},
@@ -49,12 +50,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check total size limit
currentTotalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
if currentTotalSize+fileSize > rm.maxTotalSize {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTotalSize,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitTotalSize,
"maximum total size limit would be exceeded",
filePath,
map[string]any{
map[string]interface{}{
"current_total_size": currentTotalSize,
"file_size": fileSize,
"max_total_size": rm.maxTotalSize,
@@ -64,12 +65,12 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check overall timeout
if time.Since(rm.startTime) > rm.overallTimeout {
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitTimeout,
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitTimeout,
"overall processing timeout exceeded",
filePath,
map[string]any{
map[string]interface{}{
"processing_duration": time.Since(rm.startTime),
"overall_timeout": rm.overallTimeout,
},
@@ -87,93 +88,61 @@ func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
var m runtime.MemStats
runtime.ReadMemStats(&m)
currentMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
currentMemory := int64(m.Alloc)
if currentMemory <= rm.hardMemoryLimitBytes {
return nil
}
return rm.handleMemoryLimitExceeded(currentMemory)
}
// handleMemoryLimitExceeded handles the case when hard memory limit is exceeded.
func (rm *ResourceMonitor) handleMemoryLimitExceeded(currentMemory int64) error {
if currentMemory > rm.hardMemoryLimitBytes {
rm.mu.Lock()
defer rm.mu.Unlock()
rm.logMemoryViolation(currentMemory)
if !rm.enableGracefulDegr {
return rm.createHardMemoryLimitError(currentMemory, false)
}
return rm.tryGracefulRecovery(currentMemory)
}
// logMemoryViolation logs memory limit violation if not already logged.
func (rm *ResourceMonitor) logMemoryViolation(currentMemory int64) {
// Log violation if not already logged
violationKey := "hard_memory_limit"
// Ensure map is initialized
if rm.violationLogged == nil {
rm.violationLogged = make(map[string]bool)
}
if rm.violationLogged[violationKey] {
return
}
logger := shared.GetLogger()
logger.Errorf("Hard memory limit exceeded: %dMB > %dMB",
currentMemory/int64(shared.BytesPerMB), rm.hardMemoryLimitMB)
if !rm.violationLogged[violationKey] {
logrus.Errorf("Hard memory limit exceeded: %dMB > %dMB",
currentMemory/1024/1024, rm.hardMemoryLimitMB)
rm.violationLogged[violationKey] = true
}
}
// tryGracefulRecovery attempts graceful recovery by forcing GC.
func (rm *ResourceMonitor) tryGracefulRecovery(_ int64) error {
if rm.enableGracefulDegr {
// Force garbage collection
runtime.GC()
// Check again after GC
var m runtime.MemStats
runtime.ReadMemStats(&m)
newMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
currentMemory = int64(m.Alloc)
if newMemory > rm.hardMemoryLimitBytes {
if currentMemory > rm.hardMemoryLimitBytes {
// Still over limit, activate emergency stop
rm.emergencyStopRequested = true
return rm.createHardMemoryLimitError(newMemory, true)
}
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitMemory,
"hard memory limit exceeded, emergency stop activated",
"",
map[string]interface{}{
"current_memory_mb": currentMemory / 1024 / 1024,
"limit_mb": rm.hardMemoryLimitMB,
"emergency_stop": true,
},
)
} else {
// Memory freed by GC, continue with degradation
rm.degradationActive = true
logger := shared.GetLogger()
logger.Info("Memory freed by garbage collection, continuing with degradation mode")
logrus.Info("Memory freed by garbage collection, continuing with degradation mode")
}
} else {
// No graceful degradation, hard stop
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitMemory,
"hard memory limit exceeded",
"",
map[string]interface{}{
"current_memory_mb": currentMemory / 1024 / 1024,
"limit_mb": rm.hardMemoryLimitMB,
},
)
}
}
return nil
}
// createHardMemoryLimitError creates a structured error for memory limit exceeded.
func (rm *ResourceMonitor) createHardMemoryLimitError(currentMemory int64, emergencyStop bool) error {
message := "hard memory limit exceeded"
if emergencyStop {
message = "hard memory limit exceeded, emergency stop activated"
}
context := map[string]any{
"current_memory_mb": currentMemory / int64(shared.BytesPerMB),
"limit_mb": rm.hardMemoryLimitMB,
}
if emergencyStop {
context["emergency_stop"] = true
}
return shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeResourceLimitMemory,
message,
"",
context,
)
}

View File

@@ -1,47 +1,19 @@
package fileproc
import (
"errors"
"strings"
"testing"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
"github.com/ivuorinen/gibidify/utils"
)
// assertStructuredError verifies that an error is a StructuredError with the expected code.
func assertStructuredError(t *testing.T, err error, expectedCode string) {
t.Helper()
structErr := &shared.StructuredError{}
ok := errors.As(err, &structErr)
if !ok {
t.Errorf("Expected StructuredError, got %T", err)
} else if structErr.Code != expectedCode {
t.Errorf("Expected error code %s, got %s", expectedCode, structErr.Code)
}
}
// validateMemoryLimitError validates that an error is a proper memory limit StructuredError.
func validateMemoryLimitError(t *testing.T, err error) {
t.Helper()
structErr := &shared.StructuredError{}
if errors.As(err, &structErr) {
if structErr.Code != shared.CodeResourceLimitMemory {
t.Errorf("Expected memory limit error code, got %s", structErr.Code)
}
} else {
t.Errorf("Expected StructuredError, got %T", err)
}
}
func TestResourceMonitorFileCountLimit(t *testing.T) {
func TestResourceMonitor_FileCountLimit(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set a very low file count limit for testing
viper.Set(shared.TestCfgResourceLimitsEnabled, true)
viper.Set("resourceLimits.enabled", true)
viper.Set("resourceLimits.maxFiles", 2)
rm := NewResourceMonitor()
@@ -68,14 +40,19 @@ func TestResourceMonitorFileCountLimit(t *testing.T) {
}
// Verify it's the correct error type
assertStructuredError(t, err, shared.CodeResourceLimitFiles)
structErr, ok := err.(*utils.StructuredError)
if !ok {
t.Errorf("Expected StructuredError, got %T", err)
} else if structErr.Code != utils.CodeResourceLimitFiles {
t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitFiles, structErr.Code)
}
}
func TestResourceMonitorTotalSizeLimit(t *testing.T) {
func TestResourceMonitor_TotalSizeLimit(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set a low total size limit for testing (1KB)
viper.Set(shared.TestCfgResourceLimitsEnabled, true)
viper.Set("resourceLimits.enabled", true)
viper.Set("resourceLimits.maxTotalSize", 1024)
rm := NewResourceMonitor()
@@ -102,103 +79,10 @@ func TestResourceMonitorTotalSizeLimit(t *testing.T) {
}
// Verify it's the correct error type
assertStructuredError(t, err, shared.CodeResourceLimitTotalSize)
}
// TestResourceMonitor_MemoryLimitExceeded tests memory limit violation scenarios.
func TestResourceMonitorMemoryLimitExceeded(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set very low memory limit to try to force violations
viper.Set(shared.TestCfgResourceLimitsEnabled, true)
viper.Set("resourceLimits.hardMemoryLimitMB", 0.001) // 1KB - extremely low
rm := NewResourceMonitor()
defer rm.Close()
// Allocate large buffer to increase memory usage before check
largeBuffer := make([]byte, 10*1024*1024) // 10MB allocation
_ = largeBuffer[0] // Use the buffer to prevent optimization
// Check hard memory limit - might trigger if actual memory is high enough
err := rm.CheckHardMemoryLimit()
// Note: This test might not always fail since it depends on actual runtime memory
// But if it does fail, verify it's the correct error type
if err != nil {
validateMemoryLimitError(t, err)
t.Log("Successfully triggered memory limit violation")
} else {
t.Log("Memory limit check passed - actual memory usage may be within limits")
}
}
// TestResourceMonitor_MemoryLimitHandling tests the memory violation detection.
func TestResourceMonitorMemoryLimitHandling(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Enable resource limits with very small hard limit
viper.Set(shared.TestCfgResourceLimitsEnabled, true)
viper.Set("resourceLimits.hardMemoryLimitMB", 0.0001) // Very tiny limit
viper.Set("resourceLimits.enableGracefulDegradation", true)
rm := NewResourceMonitor()
defer rm.Close()
// Allocate more memory to increase chances of triggering limit
buffers := make([][]byte, 0, 100) // Pre-allocate capacity
for i := 0; i < 100; i++ {
buffer := make([]byte, 1024*1024) // 1MB each
buffers = append(buffers, buffer)
_ = buffer[0] // Use buffer
_ = buffers // Use the slice to prevent unused variable warning
// Check periodically
if i%10 == 0 {
err := rm.CheckHardMemoryLimit()
if err != nil {
// Successfully triggered memory limit
if !strings.Contains(err.Error(), "memory limit") {
t.Errorf("Expected error message to mention memory limit, got: %v", err)
}
t.Log("Successfully triggered memory limit handling")
return
}
}
}
t.Log("Could not trigger memory limit - actual memory usage may be lower than limit")
}
// TestResourceMonitorGracefulRecovery tests graceful recovery attempts.
func TestResourceMonitorGracefulRecovery(t *testing.T) {
testutil.ResetViperConfig(t, "")
// Set memory limits that will trigger recovery
viper.Set(shared.TestCfgResourceLimitsEnabled, true)
rm := NewResourceMonitor()
defer rm.Close()
// Force a deterministic 1-byte hard memory limit to trigger recovery
rm.hardMemoryLimitBytes = 1
// Process multiple files to accumulate memory usage
for i := 0; i < 3; i++ {
filePath := "/tmp/test" + string(rune('1'+i)) + ".txt"
fileSize := int64(400) // Each file is 400 bytes
// First few might pass, but eventually should trigger recovery mechanisms
err := rm.ValidateFileProcessing(filePath, fileSize)
if err != nil {
// Once we hit the limit, test that the error is appropriate
if !strings.Contains(err.Error(), "resource") && !strings.Contains(err.Error(), "limit") {
t.Errorf("Expected resource limit error, got: %v", err)
}
break
}
rm.RecordFileProcessed(fileSize)
structErr, ok := err.(*utils.StructuredError)
if !ok {
t.Errorf("Expected StructuredError, got %T", err)
} else if structErr.Code != utils.CodeResourceLimitTotalSize {
t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitTotalSize, structErr.Code)
}
}

View File

@@ -5,7 +5,7 @@ import (
"os"
"path/filepath"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// Walker defines an interface for scanning directories.
@@ -30,16 +30,10 @@ func NewProdWalker() *ProdWalker {
// Walk scans the given root directory recursively and returns a slice of file paths
// that are not ignored based on .gitignore/.ignore files, the configuration, or the default binary/image filter.
func (w *ProdWalker) Walk(root string) ([]string, error) {
absRoot, err := shared.AbsolutePath(root)
absRoot, err := utils.GetAbsolutePath(root)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSPathResolution,
"failed to resolve root path",
).WithFilePath(root)
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSPathResolution, "failed to resolve root path").WithFilePath(root)
}
return w.walkDir(absRoot, []ignoreRule{})
}
@@ -53,12 +47,7 @@ func (w *ProdWalker) walkDir(currentDir string, parentRules []ignoreRule) ([]str
entries, err := os.ReadDir(currentDir)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeFileSystem,
shared.CodeFSAccess,
"failed to read directory",
).WithFilePath(currentDir)
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to read directory").WithFilePath(currentDir)
}
rules := loadIgnoreRules(currentDir, parentRules)
@@ -74,12 +63,7 @@ func (w *ProdWalker) walkDir(currentDir string, parentRules []ignoreRule) ([]str
if entry.IsDir() {
subFiles, err := w.walkDir(fullPath, rules)
if err != nil {
return nil, shared.WrapError(
err,
shared.ErrorTypeProcessing,
shared.CodeProcessingTraversal,
"failed to traverse subdirectory",
).WithFilePath(fullPath)
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingTraversal, "failed to traverse subdirectory").WithFilePath(fullPath)
}
results = append(results, subFiles...)
} else {

View File

@@ -2,66 +2,33 @@
package fileproc
import (
"fmt"
"os"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// startFormatWriter handles generic writer orchestration for any format.
// This eliminates code duplication across format-specific writer functions.
// Uses the FormatWriter interface defined in formats.go.
func startFormatWriter(
outFile *os.File,
writeCh <-chan WriteRequest,
done chan<- struct{},
prefix, suffix string,
writerFactory func(*os.File) FormatWriter,
) {
defer close(done)
writer := writerFactory(outFile)
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
shared.LogError("Failed to start writer", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
shared.LogError("Failed to write file", err)
}
}
// Close writer
if err := writer.Close(); err != nil {
shared.LogError("Failed to close writer", err)
}
}
// StartWriter writes the output in the specified format with memory optimization.
func StartWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, format, prefix, suffix string) {
switch format {
case shared.FormatMarkdown:
case "markdown":
startMarkdownWriter(outFile, writeCh, done, prefix, suffix)
case shared.FormatJSON:
case "json":
startJSONWriter(outFile, writeCh, done, prefix, suffix)
case shared.FormatYAML:
case "yaml":
startYAMLWriter(outFile, writeCh, done, prefix, suffix)
default:
context := map[string]any{
context := map[string]interface{}{
"format": format,
}
err := shared.NewStructuredError(
shared.ErrorTypeValidation,
shared.CodeValidationFormat,
"unsupported format: "+format,
err := utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
fmt.Sprintf("unsupported format: %s", format),
"",
context,
)
shared.LogError("Failed to encode output", err)
utils.LogError("Failed to encode output", err)
close(done)
}
}

View File

@@ -2,23 +2,17 @@ package fileproc_test
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"gopkg.in/yaml.v3"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/shared"
)
func TestStartWriterFormats(t *testing.T) {
func TestStartWriter_Formats(t *testing.T) {
// Define table-driven test cases
tests := []struct {
name string
@@ -32,8 +26,7 @@ func TestStartWriterFormats(t *testing.T) {
}
for _, tc := range tests {
t.Run(
tc.name, func(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
data := runWriterTest(t, tc.format)
if tc.expectError {
verifyErrorOutput(t, data)
@@ -41,8 +34,7 @@ func TestStartWriterFormats(t *testing.T) {
verifyValidOutput(t, data, tc.format)
verifyPrefixSuffix(t, data)
}
},
)
})
}
}
@@ -51,7 +43,7 @@ func runWriterTest(t *testing.T, format string) []byte {
t.Helper()
outFile, err := os.CreateTemp(t.TempDir(), "gibidify_test_output")
if err != nil {
t.Fatalf(shared.TestMsgFailedToCreateFile, err)
t.Fatalf("Failed to create temp file: %v", err)
}
defer func() {
if closeErr := outFile.Close(); closeErr != nil {
@@ -67,23 +59,21 @@ func runWriterTest(t *testing.T, format string) []byte {
doneCh := make(chan struct{})
// Write a couple of sample requests
writeCh <- fileproc.WriteRequest{Path: "sample.go", Content: shared.LiteralPackageMain}
writeCh <- fileproc.WriteRequest{Path: "sample.go", Content: "package main"}
writeCh <- fileproc.WriteRequest{Path: "example.py", Content: "def foo(): pass"}
close(writeCh)
// Start the writer
var wg sync.WaitGroup
wg.Go(func() {
wg.Add(1)
go func() {
defer wg.Done()
fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
})
}()
// Wait until writer signals completion
wg.Wait()
select {
case <-doneCh: // make sure all writes finished
case <-time.After(3 * time.Second):
t.Fatal(shared.TestMsgTimeoutWriterCompletion)
}
<-doneCh // make sure all writes finished
// Read output
data, err := os.ReadFile(outFile.Name())
@@ -121,11 +111,6 @@ func verifyValidOutput(t *testing.T, data []byte, format string) {
if !strings.Contains(content, "```") {
t.Error("Expected markdown code fences not found")
}
default:
// Unknown format - basic validation that we have content
if len(content) == 0 {
t.Errorf("Unexpected format %s with empty content", format)
}
}
}
@@ -140,490 +125,3 @@ func verifyPrefixSuffix(t *testing.T, data []byte) {
t.Errorf("Missing suffix in output: %s", data)
}
}
// verifyPrefixSuffixWith checks that output contains expected custom prefix and suffix.
func verifyPrefixSuffixWith(t *testing.T, data []byte, expectedPrefix, expectedSuffix string) {
t.Helper()
content := string(data)
if !strings.Contains(content, expectedPrefix) {
t.Errorf("Missing prefix '%s' in output: %s", expectedPrefix, data)
}
if !strings.Contains(content, expectedSuffix) {
t.Errorf("Missing suffix '%s' in output: %s", expectedSuffix, data)
}
}
// TestStartWriterStreamingFormats tests streaming functionality in all writers.
func TestStartWriterStreamingFormats(t *testing.T) {
tests := []struct {
name string
format string
content string
}{
{"JSON streaming", "json", strings.Repeat("line\n", 1000)},
{"YAML streaming", "yaml", strings.Repeat("data: value\n", 1000)},
{"Markdown streaming", "markdown", strings.Repeat("# Header\nContent\n", 1000)},
}
for _, tc := range tests {
t.Run(
tc.name, func(t *testing.T) {
data := runStreamingWriterTest(t, tc.format, tc.content)
// Verify output is not empty
if len(data) == 0 {
t.Error("Expected streaming output but got empty result")
}
// Format-specific validation
verifyValidOutput(t, data, tc.format)
verifyPrefixSuffixWith(t, data, "STREAM_PREFIX", "STREAM_SUFFIX")
// Verify content was written
content := string(data)
if !strings.Contains(content, shared.TestFileStreamTest) {
t.Error("Expected file path in streaming output")
}
},
)
}
}
// runStreamingWriterTest executes the writer with streaming content.
// It stages content in a temp file, streams it through StartWriter in the
// given format, and returns the raw bytes the writer produced.
func runStreamingWriterTest(t *testing.T, format, content string) []byte {
	t.Helper()
	// Stage the content to be streamed in a temporary file.
	src, err := os.CreateTemp(t.TempDir(), "content_*.txt")
	if err != nil {
		t.Fatalf("Failed to create content file: %v", err)
	}
	defer func() {
		if rmErr := os.Remove(src.Name()); rmErr != nil {
			t.Logf("Failed to remove content file: %v", rmErr)
		}
	}()
	if _, writeErr := src.WriteString(content); writeErr != nil {
		t.Fatalf("Failed to write content file: %v", writeErr)
	}
	if closeErr := src.Close(); closeErr != nil {
		t.Fatalf("Failed to close content file: %v", closeErr)
	}
	// Destination file that receives the writer output.
	dst, err := os.CreateTemp(t.TempDir(), "gibidify_stream_test_output")
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	defer func() {
		if closeErr := dst.Close(); closeErr != nil {
			t.Errorf("close temp file: %v", closeErr)
		}
		if removeErr := os.Remove(dst.Name()); removeErr != nil {
			t.Errorf("remove temp file: %v", removeErr)
		}
	}()
	writeCh := make(chan fileproc.WriteRequest, 1)
	doneCh := make(chan struct{})
	// Reopen the staged content for the streaming reader.
	reader, err := os.Open(src.Name())
	if err != nil {
		t.Fatalf("Failed to open content file for reading: %v", err)
	}
	defer func() {
		if closeErr := reader.Close(); closeErr != nil {
			t.Logf("Failed to close reader: %v", closeErr)
		}
	}()
	// Queue exactly one streaming request, then close the channel so the
	// writer terminates after draining it.
	writeCh <- fileproc.WriteRequest{
		Path:     shared.TestFileStreamTest,
		Content:  "", // Empty for streaming
		IsStream: true,
		Reader:   reader,
	}
	close(writeCh)
	// Run the writer in its own goroutine and wait for it to return.
	var wg sync.WaitGroup
	wg.Go(func() {
		fileproc.StartWriter(dst, writeCh, doneCh, format, "STREAM_PREFIX", "STREAM_SUFFIX")
	})
	wg.Wait()
	// Also require the completion signal, bounded by a timeout.
	select {
	case <-doneCh:
	case <-time.After(3 * time.Second):
		t.Fatal(shared.TestMsgTimeoutWriterCompletion)
	}
	out, err := os.ReadFile(dst.Name())
	if err != nil {
		t.Fatalf("Error reading output file: %v", err)
	}
	return out
}
// setupReadOnlyFile creates a read-only file for error testing.
// Every write against the returned file descriptor fails, exercising the
// writer's I/O error path. The returned channel holds one queued request.
func setupReadOnlyFile(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
	t.Helper()
	path := filepath.Join(t.TempDir(), "readonly_out")
	f, err := os.Create(path)
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	// Drop the writable handle and reopen read-only so writes will fail.
	_ = f.Close()
	f, err = os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		t.Fatalf("Failed to reopen as read-only: %v", err)
	}
	writeCh := make(chan fileproc.WriteRequest, 1)
	doneCh := make(chan struct{})
	writeCh <- fileproc.WriteRequest{Path: shared.TestFileGo, Content: shared.LiteralPackageMain}
	close(writeCh)
	return f, writeCh, doneCh
}
// setupStreamingError creates a streaming request with a failing reader.
// The request's Reader is the read end of an io.Pipe whose write end was
// closed with an error, so the writer's first read fails with
// "simulated stream error".
func setupStreamingError(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
	t.Helper()
	outFile, err := os.CreateTemp(t.TempDir(), "yaml_stream_*")
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	writeCh := make(chan fileproc.WriteRequest, 1)
	doneCh := make(chan struct{})
	pr, pw := io.Pipe()
	// (*io.PipeWriter).CloseWithError is documented to always return nil,
	// so checking its result (as the previous version did) was dead code.
	_ = pw.CloseWithError(errors.New("simulated stream error"))
	writeCh <- fileproc.WriteRequest{
		Path:     "stream_fail.yaml",
		Content:  "", // Empty for streaming
		IsStream: true,
		Reader:   pr,
	}
	close(writeCh)
	return outFile, writeCh, doneCh
}
// setupSpecialCharacters creates requests with special characters.
// One request carries awkward bytes in both path and content; the second
// is an empty file, so writers must cope with both extremes.
func setupSpecialCharacters(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
	t.Helper()
	outFile, err := os.CreateTemp(t.TempDir(), "markdown_special_*")
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	writeCh := make(chan fileproc.WriteRequest, 2)
	doneCh := make(chan struct{})
	requests := []fileproc.WriteRequest{
		{Path: "special\ncharacters.md", Content: "Content with\x00null bytes and\ttabs"},
		{Path: "empty.md", Content: ""},
	}
	for _, req := range requests {
		writeCh <- req
	}
	close(writeCh)
	return outFile, writeCh, doneCh
}
// runErrorHandlingTest runs a single error handling test.
// It drives StartWriter over the pre-filled channels, waits for completion
// (bounded by a timeout), and checks whether the output is empty or not
// according to expectEmpty.
func runErrorHandlingTest(
	t *testing.T,
	outFile *os.File,
	writeCh chan fileproc.WriteRequest,
	doneCh chan struct{},
	format string,
	expectEmpty bool,
) {
	t.Helper()
	// Defers run LIFO: the file is closed first, then removed.
	defer func() {
		if rmErr := os.Remove(outFile.Name()); rmErr != nil {
			t.Logf("Failed to remove temp file: %v", rmErr)
		}
	}()
	defer func() {
		if closeErr := outFile.Close(); closeErr != nil {
			t.Logf("Failed to close temp file: %v", closeErr)
		}
	}()
	var wg sync.WaitGroup
	wg.Go(func() {
		fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
	})
	wg.Wait()
	// Guard against a writer that never signals completion.
	select {
	case <-doneCh:
	case <-time.After(3 * time.Second):
		t.Fatal(shared.TestMsgTimeoutWriterCompletion)
	}
	out, err := os.ReadFile(outFile.Name())
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	switch {
	case expectEmpty && len(out) != 0:
		t.Errorf("expected empty output on error, got %d bytes", len(out))
	case !expectEmpty && len(out) == 0:
		t.Error("expected non-empty output, got empty")
	}
}
// TestStartWriterErrorHandling tests error scenarios in writers.
func TestStartWriterErrorHandling(t *testing.T) {
	cases := []struct {
		name              string
		format            string
		setupError        func(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{})
		expectEmptyOutput bool
	}{
		{
			name:              "JSON writer with read-only file",
			format:            "json",
			setupError:        setupReadOnlyFile,
			expectEmptyOutput: true,
		},
		{
			name:              "YAML writer with streaming error",
			format:            "yaml",
			setupError:        setupStreamingError,
			expectEmptyOutput: false, // Partial writes are acceptable before streaming errors
		},
		{
			name:              "Markdown writer with special characters",
			format:            "markdown",
			setupError:        setupSpecialCharacters,
			expectEmptyOutput: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			outFile, writeCh, doneCh := tc.setupError(t)
			runErrorHandlingTest(t, outFile, writeCh, doneCh, tc.format, tc.expectEmptyOutput)
		})
	}
}
// setupCloseTest sets up files and channels for close testing.
// It queues five small write requests on a buffered channel, closes the
// channel so the writer drains and exits, and returns the output file
// together with both writer channels.
func setupCloseTest(t *testing.T) (*os.File, chan fileproc.WriteRequest, chan struct{}) {
	t.Helper()
	outFile, err := os.CreateTemp(t.TempDir(), "close_test_*")
	if err != nil {
		t.Fatalf(shared.TestMsgFailedToCreateFile, err)
	}
	writeCh := make(chan fileproc.WriteRequest, 5)
	doneCh := make(chan struct{})
	// Range-over-int (Go 1.22+) matches the modern idioms already used in
	// this file (wg.Go, b.Loop).
	for i := range 5 {
		writeCh <- fileproc.WriteRequest{
			Path:    fmt.Sprintf("file%d.txt", i),
			Content: fmt.Sprintf("Content %d", i),
		}
	}
	close(writeCh)
	return outFile, writeCh, doneCh
}
// runCloseTest executes writer and validates output.
// It runs StartWriter over the pre-filled channels, waits for completion
// with a timeout, and verifies the output is non-empty and carries the
// expected prefix/suffix markers.
func runCloseTest(
	t *testing.T,
	outFile *os.File,
	writeCh chan fileproc.WriteRequest,
	doneCh chan struct{},
	format string,
) {
	t.Helper()
	// Defers run LIFO: close the file first, then remove it.
	defer func() {
		if rmErr := os.Remove(outFile.Name()); rmErr != nil {
			t.Logf("Failed to remove temp file: %v", rmErr)
		}
	}()
	defer func() {
		if closeErr := outFile.Close(); closeErr != nil {
			t.Logf("Failed to close temp file: %v", closeErr)
		}
	}()
	var wg sync.WaitGroup
	wg.Go(func() {
		fileproc.StartWriter(outFile, writeCh, doneCh, format, "TEST_PREFIX", "TEST_SUFFIX")
	})
	wg.Wait()
	select {
	case <-doneCh:
	case <-time.After(3 * time.Second):
		t.Fatal(shared.TestMsgTimeoutWriterCompletion)
	}
	out, err := os.ReadFile(outFile.Name())
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	if len(out) == 0 {
		t.Error("Expected non-empty output file")
	}
	verifyPrefixSuffixWith(t, out, "TEST_PREFIX", "TEST_SUFFIX")
}
// TestStartWriterWriterCloseErrors tests error handling during writer close operations.
func TestStartWriterWriterCloseErrors(t *testing.T) {
	cases := []struct {
		name   string
		format string
	}{
		{name: "JSON close handling", format: "json"},
		{name: "YAML close handling", format: "yaml"},
		{name: "Markdown close handling", format: "markdown"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			outFile, writeCh, doneCh := setupCloseTest(t)
			runCloseTest(t, outFile, writeCh, doneCh, tc.format)
		})
	}
}
// Benchmarks for writer performance

// BenchmarkStartWriter benchmarks basic writer operations across formats.
// Each iteration drives StartWriter over two small inline requests and
// waits for the completion signal.
func BenchmarkStartWriter(b *testing.B) {
	for _, format := range []string{"json", "yaml", "markdown"} {
		b.Run(format, func(b *testing.B) {
			for b.Loop() {
				out, err := os.CreateTemp(b.TempDir(), "bench_output_*")
				if err != nil {
					b.Fatalf("Failed to create temp file: %v", err)
				}
				reqs := make(chan fileproc.WriteRequest, 2)
				done := make(chan struct{})
				reqs <- fileproc.WriteRequest{Path: "sample.go", Content: shared.LiteralPackageMain}
				reqs <- fileproc.WriteRequest{Path: "example.py", Content: "def foo(): pass"}
				close(reqs)
				fileproc.StartWriter(out, reqs, done, format, "PREFIX", "SUFFIX")
				<-done
				_ = out.Close()
			}
		})
	}
}
// benchStreamingIteration runs a single streaming benchmark iteration.
// It stages content on disk, streams it through StartWriter in the given
// format, and waits for the writer to finish.
func benchStreamingIteration(b *testing.B, format, content string) {
	b.Helper()
	srcPath := createBenchContentFile(b, content)
	defer func() { _ = os.Remove(srcPath) }()
	src, err := os.Open(srcPath)
	if err != nil {
		b.Fatalf("Failed to open content file: %v", err)
	}
	defer func() { _ = src.Close() }()
	dst, err := os.CreateTemp(b.TempDir(), "bench_stream_output_*")
	if err != nil {
		b.Fatalf("Failed to create output file: %v", err)
	}
	defer func() { _ = dst.Close() }()
	reqs := make(chan fileproc.WriteRequest, 1)
	done := make(chan struct{})
	reqs <- fileproc.WriteRequest{
		Path:     shared.TestFileStreamTest,
		Content:  "",
		IsStream: true,
		Reader:   src,
	}
	close(reqs)
	fileproc.StartWriter(dst, reqs, done, format, "PREFIX", "SUFFIX")
	<-done
}
// createBenchContentFile creates a temp file with content for benchmarks.
// It returns the file's path; the file is fully written and closed.
func createBenchContentFile(b *testing.B, content string) string {
	b.Helper()
	f, err := os.CreateTemp(b.TempDir(), "content_*")
	if err != nil {
		b.Fatalf("Failed to create content file: %v", err)
	}
	_, writeErr := f.WriteString(content)
	if writeErr != nil {
		b.Fatalf("Failed to write content: %v", writeErr)
	}
	if closeErr := f.Close(); closeErr != nil {
		b.Fatalf("Failed to close content file: %v", closeErr)
	}
	return f.Name()
}
// BenchmarkStartWriterStreaming benchmarks streaming writer operations across formats.
// Each iteration streams ~13 KB of line-oriented content through the writer.
func BenchmarkStartWriterStreaming(b *testing.B) {
	payload := strings.Repeat("line content\n", 1000)
	for _, format := range []string{"json", "yaml", "markdown"} {
		b.Run(format, func(b *testing.B) {
			for b.Loop() {
				benchStreamingIteration(b, format, payload)
			}
		})
	}
}

View File

@@ -1,12 +1,13 @@
// Package fileproc handles file processing, collection, and output formatting.
package fileproc
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/utils"
)
// YAMLWriter handles YAML format output with streaming support.
@@ -22,15 +23,9 @@ func NewYAMLWriter(outFile *os.File) *YAMLWriter {
// Start writes the YAML header.
func (w *YAMLWriter) Start(prefix, suffix string) error {
// Write YAML header
if _, err := fmt.Fprintf(
w.outFile,
"prefix: %s\nsuffix: %s\nfiles:\n",
shared.EscapeForYAML(prefix),
shared.EscapeForYAML(suffix),
); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "failed to write YAML header")
if _, err := fmt.Fprintf(w.outFile, "prefix: %s\nsuffix: %s\nfiles:\n", yamlQuoteString(prefix), yamlQuoteString(suffix)); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML header")
}
return nil
}
@@ -39,7 +34,6 @@ func (w *YAMLWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
}
return w.writeInline(req)
}
@@ -50,35 +44,17 @@ func (w *YAMLWriter) Close() error {
// writeStreaming writes a large file as YAML in streaming chunks.
func (w *YAMLWriter) writeStreaming(req WriteRequest) error {
defer shared.SafeCloseReader(req.Reader, req.Path)
defer w.closeReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write YAML file entry start
if _, err := fmt.Fprintf(
w.outFile,
shared.YAMLFmtFileEntry,
shared.EscapeForYAML(req.Path),
language,
); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write YAML file start",
).WithFilePath(req.Path)
if _, err := fmt.Fprintf(w.outFile, " - path: %s\n language: %s\n content: |\n", yamlQuoteString(req.Path), language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML file start").WithFilePath(req.Path)
}
// Stream content with YAML indentation
if err := shared.StreamLines(
req.Reader, w.outFile, req.Path, func(line string) string {
return " " + line
},
); err != nil {
return shared.WrapError(err, shared.ErrorTypeIO, shared.CodeIOWrite, "streaming YAML content")
}
return nil
return w.streamYAMLContent(req.Reader, req.Path)
}
// writeInline writes a small file directly as YAML.
@@ -91,39 +67,82 @@ func (w *YAMLWriter) writeInline(req WriteRequest) error {
}
// Write YAML entry
if _, err := fmt.Fprintf(
w.outFile,
shared.YAMLFmtFileEntry,
shared.EscapeForYAML(fileData.Path),
fileData.Language,
); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write YAML entry start",
).WithFilePath(req.Path)
if _, err := fmt.Fprintf(w.outFile, " - path: %s\n language: %s\n content: |\n", yamlQuoteString(fileData.Path), fileData.Language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML entry start").WithFilePath(req.Path)
}
// Write indented content
lines := strings.Split(fileData.Content, "\n")
for _, line := range lines {
if _, err := fmt.Fprintf(w.outFile, " %s\n", line); err != nil {
return shared.WrapError(
err,
shared.ErrorTypeIO,
shared.CodeIOWrite,
"failed to write YAML content line",
).WithFilePath(req.Path)
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML content line").WithFilePath(req.Path)
}
}
return nil
}
// streamYAMLContent streams content with YAML indentation.
func (w *YAMLWriter) streamYAMLContent(reader io.Reader, path string) error {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
line := scanner.Text()
if _, err := fmt.Fprintf(w.outFile, " %s\n", line); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML line").WithFilePath(path)
}
}
if err := scanner.Err(); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIORead, "failed to scan YAML content").WithFilePath(path)
}
return nil
}
// closeReader safely closes a reader if it implements io.Closer.
func (w *YAMLWriter) closeReader(reader io.Reader, path string) {
if closer, ok := reader.(io.Closer); ok {
if err := closer.Close(); err != nil {
utils.LogError(
"Failed to close file reader",
utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOClose, "failed to close file reader").WithFilePath(path),
)
}
}
}
// yamlQuoteString quotes a string for YAML output if needed.
func yamlQuoteString(s string) string {
if s == "" {
return `""`
}
// Simple YAML quoting - use double quotes if string contains special characters
if strings.ContainsAny(s, "\n\r\t:\"'\\") {
return fmt.Sprintf(`"%s"`, strings.ReplaceAll(s, `"`, `\"`))
}
return s
}
// startYAMLWriter handles YAML format output with streaming support.
func startYAMLWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
startFormatWriter(outFile, writeCh, done, prefix, suffix, func(f *os.File) FormatWriter {
return NewYAMLWriter(f)
})
defer close(done)
writer := NewYAMLWriter(outFile)
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
utils.LogError("Failed to write YAML header", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
utils.LogError("Failed to write YAML file", err)
}
}
// Close writer
if err := writer.Close(); err != nil {
utils.LogError("Failed to write YAML end", err)
}
}

36
go.mod
View File

@@ -1,34 +1,32 @@
module github.com/ivuorinen/gibidify
go 1.25
toolchain go1.25.6
go 1.24.1
require (
github.com/fatih/color v1.18.0
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/schollz/progressbar/v3 v3.19.0
github.com/sirupsen/logrus v1.9.4
github.com/spf13/viper v1.21.0
golang.org/x/text v0.33.0
github.com/schollz/progressbar/v3 v3.18.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.20.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/sagikazarmark/locafero v0.8.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/term v0.39.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.23.0 // indirect
)

70
go.sum
View File

@@ -7,26 +7,27 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
@@ -35,35 +36,40 @@ github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZV
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/schollz/progressbar/v3 v3.19.0 h1:Ea18xuIRQXLAUidVDox3AbwfUhD0/1IvohyTutOIFoc=
github.com/schollz/progressbar/v3 v3.19.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/sagikazarmark/locafero v0.8.0 h1:mXaMVw7IqxNBxfv3LdWt9MDmcWDQ1fagDH918lOdVaQ=
github.com/sagikazarmark/locafero v0.8.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY=
github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@@ -1,217 +0,0 @@
// Package main provides core interfaces for the gibidify application.
package main
import (
"context"
"io"
"github.com/ivuorinen/gibidify/shared"
)
// Processor defines the interface for file processors.
// This interface allows for easier testing and mocking of the main processing logic.
type Processor interface {
// Process starts the file processing workflow with the given context.
// It returns an error if processing fails at any stage.
Process(ctx context.Context) error
}
// FileProcessorInterface defines the interface for individual file processing.
// This abstracts the file processing logic for better testability.
type FileProcessorInterface interface {
// ProcessFile processes a single file and sends the result to the output channel.
ProcessFile(ctx context.Context, filePath string, outCh chan<- WriteRequest)
// ProcessWithContext processes a file and returns the content directly.
ProcessWithContext(ctx context.Context, filePath string) (string, error)
}
// ResourceMonitorInterface defines the interface for resource monitoring.
// This allows for mocking and testing of resource management functionality.
type ResourceMonitorInterface interface {
// Start begins resource monitoring.
Start() error
// Stop stops resource monitoring and cleanup.
Stop() error
// CheckResourceLimits validates current resource usage against limits.
CheckResourceLimits() error
// Metrics returns current resource usage metrics.
Metrics() ResourceMetrics
}
// MetricsCollectorInterface defines the interface for metrics collection.
// This enables easier testing and different metrics backend implementations.
type MetricsCollectorInterface interface {
// RecordFileProcessed records the processing of a single file.
RecordFileProcessed(result FileProcessingResult)
// IncrementConcurrency increments the current concurrency counter.
IncrementConcurrency()
// DecrementConcurrency decrements the current concurrency counter.
DecrementConcurrency()
// CurrentMetrics returns the current processing metrics.
CurrentMetrics() ProcessingMetrics
// GenerateReport generates a comprehensive processing report.
GenerateReport() ProfileReport
// Reset resets all metrics to initial state.
Reset()
}
// UIManagerInterface defines the interface for user interface management.
// This abstracts UI operations for better testing and different UI implementations.
type UIManagerInterface interface {
// PrintInfo prints an informational message.
PrintInfo(message string)
// PrintWarning prints a warning message.
PrintWarning(message string)
// PrintError prints an error message.
PrintError(message string)
// PrintSuccess prints a success message.
PrintSuccess(message string)
// SetColorOutput enables or disables colored output.
SetColorOutput(enabled bool)
// SetProgressOutput enables or disables progress indicators.
SetProgressOutput(enabled bool)
}
// WriterInterface defines the interface for output writers.
// This allows for different output formats and destinations.
type WriterInterface interface {
// Write writes the processed content to the destination.
Write(req WriteRequest) error
// Close finalizes the output and closes any resources.
Close() error
// GetFormat returns the output format supported by this writer.
GetFormat() string
}
// BackpressureManagerInterface defines the interface for backpressure management.
// This abstracts memory and flow control for better testing.
type BackpressureManagerInterface interface {
// CheckBackpressure returns true if backpressure should be applied.
CheckBackpressure() bool
// UpdateMemoryUsage updates the current memory usage tracking.
UpdateMemoryUsage(bytes int64)
// GetMemoryUsage returns current memory usage statistics.
GetMemoryUsage() int64
// Reset resets backpressure state to initial values.
Reset()
}
// TemplateEngineInterface defines the interface for template processing.
// This allows for different templating systems and easier testing.
type TemplateEngineInterface interface {
// RenderHeader renders the document header using the configured template.
RenderHeader(ctx TemplateContext) (string, error)
// RenderFooter renders the document footer using the configured template.
RenderFooter(ctx TemplateContext) (string, error)
// RenderFileContent renders individual file content with formatting.
RenderFileContent(ctx FileContext) (string, error)
// RenderMetadata renders metadata section if enabled.
RenderMetadata(ctx TemplateContext) (string, error)
}
// ConfigLoaderInterface defines the interface for configuration management.
// This enables different configuration sources and easier testing.
type ConfigLoaderInterface interface {
// LoadConfig loads configuration from the appropriate source.
LoadConfig() error
// GetString returns a string configuration value.
GetString(key string) string
// GetInt returns an integer configuration value.
GetInt(key string) int
// GetBool returns a boolean configuration value.
GetBool(key string) bool
// GetStringSlice returns a string slice configuration value.
GetStringSlice(key string) []string
}
// LoggerInterface defines the interface for logging operations.
// This abstracts logging for better testing and different log backends.
type LoggerInterface = shared.Logger
// These types are referenced by the interfaces but need to be defined
// elsewhere in the codebase. They are included here for documentation.
type WriteRequest struct {
Path string
Content string
IsStream bool
Reader io.Reader
Size int64
}
type ResourceMetrics struct {
FilesProcessed int64
TotalSizeProcessed int64
ConcurrentReads int64
MaxConcurrentReads int64
}
type FileProcessingResult struct {
FilePath string
FileSize int64
Format string
Success bool
Error error
Skipped bool
SkipReason string
}
type ProcessingMetrics struct {
TotalFiles int64
ProcessedFiles int64
ErrorFiles int64
SkippedFiles int64
TotalSize int64
ProcessedSize int64
}
type ProfileReport struct {
Summary ProcessingMetrics
// Additional report fields would be defined in the metrics package
}
type TemplateContext struct {
Files []FileContext
// Additional context fields would be defined in the templates package
}
type FileContext struct {
Path string
Content string
// Additional file context fields would be defined in the templates package
}
type LogLevel int
const (
LogLevelDebug LogLevel = iota
LogLevelInfo
LogLevelWarn
LogLevelError
)

25
main.go
View File

@@ -4,12 +4,12 @@ package main
import (
"context"
"fmt"
"os"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/cli"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/shared"
)
func main() {
@@ -23,13 +23,13 @@ func main() {
if cli.IsUserError(err) {
errorFormatter.FormatError(err)
os.Exit(1)
}
// System errors still go to logger for debugging
logger := shared.GetLogger()
logger.Errorf("System error: %v", err)
} else {
// System errors still go to logrus for debugging
logrus.Errorf("System error: %v", err)
ui.PrintError("An unexpected error occurred. Please check the logs.")
os.Exit(2)
}
}
}
// Run executes the main logic of the CLI application using the provided context.
@@ -37,22 +37,13 @@ func run(ctx context.Context) error {
// Parse CLI flags
flags, err := cli.ParseFlags()
if err != nil {
return fmt.Errorf("parsing flags: %w", err)
return err
}
// Initialize logger with provided log level
logger := shared.GetLogger()
logger.SetLevel(shared.ParseLogLevel(flags.LogLevel))
// Load configuration
config.LoadConfig()
// Create and run processor
processor := cli.NewProcessor(flags)
if err := processor.Process(ctx); err != nil {
return fmt.Errorf("processing: %w", err)
}
return nil
return processor.Process(ctx)
}

View File

@@ -1,61 +0,0 @@
package main
import (
"errors"
"testing"
"github.com/ivuorinen/gibidify/cli"
)
// TestMainFunctionComponents tests the components used by main() function.
// Since main() calls os.Exit, we can't test it directly, but we can test
// the components it uses to increase coverage metrics.
func TestMainFunctionComponents(t *testing.T) {
	// main() builds a UI manager first; it must never be nil.
	uiManager := cli.NewUIManager()
	if uiManager == nil {
		t.Error("Expected NewUIManager to return non-nil UIManager")
	}

	// main() then wraps the UI manager in an error formatter.
	formatter := cli.NewErrorFormatter(uiManager)
	if formatter == nil {
		t.Error("Expected NewErrorFormatter to return non-nil ErrorFormatter")
	}
}
// TestUserErrorClassification tests the error classification used in main().
func TestUserErrorClassification(t *testing.T) {
	// A MissingSourceError must be classified as a user error.
	missingSrc := &cli.MissingSourceError{}
	if !cli.IsUserError(missingSrc) {
		t.Error("Expected cli.IsUserError to return true for MissingSourceError")
	}

	// A generic error must be treated as a system error.
	generic := errors.New("test system error")
	if cli.IsUserError(generic) {
		t.Error("Expected cli.IsUserError to return false for generic error")
	}

	// nil is never a user error.
	if cli.IsUserError(nil) {
		t.Error("Expected cli.IsUserError to return false for nil error")
	}
}
// TestMainPackageExports verifies main package exports are accessible.
// This is mainly for documentation and coverage tracking; run() itself is
// exercised by the other test files in this package.
func TestMainPackageExports(t *testing.T) {
	notes := []string{
		"main package exports verified:",
		"- run(context.Context) error function is accessible for testing",
		"- main() function follows standard Go main conventions",
		"- Package structure supports both execution and testing",
	}
	for _, note := range notes {
		t.Log(note)
	}
}

View File

@@ -1,264 +0,0 @@
package main
import (
"context"
"flag"
"os"
"testing"
"github.com/ivuorinen/gibidify/cli"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
// TestRunErrorPaths tests various error paths in the run() function.
// Each case rewrites os.Args and resets the package-level flag state
// before invoking run(), so the cases must execute sequentially.
func TestRunErrorPaths(t *testing.T) {
	tests := []struct {
		name        string             // subtest name
		setup       func(t *testing.T) // prepares os.Args and flag state for the case
		expectError bool               // whether run() is expected to fail
		errorSubstr string             // substring the error must contain (empty = any error)
	}{
		{
			name: "Invalid flags - missing source",
			setup: func(_ *testing.T) {
				// Reset flags and set invalid args
				cli.ResetFlags()
				flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
				// Set args with missing source
				os.Args = []string{
					"gibidify", shared.TestCLIFlagDestination, shared.TestOutputMD, shared.TestCLIFlagNoUI,
				}
			},
			expectError: true,
			errorSubstr: "parsing flags",
		},
		{
			name: "Invalid flags - invalid format",
			setup: func(t *testing.T) {
				// Reset flags and set invalid args
				cli.ResetFlags()
				flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
				srcDir := t.TempDir()
				outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
				testutil.CloseFile(t, outFile)
				// Set args with invalid format
				os.Args = []string{
					"gibidify", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
					shared.TestCLIFlagFormat, "invalid", shared.TestCLIFlagNoUI,
				}
			},
			expectError: true,
			errorSubstr: shared.TestOpParsingFlags,
		},
		{
			name: "Invalid source directory",
			setup: func(t *testing.T) {
				// Reset flags and set args with non-existent source
				cli.ResetFlags()
				flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
				outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
				testutil.CloseFile(t, outFile)
				os.Args = []string{
					"gibidify", shared.TestCLIFlagSource, "/nonexistent/directory",
					shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
				}
			},
			expectError: true,
			errorSubstr: shared.TestOpParsingFlags, // Flag validation catches this, not processing
		},
		{
			name: "Valid run with minimal setup",
			setup: func(t *testing.T) {
				// Reset flags
				cli.ResetFlags()
				flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
				// Create valid setup
				srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
					{
						Path: "",
						Files: []testutil.FileSpec{
							{Name: shared.TestFileTXT, Content: shared.TestContent},
						},
					},
				})
				outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
				testutil.CloseFile(t, outFile)
				os.Args = []string{
					"gibidify", shared.TestCLIFlagSource, srcDir,
					shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
				}
			},
			expectError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Suppress all output for cleaner test output
			restore := testutil.SuppressAllOutput(t)
			defer restore()

			// Setup test case
			tt.setup(t)

			// Run the function
			ctx := context.Background()
			err := run(ctx)

			// Check expectations
			if tt.expectError {
				testutil.AssertExpectedError(t, err, "run() with error case")
				if tt.errorSubstr != "" {
					testutil.AssertErrorContains(t, err, tt.errorSubstr, "run() error content")
				}
			} else {
				testutil.AssertNoError(t, err, "run() success case")
			}
		})
	}
}
// TestRunFlagParsing tests the flag parsing path in run() function.
// It supplies only the required flags so that every optional flag is
// exercised through its default value.
func TestRunFlagParsing(t *testing.T) {
	// Suppress logs for cleaner test output
	restoreLogs := testutil.SuppressLogs(t)
	defer restoreLogs()

	// Save original args
	oldArgs := os.Args
	defer func() { os.Args = oldArgs }()

	// Test with empty args (should use defaults)
	t.Run("default args", func(t *testing.T) {
		// Reset flags
		cli.ResetFlags()
		flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)

		// Create minimal valid setup
		srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
			{
				Path: "",
				Files: []testutil.FileSpec{
					{Name: shared.TestFileTXT, Content: shared.TestContent},
				},
			},
		})
		outFile, outPath := testutil.CreateTempOutputFile(t, "test_output.json")
		testutil.CloseFile(t, outFile)

		// Set minimal required args
		os.Args = []string{
			"gibidify", shared.TestCLIFlagSource, srcDir,
			shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
		}

		// Run and verify it works with defaults
		ctx := context.Background()
		err := run(ctx)
		testutil.AssertNoError(t, err, "run() with default flags")
	})
}
// TestRunWithCanceledContext tests run() with pre-canceled context.
// Cancellation before run() starts must abort processing; the assertions
// below require an error whose text mentions "processing".
func TestRunWithCanceledContext(t *testing.T) {
	// Suppress logs for cleaner test output
	restoreLogs := testutil.SuppressLogs(t)
	defer restoreLogs()

	// Save original args
	oldArgs := os.Args
	defer func() { os.Args = oldArgs }()

	// Reset flags
	cli.ResetFlags()
	flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)

	// Create valid setup
	srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
		{
			Path: "",
			Files: []testutil.FileSpec{
				{Name: shared.TestFileGo, Content: shared.LiteralPackageMain + "\nfunc main() {}"},
			},
		},
	})
	outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
	testutil.CloseFile(t, outFile)

	os.Args = []string{
		"gibidify", shared.TestCLIFlagSource, srcDir,
		shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
	}

	// Create canceled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // Cancel immediately

	// Run with canceled context
	err := run(ctx)

	// Should get processing error due to canceled context
	testutil.AssertExpectedError(t, err, "run() with canceled context")
	testutil.AssertErrorContains(t, err, "processing", "run() canceled context error")
}
// TestRunLogLevel tests the log level setting in run().
// Every accepted -log-level value should parse and let run() complete
// successfully on a minimal source tree.
func TestRunLogLevel(t *testing.T) {
	// Suppress logs for cleaner test output
	restoreLogs := testutil.SuppressLogs(t)
	defer restoreLogs()

	// Save original args
	oldArgs := os.Args
	defer func() { os.Args = oldArgs }()

	tests := []struct {
		name     string // subtest name
		logLevel string // value passed via -log-level
	}{
		{"debug level", "debug"},
		{"info level", "info"},
		{"warn level", "warn"},
		{"error level", "error"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Reset flags
			cli.ResetFlags()
			flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)

			// Create valid setup
			srcDir := testutil.SetupTempDirWithStructure(t, []testutil.DirSpec{
				{
					Path: "",
					Files: []testutil.FileSpec{
						{Name: shared.TestFileTXT, Content: shared.TestContent},
					},
				},
			})
			outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestOutputMD)
			testutil.CloseFile(t, outFile)

			// Set args with log level
			os.Args = []string{
				"gibidify", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
				"-log-level", tt.logLevel, shared.TestCLIFlagNoUI,
			}

			// Run
			ctx := context.Background()
			err := run(ctx)

			// Should succeed
			testutil.AssertNoError(t, err, "run() with log level "+tt.logLevel)
		})
	}
}

View File

@@ -1,372 +0,0 @@
package main
import (
"context"
"errors"
"flag"
"os"
"path/filepath"
"strings"
"testing"
"github.com/ivuorinen/gibidify/cli"
"github.com/ivuorinen/gibidify/shared"
"github.com/ivuorinen/gibidify/testutil"
)
// withIsolatedFlags sets up isolated flag state for testing and returns a cleanup function.
// This helper saves the original os.Args and flag.CommandLine, resets CLI flags,
// and creates a fresh FlagSet to avoid conflicts between tests.
func withIsolatedFlags(t *testing.T) func() {
	t.Helper()

	savedArgs, savedFlagSet := os.Args, flag.CommandLine
	cli.ResetFlags()
	flag.CommandLine = flag.NewFlagSet("test", flag.ContinueOnError)

	// The returned closure restores the saved globals; callers defer it.
	return func() {
		os.Args = savedArgs
		flag.CommandLine = savedFlagSet
	}
}
// TestRun_FlagParsingErrors tests error handling in flag parsing.
// Both subtests expect run() to fail before any processing starts, with
// an error containing the shared "parsing flags" operation marker.
func TestRunFlagParsingErrors(t *testing.T) {
	// Test with isolated flag state to avoid conflicts with other tests
	t.Run("invalid_flag", func(t *testing.T) {
		cleanup := withIsolatedFlags(t)
		defer cleanup()

		os.Args = []string{"test", shared.TestCLIFlagNoUI, "value"}

		err := run(context.Background())
		if err == nil {
			t.Fatal("Expected error from invalid flag")
		}
		if !strings.Contains(err.Error(), shared.TestOpParsingFlags) {
			t.Errorf("Expected 'parsing flags' error, got: %v", err)
		}
	})

	t.Run("invalid_format", func(t *testing.T) {
		cleanup := withIsolatedFlags(t)
		defer cleanup()

		// Create temporary files for the test
		srcDir := t.TempDir()
		testutil.CreateTestFile(t, srcDir, shared.TestFileTXT, []byte("test"))
		outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
		testutil.CloseFile(t, outFile)
		defer func() {
			if err := os.Remove(outPath); err != nil {
				t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
			}
		}()

		os.Args = []string{
			"test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
			shared.TestCLIFlagFormat, "invalid", shared.TestCLIFlagNoUI,
		}

		err := run(context.Background())
		if err == nil {
			t.Fatal("Expected error from invalid format")
		}
		if !strings.Contains(err.Error(), shared.TestOpParsingFlags) {
			t.Errorf("Expected 'parsing flags' error, got: %v", err)
		}
	})
}
// TestRun_ProcessingErrors tests processing-related error paths:
// a source directory that does not exist, and a missing -source flag
// (which must surface as a user error).
func TestRunProcessingErrors(t *testing.T) {
	t.Run("nonexistent_source", func(t *testing.T) {
		cleanup := withIsolatedFlags(t)
		defer cleanup()

		outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
		testutil.CloseFile(t, outFile)
		defer func() {
			if err := os.Remove(outPath); err != nil {
				t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
			}
		}()

		// Use a path that doesn't exist (subpath under temp dir that was never created)
		nonExistentDir := filepath.Join(t.TempDir(), "nonexistent", "path")

		os.Args = []string{
			"test", shared.TestCLIFlagSource, nonExistentDir,
			shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
		}

		err := run(context.Background())
		if err == nil {
			t.Fatal("Expected error from nonexistent source")
		}
		// Could be either parsing flags (validation) or processing error
		if !strings.Contains(err.Error(), shared.TestOpParsingFlags) && !strings.Contains(err.Error(), "processing") {
			t.Errorf("Expected error from parsing or processing, got: %v", err)
		}
	})

	t.Run("missing_source", func(t *testing.T) {
		cleanup := withIsolatedFlags(t)
		defer cleanup()

		outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
		testutil.CloseFile(t, outFile)
		defer func() {
			if err := os.Remove(outPath); err != nil {
				t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
			}
		}()

		os.Args = []string{"test", shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI}

		err := run(context.Background())
		if err == nil {
			t.Fatal("Expected error from missing source")
		}
		// Should be a user error
		if !cli.IsUserError(err) {
			t.Errorf("Expected user error, got: %v", err)
		}
	})
}
// TestRun_MarkdownExecution tests successful markdown execution.
// It verifies run() creates a non-empty markdown output file from a
// directory containing two Go sources.
func TestRunMarkdownExecution(t *testing.T) {
	// Suppress all output for cleaner test output
	restore := testutil.SuppressAllOutput(t)
	defer restore()

	cleanup := withIsolatedFlags(t)
	defer cleanup()

	// Create test environment
	srcDir := t.TempDir()
	testutil.CreateTestFiles(t, srcDir, []testutil.FileSpec{
		{Name: shared.TestFileMainGo, Content: shared.LiteralPackageMain + "\nfunc main() {}"},
		{Name: shared.TestFileHelperGo, Content: shared.LiteralPackageMain + "\nfunc help() {}"},
	})

	// Use non-existent output path to verify run() actually creates it
	outPath := filepath.Join(t.TempDir(), "output.md")
	defer func() {
		if err := os.Remove(outPath); err != nil && !os.IsNotExist(err) {
			t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
		}
	}()

	os.Args = []string{
		"test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
		shared.TestCLIFlagFormat, "markdown", shared.TestCLIFlagNoUI,
	}

	err := run(context.Background())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Verify output file was created with content
	info, err := os.Stat(outPath)
	if os.IsNotExist(err) {
		t.Fatal("Output file was not created")
	}
	if err != nil {
		t.Fatalf("Failed to stat output file: %v", err)
	}
	if info.Size() == 0 {
		t.Error("Output file is empty, expected content")
	}
}
// TestRun_JSONExecution tests successful JSON execution.
// Mirrors TestRunMarkdownExecution but with -format json and a single
// source file; asserts a non-empty output file is produced.
func TestRunJSONExecution(t *testing.T) {
	// Suppress all output for cleaner test output
	restore := testutil.SuppressAllOutput(t)
	defer restore()

	cleanup := withIsolatedFlags(t)
	defer cleanup()

	// Create test environment with unique directories
	srcDir := t.TempDir()
	testutil.CreateTestFile(t, srcDir, shared.TestFileMainGo, []byte(shared.LiteralPackageMain))

	// Use non-existent output path to verify run() actually creates it
	outPath := filepath.Join(t.TempDir(), "output.json")
	defer func() {
		if err := os.Remove(outPath); err != nil && !os.IsNotExist(err) {
			t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
		}
	}()

	// Set CLI args with fresh paths
	os.Args = []string{
		"test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath,
		shared.TestCLIFlagFormat, "json", shared.TestCLIFlagNoUI,
	}

	err := run(context.Background())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Verify output file was created with content
	info, err := os.Stat(outPath)
	if os.IsNotExist(err) {
		t.Fatal("Output file was not created")
	}
	if err != nil {
		t.Fatalf("Failed to stat output file: %v", err)
	}
	if info.Size() == 0 {
		t.Error("Output file is empty, expected content")
	}
}
// TestRun_ErrorWrapping tests that errors are properly wrapped.
// An unknown flag must produce an error prefixed with "parsing flags:".
func TestRunErrorWrapping(t *testing.T) {
	cleanup := withIsolatedFlags(t)
	defer cleanup()

	os.Args = []string{"test", "-invalid-flag"}

	err := run(context.Background())
	switch {
	case err == nil:
		t.Fatal("Expected error")
	case !strings.Contains(err.Error(), "parsing flags:"):
		// Should wrap with proper context
		t.Errorf("Error not properly wrapped, got: %v", err)
	}
}
// TestRun_HappyPathWithDefaultConfig tests successful execution with default configuration.
// This validates that run() completes successfully when given valid inputs,
// implicitly exercising the config loading path without directly verifying it.
func TestRunHappyPathWithDefaultConfig(t *testing.T) {
	// Suppress all output for cleaner test output
	restore := testutil.SuppressAllOutput(t)
	defer restore()

	cleanup := withIsolatedFlags(t)
	defer cleanup()

	// Create valid test setup: one Go file and a pre-created output file.
	srcDir := t.TempDir()
	testutil.CreateTestFile(t, srcDir, shared.TestFileGo, []byte(shared.LiteralPackageMain))
	outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
	testutil.CloseFile(t, outFile)
	defer func() {
		if err := os.Remove(outPath); err != nil {
			t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
		}
	}()

	// Only the required flags are passed; everything else uses defaults.
	os.Args = []string{
		"test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
	}

	err := run(context.Background())
	if err != nil {
		t.Fatalf("run() failed: %v", err)
	}
}
// TestErrorClassification tests user vs system error classification.
// Each case feeds one error value to cli.IsUserError and checks the verdict.
func TestErrorClassification(t *testing.T) {
	cases := []struct {
		name      string
		err       error
		isUserErr bool
	}{
		{name: "nil_error", err: nil, isUserErr: false},
		{name: "cli_missing_source", err: cli.NewCLIMissingSourceError(), isUserErr: true},
		{name: "flag_error", err: errors.New("flag: invalid argument"), isUserErr: true},
		{name: "permission_denied", err: errors.New("permission denied"), isUserErr: true},
		{name: "file_not_found", err: errors.New("file not found"), isUserErr: true},
		{name: "generic_system_error", err: errors.New("internal system failure"), isUserErr: false},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := cli.IsUserError(tc.err); got != tc.isUserErr {
				t.Errorf("IsUserError(%v) = %v, want %v", tc.err, got, tc.isUserErr)
			}
		})
	}
}
// TestRun_ContextCancellation tests context cancellation handling.
// A context canceled before run() starts must abort the run with an
// error that is (or wraps) context.Canceled.
func TestRunContextCancellation(t *testing.T) {
	// Suppress all output for cleaner test output
	restore := testutil.SuppressAllOutput(t)
	defer restore()

	cleanup := withIsolatedFlags(t)
	defer cleanup()

	// Create test environment
	srcDir := t.TempDir()
	testutil.CreateTestFile(t, srcDir, shared.TestFileGo, []byte(shared.LiteralPackageMain))
	outFile, outPath := testutil.CreateTempOutputFile(t, shared.TestMD)
	testutil.CloseFile(t, outFile)
	defer func() {
		if err := os.Remove(outPath); err != nil {
			t.Logf(shared.TestMsgFailedToRemoveTempFile, err)
		}
	}()

	os.Args = []string{
		"test", shared.TestCLIFlagSource, srcDir, shared.TestCLIFlagDestination, outPath, shared.TestCLIFlagNoUI,
	}

	// Create pre-canceled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	err := run(ctx)
	// Assert that canceled context causes an error
	if err == nil {
		t.Error("Expected error with canceled context, got nil")
	} else if !errors.Is(err, context.Canceled) && !strings.Contains(err.Error(), "context canceled") {
		t.Errorf("Expected context.Canceled error, got: %v", err)
	}
}

View File

@@ -2,15 +2,11 @@ package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ivuorinen/gibidify/cli"
"github.com/ivuorinen/gibidify/testutil"
)
@@ -18,20 +14,8 @@ const (
testFileCount = 1000
)
// resetFlagState resets the global flag state to allow multiple test runs.
// Callers must invoke it before each test that re-parses CLI flags, since
// both the cli package and flag.CommandLine hold package-level state.
func resetFlagState() {
	// Reset both the flag.CommandLine and cli global state for clean testing
	cli.ResetFlags()
	flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
}
// TestIntegrationFullCLI simulates a full run of the CLI application using adaptive concurrency.
func TestIntegrationFullCLI(t *testing.T) {
// Suppress logs for cleaner test output
restore := testutil.SuppressAllOutput(t)
defer restore()
resetFlagState()
srcDir := setupTestFiles(t)
outFilePath := setupOutputFile(t)
setupCLIArgs(srcDir, outFilePath)
@@ -86,11 +70,6 @@ func verifyOutput(t *testing.T, outFilePath string) {
// TestIntegrationCancellation verifies that the application correctly cancels processing when the context times out.
func TestIntegrationCancellation(t *testing.T) {
// Suppress logs for cleaner test output
restore := testutil.SuppressAllOutput(t)
defer restore()
resetFlagState()
// Create a temporary source directory with many files to simulate a long-running process.
srcDir := t.TempDir()
@@ -112,10 +91,10 @@ func TestIntegrationCancellation(t *testing.T) {
// Set up CLI arguments.
testutil.SetupCLIArgs(srcDir, outFilePath, "PREFIX", "SUFFIX", 2)
// Create a context with a short timeout to force cancellation.
// Create a context with a very short timeout to force cancellation.
ctx, cancel := context.WithTimeout(
t.Context(),
5*time.Millisecond,
1*time.Millisecond,
)
defer cancel()
@@ -125,86 +104,3 @@ func TestIntegrationCancellation(t *testing.T) {
t.Error("Expected Run to fail due to cancellation, but it succeeded")
}
}
// BenchmarkRun benchmarks the run() function performance over a small
// three-file source tree, recreated fresh for every iteration.
func BenchmarkRun(b *testing.B) {
	// Save original args (only os.Args is restored; flag.CommandLine is
	// rebuilt fresh inside each iteration).
	oldArgs := os.Args
	defer func() { os.Args = oldArgs }()

	ctx := context.Background()
	for b.Loop() {
		// Create fresh directories for each iteration
		srcDir := b.TempDir()
		outDir := b.TempDir()

		// Create benchmark files
		files := map[string]string{
			"bench1.go":  "package main\n// Benchmark file 1",
			"bench2.txt": "Benchmark content file 2",
			"bench3.md":  "# Benchmark markdown file",
		}
		for name, content := range files {
			filePath := filepath.Join(srcDir, name)
			if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil {
				b.Fatalf("Failed to create benchmark file %s: %v", name, err)
			}
		}

		outFilePath := filepath.Join(outDir, "output.md")

		// Reset flags for each iteration
		cli.ResetFlags()
		flag.CommandLine = flag.NewFlagSet("bench", flag.ContinueOnError)
		os.Args = []string{"gibidify", "-source", srcDir, "-destination", outFilePath, "-no-ui"}

		if err := run(ctx); err != nil {
			b.Fatalf("run() failed in benchmark: %v", err)
		}
	}
}
// BenchmarkRunLargeFiles benchmarks the run() function with larger files.
func BenchmarkRunLargeFiles(b *testing.B) {
	// Save original args
	oldArgs := os.Args
	defer func() { os.Args = oldArgs }()

	// ~48KB of repeated text per file (48-byte line x 1000 repeats).
	largeContent := strings.Repeat("This is a large file for benchmarking purposes.\n", 1000)

	ctx := context.Background()
	for b.Loop() {
		// Create fresh directories for each iteration
		srcDir := b.TempDir()
		outDir := b.TempDir()

		// Create large benchmark files
		files := map[string]string{
			"large1.go":  "package main\n" + largeContent,
			"large2.txt": largeContent,
			"large3.md":  "# Large File\n" + largeContent,
		}
		for name, content := range files {
			filePath := filepath.Join(srcDir, name)
			if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil {
				b.Fatalf("Failed to create large benchmark file %s: %v", name, err)
			}
		}

		outFilePath := filepath.Join(outDir, "output.md")

		// Reset flags for each iteration
		cli.ResetFlags()
		flag.CommandLine = flag.NewFlagSet("bench", flag.ContinueOnError)
		os.Args = []string{"gibidify", "-source", srcDir, "-destination", outFilePath, "-no-ui"}

		if err := run(ctx); err != nil {
			b.Fatalf("run() failed in large files benchmark: %v", err)
		}
	}
}

Some files were not shown because too many files have changed in this diff Show More