feat: update go to 1.25, add permissions and envs (#49)

* chore(ci): update go to 1.25, add permissions and envs
* fix(ci): update pr-lint.yml
* chore: update go, fix linting
* fix: tests and linting
* fix(lint): lint fixes, renovate should now pass
* fix: updates, security upgrades
* chore: workflow updates, lint
* fix: more lint, checkmake, and other fixes
* fix: more lint, convert scripts to POSIX compliant
* fix: simplify codeql workflow
* tests: increase test coverage, fix found issues
* fix(lint): editorconfig checking, add to linters
* fix(lint): shellcheck, add to linters
* fix(lint): apply cr comment suggestions
* fix(ci): remove step-security/harden-runner
* fix(lint): remove duplication, apply cr fixes
* fix(ci): tests in CI/CD pipeline
* chore(lint): deduplication of strings
* fix(lint): apply cr comment suggestions
* fix(ci): actionlint
* fix(lint): apply cr comment suggestions
* chore: lint, add deps management
This commit is contained in:
2025-10-10 12:14:42 +03:00
committed by GitHub
parent 958f5952a0
commit 3f65b813bd
100 changed files with 6997 additions and 1225 deletions

View File

@@ -1,8 +1,14 @@
# checkmake configuration
# See: https://github.com/mrtazz/checkmake#configuration
# See: https://github.com/checkmake/checkmake#configuration
[rules.timestampexpansion]
disabled = true
[rules.maxbodylength]
disabled = true
[rules.minphony]
disabled = true
[rules.phonydeclared]
disabled = true

38
.dockerignore Normal file
View File

@@ -0,0 +1,38 @@
# Git
.git
.github
.gitignore
# Build artifacts
gibidify
gibidify-*
dist/
coverage.out
coverage.html
test-results.json
*.sarif
# Documentation
*.md
docs/
# Config and tooling
.checkmake
.editorconfig
.golangci.yml
.yamllint
revive.toml
# Scripts
scripts/
# IDE
.vscode
.idea
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db

View File

@@ -8,19 +8,26 @@ indent_size = 2
indent_style = tab
tab_width = 2
[*.yml]
indent_style = space
[*.go]
max_line_length = 120
[*.md]
trim_trailing_whitespace = false
[*.{yml,yaml,json}]
[*.{yml,yaml,json,toml}]
indent_style = space
max_line_length = 250
[*.{yaml.example,yml.example}]
indent_style = space
[.yamllint]
indent_style = space
[LICENSE]
max_line_length = 80
indent_size = 0
indent_style = space
trim_trailing_whitespace = true
[Makefile]
max_line_length = 80

View File

@@ -0,0 +1,14 @@
{
"Exclude": [".git", "vendor", "node_modules", "README\\.md"],
"AllowedContentTypes": [],
"PassedFiles": [],
"Disable": {
"IndentSize": false,
"EndOfLine": false,
"InsertFinalNewline": false,
"TrimTrailingWhitespace": false,
"MaxLineLength": false
},
"SpacesAfterTabs": false,
"NoColor": false
}

15
.github/actions/setup/action.yml vendored Normal file
View File

@@ -0,0 +1,15 @@
name: "Setup Go with Runner Hardening"
description: "Reusable action to set up Go"
inputs:
token:
description: "GitHub token for checkout (optional)"
required: false
default: ""
runs:
using: "composite"
steps:
- name: Set up Go
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version-file: "go.mod"
cache: true

View File

@@ -9,8 +9,7 @@ on:
release:
types: [created]
permissions:
contents: read
permissions: {}
jobs:
test:
@@ -25,51 +24,60 @@ jobs:
statuses: write
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
- name: Checkout code
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Go
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: ./.github/actions/setup
with:
go-version-file: "./go.mod"
cache: true
token: ${{ github.token }}
- name: Install dependencies
run: go mod tidy
- name: Download dependencies
shell: bash
run: go mod download
- name: Run tests
run: go test -json ./... > test-results.json
- name: Generate coverage report
run: go test -coverprofile=coverage.out ./...
- name: Run tests with coverage
shell: bash
run: |
go test -race -covermode=atomic -json -coverprofile=coverage.out ./... | tee test-results.json
- name: Check coverage
id: coverage
if: always()
shell: bash
run: |
if [[ ! -f coverage.out ]]; then
echo "coverage.out is missing; tests likely failed before producing coverage"
exit 1
fi
coverage="$(go tool cover -func=coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}')"
echo "total_coverage=$coverage" >> "$GITHUB_ENV"
echo "Coverage: $coverage%"
- name: Upload test results
if: always()
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: test-results
path: test-results.json
- name: Cleanup
run: rm coverage.out
if: always()
shell: bash
run: rm -f coverage.out test-results.json
- name: Fail if coverage is below threshold
if: always()
shell: bash
run: |
if (( $(echo "$total_coverage < 50" | bc -l) )); then
echo "Coverage ($total_coverage%) is below the threshold (50%)"
if [[ -z "${total_coverage:-}" ]]; then
echo "total_coverage is unset; previous step likely failed"
exit 1
fi
awk -v cov="$total_coverage" 'BEGIN{ if (cov < 60) exit 1; else exit 0 }' || {
echo "Coverage ($total_coverage%) is below the threshold (60%)"
exit 1
}
build:
name: Build Binaries
@@ -90,12 +98,12 @@ jobs:
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Go
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: ./.github/actions/setup
with:
go-version-file: "./go.mod"
token: ${{ github.token }}
- name: Run go mod tidy
run: go mod tidy
- name: Download dependencies
run: go mod download
- name: Build binary for ${{ matrix.goos }}-${{ matrix.goarch }}
run: |
@@ -132,24 +140,24 @@ jobs:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Download Linux binaries
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
- name: Setup Go
uses: ./.github/actions/setup
with:
name: gibidify-linux-amd64
path: .
token: ${{ github.token }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Log in to GitHub Container Registry
run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Build and push multi-arch Docker image
run: |
chmod +x gibidify-linux-amd64
mv gibidify-linux-amd64 gibidify
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
--tag ghcr.io/${{ github.repository }}/gibidify:${{ github.ref_name }} \
--tag ghcr.io/${{ github.repository }}/gibidify:latest \
--push \
--squash .
echo "${{ github.token }}" | docker login ghcr.io \
-u "$(echo "${{ github.actor }}" | tr '[:upper:]' '[:lower:]')" \
--password-stdin
- name: Build and push Docker image
run: |
repo="$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]')"
docker buildx build --platform linux/amd64 \
--tag "ghcr.io/${repo}/gibidify:${{ github.ref_name }}" \
--tag "ghcr.io/${repo}/gibidify:latest" \
--push .

39
.github/workflows/codeql.yml vendored Normal file
View File

@@ -0,0 +1,39 @@
name: CodeQL Analysis
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
permissions: {}
jobs:
analyze:
name: Analyze Code
runs-on: ubuntu-latest
permissions:
security-events: write
contents: read
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Go
uses: ./.github/actions/setup
with:
token: ${{ github.token }}
- name: Initialize CodeQL
uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7

View File

@@ -9,7 +9,7 @@ on:
pull_request:
branches: [master, main]
permissions: read-all
permissions: {}
jobs:
Linter:
@@ -21,7 +21,12 @@ jobs:
pull-requests: write
statuses: write
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Go
uses: ./.github/actions/setup
with:
token: ${{ secrets.GITHUB_TOKEN }}
token: ${{ github.token }}
- uses: ivuorinen/actions/pr-lint@dc895c40ffdce61ab057fb992f4e00f1efdcbcbf # 25.10.7

View File

@@ -7,45 +7,37 @@ on:
branches: [main, develop]
schedule:
# Run security scan weekly on Sundays at 00:00 UTC
- cron: '0 0 * * 0'
- cron: "0 0 * * 0"
permissions:
security-events: write
contents: read
actions: read
permissions: {}
jobs:
security:
name: Security Analysis
runs-on: ubuntu-latest
permissions:
security-events: write
contents: read
actions: read
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Go
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6
uses: ./.github/actions/setup
with:
go-version: '1.23'
- name: Cache Go modules
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
token: ${{ github.token }}
# Security Scanning with gosec
- name: Run gosec Security Scanner
uses: securego/gosec@15d5c61e866bc2e2e8389376a31f1e5e09bde7d8 # v2.22.9
with:
args: '-fmt sarif -out gosec-results.sarif ./...'
args: "-fmt sarif -out gosec-results.sarif ./..."
- name: Upload gosec results to GitHub Security tab
uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3
uses: github/codeql-action/upload-sarif@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7
if: always()
with:
sarif_file: gosec-results.sarif
@@ -60,24 +52,17 @@ jobs:
run: |
if [ -s govulncheck-results.json ]; then
echo "::warning::Vulnerability check completed. Check govulncheck-results.json for details."
if grep -q '"finding"' govulncheck-results.json; then
if grep -i -q '"finding"' govulncheck-results.json; then
echo "::error::Vulnerabilities found in dependencies!"
cat govulncheck-results.json
exit 1
fi
fi
# Additional Security Linting
- name: Run security-focused golangci-lint
run: |
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
golangci-lint run --enable=gosec,gocritic,bodyclose,rowserrcheck,misspell,unconvert,unparam,unused \
--timeout=5m
# Makefile Linting
- name: Run checkmake on Makefile
run: |
go install github.com/mrtazz/checkmake/cmd/checkmake@latest
go install github.com/checkmake/checkmake/cmd/checkmake@latest
checkmake --config=.checkmake Makefile
# Shell Script Formatting Check
@@ -86,27 +71,11 @@ jobs:
go install mvdan.cc/sh/v3/cmd/shfmt@latest
shfmt -d .
# YAML Linting
- name: Run YAML linting
run: |
go install github.com/excilsploft/yamllint@latest
yamllint -c .yamllint .
# Secrets Detection (basic patterns)
- name: Run secrets detection
run: |
echo "Scanning for potential secrets..."
# Look for common secret patterns
git log --all --full-history -- . | grep -i -E "(password|secret|key|token|api_key)" || true
find . -type f -name "*.go" -exec grep -H -i -E "(password|secret|key|token|api_key)\s*[:=]" {} \; || true
# Check for hardcoded IPs and URLs
- name: Check for hardcoded network addresses
run: |
echo "Scanning for hardcoded network addresses..."
find . -type f -name "*.go" -exec grep -H -E "([0-9]{1,3}\.){3}[0-9]{1,3}" {} \; || true
find . -type f -name "*.go" -exec grep -H -E "https?://[^/\s]+" {} \; | \
grep -v "example.com|localhost|127.0.0.1" || true
uses: ibiqlik/action-yamllint@2576378a8e339169678f9939646ee3ee325e845c # v3.1.1
with:
file_or_dir: .
strict: true
# Docker Security (if Dockerfile exists)
- name: Run Docker security scan
@@ -115,24 +84,9 @@ jobs:
docker run --rm -v "$PWD":/workspace \
aquasec/trivy:latest fs --security-checks vuln,config /workspace/Dockerfile || true
# SAST with CodeQL (if available)
- name: Initialize CodeQL
if: github.event_name != 'schedule'
uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3
with:
languages: go
- name: Autobuild
if: github.event_name != 'schedule'
uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3
- name: Perform CodeQL Analysis
if: github.event_name != 'schedule'
uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3
# Upload artifacts for review
- name: Upload security scan results
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
with:
name: security-scan-results

5
.gitignore vendored
View File

@@ -12,3 +12,8 @@ megalinter-reports/*
coverage.*
*.out
gibidify-benchmark
gosec-report.json
gosec-results.sarif
govulncheck-report.json
govulncheck-errors.log
security-report.md

View File

@@ -1 +1 @@
1.23.0
1.25.1

View File

@@ -15,6 +15,9 @@ PRINT_ALPACA: false # Print Alpaca logo in console
SARIF_REPORTER: true # Generate SARIF report
SHOW_SKIPPED_LINTERS: false # Show skipped linters in MegaLinter log
GO_REVIVE_CLI_LINT_MODE: project
DISABLE_LINTERS:
- REPOSITORY_DEVSKIM
- REPOSITORY_TRIVY
- GO_GOLANGCI_LINT

View File

@@ -5,7 +5,7 @@ repos:
- id: golangci-lint
args: ["--timeout=5m"]
- repo: https://github.com/tekwizely/pre-commit-golang
rev: v1.0.0-rc.1
rev: v1.0.0-rc.2
hooks:
- id: go-build-mod
alias: build
@@ -13,3 +13,12 @@ repos:
alias: tidy
- id: go-fmt
alias: fmt
- repo: https://github.com/editorconfig-checker/editorconfig-checker.python
rev: 3.4.0
hooks:
- id: editorconfig-checker
alias: ec
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.11.0.1
hooks:
- id: shellcheck

1
.serena/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/cache

73
.serena/project.yml Normal file
View File

@@ -0,0 +1,73 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: go
# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []
# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location
# (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given
# name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active
# and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks,
# e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation
# (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on
# track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []
# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""
project_name: "gibidify"

View File

@@ -44,4 +44,7 @@ EditorConfig (LF, tabs), semantic commits, testing required
## Workflow
1. `make lint-fix` first 2. >80% coverage 3. Follow patterns 4. Update docs
1. `make lint-fix` first
2. >80% coverage
3. Follow patterns
4. Update docs

View File

@@ -1,17 +1,38 @@
# Use a minimal base image
# Build stage - builds the binary for the target architecture
FROM --platform=$BUILDPLATFORM golang:1.25.1-alpine AS builder
# Build arguments automatically set by buildx
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /build
# Copy go mod files first for better layer caching
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Build the binary for the target platform
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
go build -ldflags="-s -w" -o gibidify .
# Runtime stage - minimal image with the binary
FROM alpine:3.22.1
# Add user
RUN useradd -ms /bin/bash gibidify
# Install ca-certificates for HTTPS and create non-root user
# hadolint ignore=DL3018
# kics-scan ignore-line
RUN apk add --no-cache ca-certificates && \
adduser -D -s /bin/sh gibidify
# Use the new user
# Copy the binary from builder
COPY --from=builder /build/gibidify /usr/local/bin/gibidify
# Use non-root user
USER gibidify
# Copy the gibidify binary into the container
COPY gibidify /usr/local/bin/gibidify
# Ensure the binary is executable
RUN chmod +x /usr/local/bin/gibidify
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/gibidify"]

View File

@@ -1,4 +1,8 @@
.PHONY: help install-tools lint lint-fix lint-verbose test coverage build clean all build-benchmark benchmark benchmark-collection benchmark-processing benchmark-concurrency benchmark-format security security-full vuln-check check-all dev-setup
.PHONY: all clean test test-coverage build coverage help lint lint-fix \
lint-verbose install-tools benchmark benchmark-collection \
benchmark-concurrency benchmark-format benchmark-processing \
build-benchmark check-all ci-lint ci-test dev-setup security \
security-full vuln-check deps-update deps-check deps-tidy
# Default target shows help
.DEFAULT_GOAL := help
@@ -16,6 +20,8 @@ install-tools:
@go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
@echo "Installing gofumpt..."
@go install mvdan.cc/gofumpt@latest
@echo "Installing golines..."
@go install github.com/segmentio/golines@latest
@echo "Installing goimports..."
@go install golang.org/x/tools/cmd/goimports@latest
@echo "Installing staticcheck..."
@@ -24,12 +30,19 @@ install-tools:
@go install github.com/securego/gosec/v2/cmd/gosec@latest
@echo "Installing gocyclo..."
@go install github.com/fzipp/gocyclo/cmd/gocyclo@latest
@echo "Installing revive..."
@go install github.com/mgechev/revive@latest
@echo "Installing checkmake..."
@go install github.com/mrtazz/checkmake/cmd/checkmake@latest
@go install github.com/checkmake/checkmake/cmd/checkmake@latest
@echo "Installing shellcheck..."
@go install github.com/koalaman/shellcheck/cmd/shellcheck@latest
@echo "Installing shfmt..."
@go install mvdan.cc/sh/v3/cmd/shfmt@latest
@echo "Installing yamllint (Go-based)..."
@go install github.com/excilsploft/yamllint@latest
@echo "Installing editorconfig-checker..."
@go install github.com/editorconfig-checker/editorconfig-checker/\
cmd/editorconfig-checker@latest
@echo "All tools installed successfully!"
# Run linters
@@ -40,6 +53,8 @@ lint:
lint-fix:
@echo "Running gofumpt..."
@gofumpt -l -w .
@echo "Running golines..."
@golines -w -m 120 --base-formatter="gofumpt" --shorten-comments .
@echo "Running goimports..."
@goimports -w -local github.com/ivuorinen/gibidify .
@echo "Running go fmt..."
@@ -47,32 +62,46 @@ lint-fix:
@echo "Running go mod tidy..."
@go mod tidy
@echo "Running shfmt formatting..."
@shfmt -w -i 2 -ci .
@shfmt -w -i 0 -ci .
@echo "Running golangci-lint with --fix..."
@golangci-lint run --fix ./...
@echo "Auto-fix completed. Running final lint check..."
@golangci-lint run ./...
@echo "Running revive..."
@revive -config revive.toml -formatter friendly ./...
@echo "Running checkmake..."
@checkmake --config=.checkmake Makefile
@echo "Running yamllint..."
@yamllint -c .yamllint .
@yamllint .
# Run linters with verbose output
lint-verbose:
@echo "Running golangci-lint (verbose)..."
@golangci-lint run -v ./...
@echo "Running checkmake (verbose)..."
@checkmake --config=.checkmake --format="{{.Line}}:{{.Rule}}:{{.Violation}}" Makefile
@checkmake --config=.checkmake \
--format="{{.Line}}:{{.Rule}}:{{.Violation}}" Makefile
@echo "Running shfmt check (verbose)..."
@shfmt -d .
@echo "Running yamllint (verbose)..."
@yamllint -c .yamllint -f parsable .
@yamllint .
# Run tests
test:
@echo "Running tests..."
@go test -race -v ./...
# Run tests with coverage output
test-coverage:
@echo "Running tests with coverage..."
@go test -race -v -coverprofile=coverage.out -covermode=atomic ./...
@echo ""
@echo "Coverage summary:"
@go tool cover -func=coverage.out | grep total:
@echo ""
@echo "Full coverage report saved to: coverage.out"
@echo "To view HTML report, run: make coverage"
# Run tests with coverage
coverage:
@echo "Running tests with coverage..."
@@ -94,8 +123,6 @@ clean:
@echo "Clean complete"
# CI-specific targets
.PHONY: ci-lint ci-test
ci-lint:
@golangci-lint run --out-format=github-actions ./...
@@ -138,10 +165,34 @@ security:
security-full:
@echo "Running full security analysis..."
@./scripts/security-scan.sh
@echo "Running additional security checks..."
@golangci-lint run --enable-all --disable=depguard,exhaustruct,ireturn,varnamelen,wrapcheck --timeout=10m
vuln-check:
@echo "Checking for dependency vulnerabilities..."
@go install golang.org/x/vuln/cmd/govulncheck@latest
@govulncheck ./...
# Dependency management targets
deps-check:
@echo "Checking for available dependency updates..."
@echo ""
@echo "Direct dependencies:"
@go list -u -m all | grep -v "indirect" | column -t
@echo ""
@echo "Note: Run 'make deps-update' to update all dependencies"
deps-update:
@echo "Updating all dependencies to latest versions..."
@go get -u ./...
@go mod tidy
@echo ""
@echo "Dependencies updated successfully!"
@echo "Running tests to verify compatibility..."
@go test ./...
@echo ""
@echo "Update complete. Run 'make lint-fix && make test' to verify."
deps-tidy:
@echo "Cleaning up dependencies..."
@go mod tidy
@go mod verify
@echo "Dependencies cleaned and verified successfully!"

View File

@@ -12,11 +12,11 @@ import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// BenchmarkResult represents the results of a benchmark run.
type BenchmarkResult struct {
// Result represents the results of a benchmark run.
type Result struct {
Name string
Duration time.Duration
FilesProcessed int
@@ -42,14 +42,14 @@ type CPUStats struct {
Goroutines int
}
// BenchmarkSuite represents a collection of benchmarks.
type BenchmarkSuite struct {
// Suite represents a collection of benchmarks.
type Suite struct {
Name string
Results []BenchmarkResult
Results []Result
}
// FileCollectionBenchmark benchmarks file collection operations.
func FileCollectionBenchmark(sourceDir string, numFiles int) (*BenchmarkResult, error) {
func FileCollectionBenchmark(sourceDir string, numFiles int) (*Result, error) {
// Load configuration to ensure proper file filtering
config.LoadConfig()
@@ -58,7 +58,12 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*BenchmarkResult,
if sourceDir == "" {
tempDir, cleanupFunc, err := createBenchmarkFiles(numFiles)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create benchmark files")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
"failed to create benchmark files",
)
}
cleanup = cleanupFunc
defer cleanup()
@@ -74,7 +79,12 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*BenchmarkResult,
// Run the file collection benchmark
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "benchmark file collection failed")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"benchmark file collection failed",
)
}
duration := time.Since(startTime)
@@ -91,7 +101,7 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*BenchmarkResult,
}
}
result := &BenchmarkResult{
result := &Result{
Name: "FileCollection",
Duration: duration,
FilesProcessed: len(files),
@@ -113,7 +123,9 @@ func FileCollectionBenchmark(sourceDir string, numFiles int) (*BenchmarkResult,
}
// FileProcessingBenchmark benchmarks full file processing pipeline.
func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (*BenchmarkResult, error) {
//
//revive:disable-next-line:function-length
func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (*Result, error) {
// Load configuration to ensure proper file filtering
config.LoadConfig()
@@ -122,7 +134,12 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Create temporary directory with test files
tempDir, cleanupFunc, err := createBenchmarkFiles(100)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create benchmark files")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
"failed to create benchmark files",
)
}
cleanup = cleanupFunc
defer cleanup()
@@ -132,7 +149,12 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Create temporary output file
outputFile, err := os.CreateTemp("", "benchmark_output_*."+format)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileCreate, "failed to create benchmark output file")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOFileCreate,
"failed to create benchmark output file",
)
}
defer func() {
if err := outputFile.Close(); err != nil {
@@ -154,13 +176,29 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
// Run the full processing pipeline
files, err := fileproc.CollectFiles(sourceDir)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "benchmark file collection failed")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"benchmark file collection failed",
)
}
// Process files with concurrency
err = runProcessingPipeline(context.Background(), files, outputFile, format, concurrency, sourceDir)
err = runProcessingPipeline(context.Background(), processingConfig{
files: files,
outputFile: outputFile,
format: format,
concurrency: concurrency,
sourceDir: sourceDir,
})
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingFileRead, "benchmark processing pipeline failed")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingFileRead,
"benchmark processing pipeline failed",
)
}
duration := time.Since(startTime)
@@ -177,7 +215,7 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
}
}
result := &BenchmarkResult{
result := &Result{
Name: fmt.Sprintf("FileProcessing_%s_c%d", format, concurrency),
Duration: duration,
FilesProcessed: len(files),
@@ -199,16 +237,22 @@ func FileProcessingBenchmark(sourceDir string, format string, concurrency int) (
}
// ConcurrencyBenchmark benchmarks different concurrency levels.
func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []int) (*BenchmarkSuite, error) {
suite := &BenchmarkSuite{
func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []int) (*Suite, error) {
suite := &Suite{
Name: "ConcurrencyBenchmark",
Results: make([]BenchmarkResult, 0, len(concurrencyLevels)),
Results: make([]Result, 0, len(concurrencyLevels)),
}
for _, concurrency := range concurrencyLevels {
result, err := FileProcessingBenchmark(sourceDir, format, concurrency)
if err != nil {
return nil, utils.WrapErrorf(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "concurrency benchmark failed for level %d", concurrency)
return nil, gibidiutils.WrapErrorf(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"concurrency benchmark failed for level %d",
concurrency,
)
}
suite.Results = append(suite.Results, *result)
}
@@ -217,16 +261,22 @@ func ConcurrencyBenchmark(sourceDir string, format string, concurrencyLevels []i
}
// FormatBenchmark benchmarks different output formats.
func FormatBenchmark(sourceDir string, formats []string) (*BenchmarkSuite, error) {
suite := &BenchmarkSuite{
func FormatBenchmark(sourceDir string, formats []string) (*Suite, error) {
suite := &Suite{
Name: "FormatBenchmark",
Results: make([]BenchmarkResult, 0, len(formats)),
Results: make([]Result, 0, len(formats)),
}
for _, format := range formats {
result, err := FileProcessingBenchmark(sourceDir, format, runtime.NumCPU())
if err != nil {
return nil, utils.WrapErrorf(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "format benchmark failed for format %s", format)
return nil, gibidiutils.WrapErrorf(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"format benchmark failed for format %s",
format,
)
}
suite.Results = append(suite.Results, *result)
}
@@ -238,7 +288,12 @@ func FormatBenchmark(sourceDir string, formats []string) (*BenchmarkSuite, error
func createBenchmarkFiles(numFiles int) (string, func(), error) {
tempDir, err := os.MkdirTemp("", "gibidify_benchmark_*")
if err != nil {
return "", nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create temp directory")
return "", nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
"failed to create temp directory",
)
}
cleanup := func() {
@@ -256,8 +311,15 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
{".go", "package main\n\nfunc main() {\n\tprintln(\"Hello, World!\")\n}"},
{".js", "console.log('Hello, World!');"},
{".py", "print('Hello, World!')"},
{".java", "public class Hello {\n\tpublic static void main(String[] args) {\n\t\tSystem.out.println(\"Hello, World!\");\n\t}\n}"},
{".cpp", "#include <iostream>\n\nint main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}"},
{
".java",
"public class Hello {\n\tpublic static void main(String[] args) {" +
"\n\t\tSystem.out.println(\"Hello, World!\");\n\t}\n}",
},
{
".cpp",
"#include <iostream>\n\nint main() {\n\tstd::cout << \"Hello, World!\" << std::endl;\n\treturn 0;\n}",
},
{".rs", "fn main() {\n\tprintln!(\"Hello, World!\");\n}"},
{".rb", "puts 'Hello, World!'"},
{".php", "<?php\necho 'Hello, World!';\n?>"},
@@ -272,9 +334,14 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
// Create subdirectories for some files
if i%10 == 0 {
subdir := filepath.Join(tempDir, fmt.Sprintf("subdir_%d", i/10))
if err := os.MkdirAll(subdir, 0o755); err != nil {
if err := os.MkdirAll(subdir, 0o750); err != nil {
cleanup()
return "", nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to create subdirectory")
return "", nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSAccess,
"failed to create subdirectory",
)
}
filename = filepath.Join(subdir, filename)
} else {
@@ -287,9 +354,14 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
content += fmt.Sprintf("// Line %d\n%s\n", j, fileType.content)
}
if err := os.WriteFile(filename, []byte(content), 0o644); err != nil {
if err := os.WriteFile(filename, []byte(content), 0o600); err != nil {
cleanup()
return "", nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileWrite, "failed to write benchmark file")
return "", nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOFileWrite,
"failed to write benchmark file",
)
}
}
@@ -297,23 +369,41 @@ func createBenchmarkFiles(numFiles int) (string, func(), error) {
}
// runProcessingPipeline runs the processing pipeline similar to main.go.
func runProcessingPipeline(ctx context.Context, files []string, outputFile *os.File, format string, concurrency int, sourceDir string) error {
fileCh := make(chan string, concurrency)
writeCh := make(chan fileproc.WriteRequest, concurrency)
// processingConfig holds configuration for processing pipeline.
type processingConfig struct {
files []string
outputFile *os.File
format string
concurrency int
sourceDir string
}
func runProcessingPipeline(ctx context.Context, config processingConfig) error {
fileCh := make(chan string, config.concurrency)
writeCh := make(chan fileproc.WriteRequest, config.concurrency)
writerDone := make(chan struct{})
// Start writer
go fileproc.StartWriter(outputFile, writeCh, writerDone, format, "", "")
go fileproc.StartWriter(config.outputFile, writeCh, writerDone, fileproc.WriterConfig{
Format: config.format,
Prefix: "",
Suffix: "",
})
// Get absolute path once
absRoot, err := utils.GetAbsolutePath(sourceDir)
absRoot, err := gibidiutils.GetAbsolutePath(config.sourceDir)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSPathResolution, "failed to get absolute path for source directory")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeFileSystem,
gibidiutils.CodeFSPathResolution,
"failed to get absolute path for source directory",
)
}
// Start workers with proper synchronization
var workersDone sync.WaitGroup
for i := 0; i < concurrency; i++ {
for i := 0; i < config.concurrency; i++ {
workersDone.Add(1)
go func() {
defer workersDone.Done()
@@ -324,7 +414,7 @@ func runProcessingPipeline(ctx context.Context, files []string, outputFile *os.F
}
// Send files to workers
for _, file := range files {
for _, file := range config.files {
select {
case <-ctx.Done():
close(fileCh)
@@ -347,8 +437,8 @@ func runProcessingPipeline(ctx context.Context, files []string, outputFile *os.F
return nil
}
// PrintBenchmarkResult prints a formatted benchmark result.
func PrintBenchmarkResult(result *BenchmarkResult) {
// PrintResult prints a formatted benchmark result.
func PrintResult(result *Result) {
fmt.Printf("=== %s ===\n", result.Name)
fmt.Printf("Duration: %v\n", result.Duration)
fmt.Printf("Files Processed: %d\n", result.FilesProcessed)
@@ -356,16 +446,17 @@ func PrintBenchmarkResult(result *BenchmarkResult) {
fmt.Printf("Files/sec: %.2f\n", result.FilesPerSecond)
fmt.Printf("Bytes/sec: %.2f MB/sec\n", result.BytesPerSecond/1024/1024)
fmt.Printf("Memory Usage: +%.2f MB (Sys: +%.2f MB)\n", result.MemoryUsage.AllocMB, result.MemoryUsage.SysMB)
fmt.Printf("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, time.Duration(result.MemoryUsage.PauseTotalNs))
pauseDuration := time.Duration(gibidiutils.SafeUint64ToInt64WithDefault(result.MemoryUsage.PauseTotalNs, 0))
fmt.Printf("GC Runs: %d (Pause: %v)\n", result.MemoryUsage.NumGC, pauseDuration)
fmt.Printf("Goroutines: %d\n", result.CPUUsage.Goroutines)
fmt.Println()
}
// PrintBenchmarkSuite prints all results in a benchmark suite.
func PrintBenchmarkSuite(suite *BenchmarkSuite) {
// PrintSuite prints all results in a benchmark suite.
func PrintSuite(suite *Suite) {
fmt.Printf("=== %s ===\n", suite.Name)
for _, result := range suite.Results {
PrintBenchmarkResult(&result)
for i := range suite.Results {
PrintResult(&suite.Results[i])
}
}
@@ -380,26 +471,41 @@ func RunAllBenchmarks(sourceDir string) error {
fmt.Println("Running file collection benchmark...")
result, err := FileCollectionBenchmark(sourceDir, 1000)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "file collection benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"file collection benchmark failed",
)
}
PrintBenchmarkResult(result)
PrintResult(result)
// Format benchmarks
fmt.Println("Running format benchmarks...")
formatSuite, err := FormatBenchmark(sourceDir, []string{"json", "yaml", "markdown"})
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "format benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"format benchmark failed",
)
}
PrintBenchmarkSuite(formatSuite)
PrintSuite(formatSuite)
// Concurrency benchmarks
fmt.Println("Running concurrency benchmarks...")
concurrencyLevels := []int{1, 2, 4, 8, runtime.NumCPU()}
concurrencySuite, err := ConcurrencyBenchmark(sourceDir, "json", concurrencyLevels)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "concurrency benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"concurrency benchmark failed",
)
}
PrintBenchmarkSuite(concurrencySuite)
PrintSuite(concurrencySuite)
return nil
}

View File

@@ -1,3 +1,4 @@
// Package cli provides command-line interface utilities for gibidify.
package cli
import (
@@ -6,7 +7,7 @@ import (
"path/filepath"
"strings"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// ErrorFormatter handles CLI-friendly error formatting with suggestions.
@@ -19,6 +20,11 @@ func NewErrorFormatter(ui *UIManager) *ErrorFormatter {
return &ErrorFormatter{ui: ui}
}
// Suggestion messages for error formatting.
const (
suggestionCheckPermissions = " %s Check file/directory permissions\n"
)
// FormatError formats an error with context and suggestions.
func (ef *ErrorFormatter) FormatError(err error) {
if err == nil {
@@ -26,7 +32,8 @@ func (ef *ErrorFormatter) FormatError(err error) {
}
// Handle structured errors
if structErr, ok := err.(*utils.StructuredError); ok {
var structErr *gibidiutils.StructuredError
if errors.As(err, &structErr) {
ef.formatStructuredError(structErr)
return
}
@@ -36,12 +43,12 @@ func (ef *ErrorFormatter) FormatError(err error) {
}
// formatStructuredError formats a structured error with context and suggestions.
func (ef *ErrorFormatter) formatStructuredError(err *utils.StructuredError) {
func (ef *ErrorFormatter) formatStructuredError(err *gibidiutils.StructuredError) {
// Print main error
ef.ui.PrintError("Error: %s", err.Message)
// Print error type and code
if err.Type != utils.ErrorTypeUnknown || err.Code != "" {
if err.Type != gibidiutils.ErrorTypeUnknown || err.Code != "" {
ef.ui.PrintInfo("Type: %s, Code: %s", err.Type.String(), err.Code)
}
@@ -69,15 +76,15 @@ func (ef *ErrorFormatter) formatGenericError(err error) {
}
// provideSuggestions provides helpful suggestions based on the error.
func (ef *ErrorFormatter) provideSuggestions(err *utils.StructuredError) {
func (ef *ErrorFormatter) provideSuggestions(err *gibidiutils.StructuredError) {
switch err.Type {
case utils.ErrorTypeFileSystem:
case gibidiutils.ErrorTypeFileSystem:
ef.provideFileSystemSuggestions(err)
case utils.ErrorTypeValidation:
case gibidiutils.ErrorTypeValidation:
ef.provideValidationSuggestions(err)
case utils.ErrorTypeProcessing:
case gibidiutils.ErrorTypeProcessing:
ef.provideProcessingSuggestions(err)
case utils.ErrorTypeIO:
case gibidiutils.ErrorTypeIO:
ef.provideIOSuggestions(err)
default:
ef.provideDefaultSuggestions()
@@ -85,17 +92,17 @@ func (ef *ErrorFormatter) provideSuggestions(err *utils.StructuredError) {
}
// provideFileSystemSuggestions provides suggestions for file system errors.
func (ef *ErrorFormatter) provideFileSystemSuggestions(err *utils.StructuredError) {
func (ef *ErrorFormatter) provideFileSystemSuggestions(err *gibidiutils.StructuredError) {
filePath := err.FilePath
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case utils.CodeFSAccess:
case gibidiutils.CodeFSAccess:
ef.suggestFileAccess(filePath)
case utils.CodeFSPathResolution:
case gibidiutils.CodeFSPathResolution:
ef.suggestPathResolution(filePath)
case utils.CodeFSNotFound:
case gibidiutils.CodeFSNotFound:
ef.suggestFileNotFound(filePath)
default:
ef.suggestFileSystemGeneral(filePath)
@@ -103,91 +110,91 @@ func (ef *ErrorFormatter) provideFileSystemSuggestions(err *utils.StructuredErro
}
// provideValidationSuggestions provides suggestions for validation errors.
func (ef *ErrorFormatter) provideValidationSuggestions(err *utils.StructuredError) {
func (ef *ErrorFormatter) provideValidationSuggestions(err *gibidiutils.StructuredError) {
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case utils.CodeValidationFormat:
ef.ui.printf(" Use a supported format: markdown, json, yaml\n")
ef.ui.printf(" Example: -format markdown\n")
case utils.CodeValidationSize:
ef.ui.printf(" Increase file size limit in config.yaml\n")
ef.ui.printf(" Use smaller files or exclude large files\n")
case gibidiutils.CodeValidationFormat:
ef.ui.printf(" %s Use a supported format: markdown, json, yaml\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Example: -format markdown\n", gibidiutils.IconBullet)
case gibidiutils.CodeValidationSize:
ef.ui.printf(" %s Increase file size limit in config.yaml\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Use smaller files or exclude large files\n", gibidiutils.IconBullet)
default:
ef.ui.printf(" Check your command line arguments\n")
ef.ui.printf(" Run with --help for usage information\n")
ef.ui.printf(" %s Check your command line arguments\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Run with --help for usage information\n", gibidiutils.IconBullet)
}
}
// provideProcessingSuggestions provides suggestions for processing errors.
func (ef *ErrorFormatter) provideProcessingSuggestions(err *utils.StructuredError) {
func (ef *ErrorFormatter) provideProcessingSuggestions(err *gibidiutils.StructuredError) {
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case utils.CodeProcessingCollection:
ef.ui.printf(" Check if the source directory exists and is readable\n")
ef.ui.printf(" Verify directory permissions\n")
case utils.CodeProcessingFileRead:
ef.ui.printf(" Check file permissions\n")
ef.ui.printf(" Verify the file is not corrupted\n")
case gibidiutils.CodeProcessingCollection:
ef.ui.printf(" %s Check if the source directory exists and is readable\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Verify directory permissions\n", gibidiutils.IconBullet)
case gibidiutils.CodeProcessingFileRead:
ef.ui.printf(" %s Check file permissions\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Verify the file is not corrupted\n", gibidiutils.IconBullet)
default:
ef.ui.printf(" Try reducing concurrency: -concurrency 1\n")
ef.ui.printf(" Check available system resources\n")
ef.ui.printf(" %s Try reducing concurrency: -concurrency 1\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Check available system resources\n", gibidiutils.IconBullet)
}
}
// provideIOSuggestions provides suggestions for I/O errors.
func (ef *ErrorFormatter) provideIOSuggestions(err *utils.StructuredError) {
func (ef *ErrorFormatter) provideIOSuggestions(err *gibidiutils.StructuredError) {
ef.ui.PrintWarning("Suggestions:")
switch err.Code {
case utils.CodeIOFileCreate:
ef.ui.printf(" Check if the destination directory exists\n")
ef.ui.printf(" Verify write permissions for the output file\n")
ef.ui.printf(" Ensure sufficient disk space\n")
case utils.CodeIOWrite:
ef.ui.printf(" Check available disk space\n")
ef.ui.printf(" Verify write permissions\n")
case gibidiutils.CodeIOFileCreate:
ef.ui.printf(" %s Check if the destination directory exists\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Verify write permissions for the output file\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Ensure sufficient disk space\n", gibidiutils.IconBullet)
case gibidiutils.CodeIOWrite:
ef.ui.printf(" %s Check available disk space\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Verify write permissions\n", gibidiutils.IconBullet)
default:
ef.ui.printf(" • Check file/directory permissions\n")
ef.ui.printf(" Verify available disk space\n")
ef.ui.printf(suggestionCheckPermissions, gibidiutils.IconBullet)
ef.ui.printf(" %s Verify available disk space\n", gibidiutils.IconBullet)
}
}
// Helper methods for specific suggestions
func (ef *ErrorFormatter) suggestFileAccess(filePath string) {
ef.ui.printf(" Check if the path exists: %s\n", filePath)
ef.ui.printf(" Verify read permissions\n")
ef.ui.printf(" %s Check if the path exists: %s\n", gibidiutils.IconBullet, filePath)
ef.ui.printf(" %s Verify read permissions\n", gibidiutils.IconBullet)
if filePath != "" {
if stat, err := os.Stat(filePath); err == nil {
ef.ui.printf(" Path exists but may not be accessible\n")
ef.ui.printf(" Mode: %s\n", stat.Mode())
ef.ui.printf(" %s Path exists but may not be accessible\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Mode: %s\n", gibidiutils.IconBullet, stat.Mode())
}
}
}
func (ef *ErrorFormatter) suggestPathResolution(filePath string) {
ef.ui.printf(" Use an absolute path instead of relative\n")
ef.ui.printf(" %s Use an absolute path instead of relative\n", gibidiutils.IconBullet)
if filePath != "" {
if abs, err := filepath.Abs(filePath); err == nil {
ef.ui.printf(" Try: %s\n", abs)
ef.ui.printf(" %s Try: %s\n", gibidiutils.IconBullet, abs)
}
}
}
func (ef *ErrorFormatter) suggestFileNotFound(filePath string) {
ef.ui.printf(" Check if the file/directory exists: %s\n", filePath)
ef.ui.printf(" %s Check if the file/directory exists: %s\n", gibidiutils.IconBullet, filePath)
if filePath != "" {
dir := filepath.Dir(filePath)
if entries, err := os.ReadDir(dir); err == nil {
ef.ui.printf(" Similar files in %s:\n", dir)
ef.ui.printf(" %s Similar files in %s:\n", gibidiutils.IconBullet, dir)
count := 0
for _, entry := range entries {
if count >= 3 {
break
}
if strings.Contains(entry.Name(), filepath.Base(filePath)) {
ef.ui.printf(" - %s\n", entry.Name())
ef.ui.printf(" %s %s\n", gibidiutils.IconBullet, entry.Name())
count++
}
}
@@ -196,18 +203,18 @@ func (ef *ErrorFormatter) suggestFileNotFound(filePath string) {
}
func (ef *ErrorFormatter) suggestFileSystemGeneral(filePath string) {
ef.ui.printf(" • Check file/directory permissions\n")
ef.ui.printf(" Verify the path is correct\n")
ef.ui.printf(suggestionCheckPermissions, gibidiutils.IconBullet)
ef.ui.printf(" %s Verify the path is correct\n", gibidiutils.IconBullet)
if filePath != "" {
ef.ui.printf(" Path: %s\n", filePath)
ef.ui.printf(" %s Path: %s\n", gibidiutils.IconBullet, filePath)
}
}
// provideDefaultSuggestions provides general suggestions.
func (ef *ErrorFormatter) provideDefaultSuggestions() {
ef.ui.printf(" Check your command line arguments\n")
ef.ui.printf(" Run with --help for usage information\n")
ef.ui.printf(" Try with -concurrency 1 to reduce resource usage\n")
ef.ui.printf(" %s Check your command line arguments\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Run with --help for usage information\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Try with -concurrency 1 to reduce resource usage\n", gibidiutils.IconBullet)
}
// provideGenericSuggestions provides suggestions for generic errors.
@@ -219,14 +226,14 @@ func (ef *ErrorFormatter) provideGenericSuggestions(err error) {
// Pattern matching for common errors
switch {
case strings.Contains(errorMsg, "permission denied"):
ef.ui.printf(" • Check file/directory permissions\n")
ef.ui.printf(" Try running with appropriate privileges\n")
ef.ui.printf(suggestionCheckPermissions, gibidiutils.IconBullet)
ef.ui.printf(" %s Try running with appropriate privileges\n", gibidiutils.IconBullet)
case strings.Contains(errorMsg, "no such file or directory"):
ef.ui.printf(" Verify the file/directory path is correct\n")
ef.ui.printf(" Check if the file exists\n")
ef.ui.printf(" %s Verify the file/directory path is correct\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Check if the file exists\n", gibidiutils.IconBullet)
case strings.Contains(errorMsg, "flag") && strings.Contains(errorMsg, "redefined"):
ef.ui.printf(" This is likely a test environment issue\n")
ef.ui.printf(" Try running the command directly instead of in tests\n")
ef.ui.printf(" %s This is likely a test environment issue\n", gibidiutils.IconBullet)
ef.ui.printf(" %s Try running the command directly instead of in tests\n", gibidiutils.IconBullet)
default:
ef.provideDefaultSuggestions()
}
@@ -234,16 +241,16 @@ func (ef *ErrorFormatter) provideGenericSuggestions(err error) {
// CLI-specific error types
// CLIMissingSourceError represents a missing source directory error.
type CLIMissingSourceError struct{}
// MissingSourceError represents a missing source directory error.
type MissingSourceError struct{}
func (e CLIMissingSourceError) Error() string {
func (e MissingSourceError) Error() string {
return "source directory is required"
}
// NewCLIMissingSourceError creates a new CLI missing source error with suggestions.
func NewCLIMissingSourceError() error {
return &CLIMissingSourceError{}
// NewMissingSourceError creates a new CLI missing source error with suggestions.
func NewMissingSourceError() error {
return &MissingSourceError{}
}
// IsUserError checks if an error is a user input error that should be handled gracefully.
@@ -253,16 +260,17 @@ func IsUserError(err error) bool {
}
// Check for specific user error types
var cliErr *CLIMissingSourceError
var cliErr *MissingSourceError
if errors.As(err, &cliErr) {
return true
}
// Check for structured errors that are user-facing
if structErr, ok := err.(*utils.StructuredError); ok {
return structErr.Type == utils.ErrorTypeValidation ||
structErr.Code == utils.CodeValidationFormat ||
structErr.Code == utils.CodeValidationSize
var structErr *gibidiutils.StructuredError
if errors.As(err, &structErr) {
return structErr.Type == gibidiutils.ErrorTypeValidation ||
structErr.Code == gibidiutils.CodeValidationFormat ||
structErr.Code == gibidiutils.CodeValidationSize
}
// Check error message patterns

963
cli/errors_test.go Normal file
View File

@@ -0,0 +1,963 @@
package cli
import (
"bytes"
"errors"
"strings"
"testing"
"github.com/fatih/color"
"github.com/stretchr/testify/assert"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// TestNewErrorFormatter verifies that the constructor returns a non-nil
// formatter wired to the provided UI manager.
func TestNewErrorFormatter(t *testing.T) {
	manager := &UIManager{output: &bytes.Buffer{}}

	formatter := NewErrorFormatter(manager)

	assert.NotNil(t, formatter)
	assert.Equal(t, manager, formatter.ui)
}
// TestFormatError exercises FormatError with nil, structured, generic, and
// wrapped errors, checking both required and forbidden output fragments.
func TestFormatError(t *testing.T) {
	tests := []struct {
		name           string
		err            error
		expectedOutput []string
		notExpected    []string
	}{
		{
			name:           "nil error",
			err:            nil,
			expectedOutput: []string{},
		},
		{
			name: "structured error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSNotFound,
				testErrFileNotFound,
				"/test/file.txt",
				map[string]interface{}{"size": 1024},
			),
			expectedOutput: []string{
				gibidiutils.IconError + testErrorSuffix,
				"FileSystem",
				testErrFileNotFound,
				"/test/file.txt",
				"NOT_FOUND",
			},
		},
		{
			name:           "generic error",
			err:            errors.New("something went wrong"),
			expectedOutput: []string{gibidiutils.IconError + testErrorSuffix, "something went wrong"},
		},
		{
			name: "wrapped structured error",
			err: gibidiutils.WrapError(
				errors.New("inner error"),
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationRequired,
				"validation failed",
			),
			expectedOutput: []string{
				gibidiutils.IconError + testErrorSuffix,
				"validation failed",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := &bytes.Buffer{}
			ui := &UIManager{
				enableColors: false,
				output:       buf,
			}
			// Disable global color output so assertions see plain text.
			prev := color.NoColor
			color.NoColor = true
			t.Cleanup(func() { color.NoColor = prev })

			ef := NewErrorFormatter(ui)
			ef.FormatError(tt.err)

			output := buf.String()
			// A nil error must produce no output at all. Without this check
			// the nil case would assert nothing (its fragment lists are empty).
			if tt.err == nil {
				assert.Empty(t, output)
			}
			for _, expected := range tt.expectedOutput {
				assert.Contains(t, output, expected)
			}
			for _, notExpected := range tt.notExpected {
				assert.NotContains(t, output, notExpected)
			}
		})
	}
}
// TestFormatStructuredError checks that structured errors of every error
// type render their type name, message, file path, code, and suggestions.
func TestFormatStructuredError(t *testing.T) {
	cases := []struct {
		name string
		err  *gibidiutils.StructuredError
		want []string
	}{
		{
			name: "filesystem error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSPermission,
				testErrPermissionDenied,
				"/etc/shadow",
				nil,
			),
			want: []string{
				"FileSystem",
				testErrPermissionDenied,
				"/etc/shadow",
				"PERMISSION_DENIED",
				testSuggestionsHeader,
			},
		},
		{
			name: "validation error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationFormat,
				testErrInvalidFormat,
				"",
				map[string]interface{}{"format": "xml"},
			),
			want: []string{
				"Validation",
				testErrInvalidFormat,
				"FORMAT",
				testSuggestionsHeader,
			},
		},
		{
			name: "processing error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeProcessing,
				gibidiutils.CodeProcessingFileRead,
				"failed to read file",
				"large.bin",
				nil,
			),
			want: []string{
				"Processing",
				"failed to read file",
				"large.bin",
				"FILE_READ",
				testSuggestionsHeader,
			},
		},
		{
			name: "IO error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeIO,
				gibidiutils.CodeIOFileWrite,
				"disk full",
				"/output/result.txt",
				nil,
			),
			want: []string{
				"IO",
				"disk full",
				"/output/result.txt",
				"FILE_WRITE",
				testSuggestionsHeader,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var sink bytes.Buffer
			ui := &UIManager{enableColors: false, output: &sink}

			// Force plain output regardless of the environment.
			prev := color.NoColor
			color.NoColor = true
			t.Cleanup(func() { color.NoColor = prev })

			formatter := &ErrorFormatter{ui: ui}
			formatter.formatStructuredError(tc.err)

			got := sink.String()
			for _, fragment := range tc.want {
				assert.Contains(t, got, fragment)
			}
		})
	}
}
// TestFormatGenericError verifies that a plain (non-structured) error is
// rendered with the error icon prefix followed by its message.
func TestFormatGenericError(t *testing.T) {
	var sink bytes.Buffer
	ui := &UIManager{enableColors: false, output: &sink}

	// Force plain output regardless of the environment.
	prev := color.NoColor
	color.NoColor = true
	t.Cleanup(func() { color.NoColor = prev })

	formatter := &ErrorFormatter{ui: ui}
	formatter.formatGenericError(errors.New("generic error message"))

	got := sink.String()
	assert.Contains(t, got, gibidiutils.IconError+testErrorSuffix)
	assert.Contains(t, got, "generic error message")
}
// TestProvideSuggestions checks that provideSuggestions dispatches to the
// right per-type suggestion set for each error type and code.
func TestProvideSuggestions(t *testing.T) {
	cases := []struct {
		name string
		err  *gibidiutils.StructuredError
		want []string
	}{
		{
			name: "filesystem permission error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSPermission,
				testErrPermissionDenied,
				"/root/file",
				nil,
			),
			want: []string{
				testSuggestCheckPerms,
				testSuggestVerifyPath,
			},
		},
		{
			name: "filesystem not found error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSNotFound,
				testErrFileNotFound,
				"/missing/file",
				nil,
			),
			want: []string{
				"Check if the file/directory exists: /missing/file",
			},
		},
		{
			name: "validation format error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationFormat,
				"unsupported format",
				"",
				nil,
			),
			want: []string{
				testSuggestFormat,
				testSuggestFormatEx,
			},
		},
		{
			name: "validation path error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationPath,
				"invalid path",
				"../../etc",
				nil,
			),
			want: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
			},
		},
		{
			name: "processing file read error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeProcessing,
				gibidiutils.CodeProcessingFileRead,
				"read error",
				"corrupted.dat",
				nil,
			),
			want: []string{
				"Check file permissions",
				"Verify the file is not corrupted",
			},
		},
		{
			name: "IO file write error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeIO,
				gibidiutils.CodeIOFileWrite,
				"write failed",
				"/output.txt",
				nil,
			),
			want: []string{
				testSuggestCheckPerms,
				testSuggestDiskSpace,
			},
		},
		{
			name: "unknown error type",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeUnknown,
				"UNKNOWN",
				"unknown error",
				"",
				nil,
			),
			want: []string{
				testSuggestCheckArgs,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var sink bytes.Buffer
			ui := &UIManager{enableColors: false, output: &sink}

			// Force plain output regardless of the environment.
			prev := color.NoColor
			color.NoColor = true
			t.Cleanup(func() { color.NoColor = prev })

			formatter := &ErrorFormatter{ui: ui}
			formatter.provideSuggestions(tc.err)

			got := sink.String()
			for _, fragment := range tc.want {
				assert.Contains(t, got, fragment)
			}
		})
	}
}
// TestProvideFileSystemSuggestions checks the per-code suggestion text
// emitted for filesystem errors, including the default fallback.
func TestProvideFileSystemSuggestions(t *testing.T) {
	cases := []struct {
		name string
		err  *gibidiutils.StructuredError
		want []string
	}{
		{
			name: testErrPermissionDenied,
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSPermission,
				testErrPermissionDenied,
				"/root/secret",
				nil,
			),
			want: []string{
				testSuggestCheckPerms,
				testSuggestVerifyPath,
			},
		},
		{
			name: "path resolution error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSPathResolution,
				"path error",
				"../../../etc",
				nil,
			),
			want: []string{
				"Use an absolute path instead of relative",
			},
		},
		{
			name: testErrFileNotFound,
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				gibidiutils.CodeFSNotFound,
				"not found",
				"/missing.txt",
				nil,
			),
			want: []string{
				"Check if the file/directory exists: /missing.txt",
			},
		},
		{
			name: "default filesystem error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeFileSystem,
				"OTHER_FS_ERROR",
				testErrOther,
				"/some/path",
				nil,
			),
			want: []string{
				testSuggestCheckPerms,
				testSuggestVerifyPath,
				"Path: /some/path",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var sink bytes.Buffer
			ui := &UIManager{enableColors: false, output: &sink}

			formatter := &ErrorFormatter{ui: ui}
			formatter.provideFileSystemSuggestions(tc.err)

			got := sink.String()
			for _, fragment := range tc.want {
				assert.Contains(t, got, fragment)
			}
		})
	}
}
// TestProvideValidationSuggestions checks the per-code suggestion text
// emitted for validation errors, including the default fallback.
func TestProvideValidationSuggestions(t *testing.T) {
	cases := []struct {
		name string
		err  *gibidiutils.StructuredError
		want []string
	}{
		{
			name: "format validation",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationFormat,
				testErrInvalidFormat,
				"",
				nil,
			),
			want: []string{
				testSuggestFormat,
				testSuggestFormatEx,
			},
		},
		{
			name: "path validation",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationPath,
				"invalid path",
				"",
				nil,
			),
			want: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
			},
		},
		{
			name: "size validation",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationSize,
				"size error",
				"",
				nil,
			),
			want: []string{
				"Increase file size limit in config.yaml",
				"Use smaller files or exclude large files",
			},
		},
		{
			name: "required validation",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationRequired,
				"required",
				"",
				nil,
			),
			want: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
			},
		},
		{
			name: "default validation",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				"OTHER_VALIDATION",
				"other",
				"",
				nil,
			),
			want: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var sink bytes.Buffer
			ui := &UIManager{enableColors: false, output: &sink}

			formatter := &ErrorFormatter{ui: ui}
			formatter.provideValidationSuggestions(tc.err)

			got := sink.String()
			for _, fragment := range tc.want {
				assert.Contains(t, got, fragment)
			}
		})
	}
}
// TestProvideProcessingSuggestions checks the per-code suggestion text
// emitted for processing errors, including the default fallback.
func TestProvideProcessingSuggestions(t *testing.T) {
	cases := []struct {
		name string
		err  *gibidiutils.StructuredError
		want []string
	}{
		{
			name: "file read error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeProcessing,
				gibidiutils.CodeProcessingFileRead,
				"read error",
				"",
				nil,
			),
			want: []string{
				"Check file permissions",
				"Verify the file is not corrupted",
			},
		},
		{
			name: "collection error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeProcessing,
				gibidiutils.CodeProcessingCollection,
				"collection error",
				"",
				nil,
			),
			want: []string{
				"Check if the source directory exists and is readable",
				"Verify directory permissions",
			},
		},
		{
			name: testErrEncoding,
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeProcessing,
				gibidiutils.CodeProcessingEncode,
				testErrEncoding,
				"",
				nil,
			),
			want: []string{
				"Try reducing concurrency: -concurrency 1",
				"Check available system resources",
			},
		},
		{
			name: "default processing",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeProcessing,
				"OTHER",
				testErrOther,
				"",
				nil,
			),
			want: []string{
				"Try reducing concurrency: -concurrency 1",
				"Check available system resources",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var sink bytes.Buffer
			ui := &UIManager{enableColors: false, output: &sink}

			formatter := &ErrorFormatter{ui: ui}
			formatter.provideProcessingSuggestions(tc.err)

			got := sink.String()
			for _, fragment := range tc.want {
				assert.Contains(t, got, fragment)
			}
		})
	}
}
// TestProvideIOSuggestions verifies that provideIOSuggestions emits the
// expected remediation hints for each I/O error code, including the
// fallback hints used for unrecognized codes.
func TestProvideIOSuggestions(t *testing.T) {
	tests := []struct {
		name string
		err *gibidiutils.StructuredError
		expectedSugges []string
	}{
		{
			name: "file create error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeIO,
				gibidiutils.CodeIOFileCreate,
				"create error",
				"",
				nil,
			),
			expectedSugges: []string{
				"Check if the destination directory exists",
				"Verify write permissions for the output file",
				"Ensure sufficient disk space",
			},
		},
		{
			name: "file write error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeIO,
				gibidiutils.CodeIOFileWrite,
				"write error",
				"",
				nil,
			),
			expectedSugges: []string{
				testSuggestCheckPerms,
				testSuggestDiskSpace,
			},
		},
		{
			name: testErrEncoding,
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeIO,
				gibidiutils.CodeIOEncoding,
				testErrEncoding,
				"",
				nil,
			),
			expectedSugges: []string{
				testSuggestCheckPerms,
				testSuggestDiskSpace,
			},
		},
		{
			// An unknown code exercises the default suggestion branch.
			name: "default IO error",
			err: gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeIO,
				"OTHER",
				testErrOther,
				"",
				nil,
			),
			expectedSugges: []string{
				testSuggestCheckPerms,
				testSuggestDiskSpace,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Colors disabled so assertions match plain, uncolored text.
			buf := &bytes.Buffer{}
			ui := &UIManager{
				enableColors: false,
				output: buf,
			}
			ef := &ErrorFormatter{ui: ui}
			ef.provideIOSuggestions(tt.err)
			output := buf.String()
			for _, suggestion := range tt.expectedSugges {
				assert.Contains(t, output, suggestion)
			}
		})
	}
}
// TestProvideGenericSuggestions verifies that provideGenericSuggestions
// chooses hints based on keywords in the error message (permission,
// not-found, memory, timeout, connection) and falls back to generic
// argument/help/concurrency hints otherwise.
func TestProvideGenericSuggestions(t *testing.T) {
	tests := []struct {
		name string
		err error
		expectedSugges []string
	}{
		{
			name: "permission error",
			err: errors.New("permission denied accessing file"),
			expectedSugges: []string{
				testSuggestCheckPerms,
				"Try running with appropriate privileges",
			},
		},
		{
			name: "not found error",
			err: errors.New("no such file or directory"),
			expectedSugges: []string{
				"Verify the file/directory path is correct",
				"Check if the file exists",
			},
		},
		{
			name: "memory error",
			err: errors.New("out of memory"),
			expectedSugges: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
				testSuggestReduceConcur,
			},
		},
		{
			name: "timeout error",
			err: errors.New("operation timed out"),
			expectedSugges: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
				testSuggestReduceConcur,
			},
		},
		{
			name: "connection error",
			err: errors.New("connection refused"),
			expectedSugges: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
				testSuggestReduceConcur,
			},
		},
		{
			// No recognized keyword: exercises the default branch.
			name: "default error",
			err: errors.New("unknown error occurred"),
			expectedSugges: []string{
				testSuggestCheckArgs,
				testSuggestHelp,
				testSuggestReduceConcur,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Colors disabled so assertions match plain, uncolored text.
			buf := &bytes.Buffer{}
			ui := &UIManager{
				enableColors: false,
				output: buf,
			}
			ef := &ErrorFormatter{ui: ui}
			ef.provideGenericSuggestions(tt.err)
			output := buf.String()
			for _, suggestion := range tt.expectedSugges {
				assert.Contains(t, output, suggestion)
			}
		})
	}
}
// TestMissingSourceError verifies the static message returned by
// MissingSourceError.Error.
func TestMissingSourceError(t *testing.T) {
	var err error = &MissingSourceError{}
	assert.Equal(t, "source directory is required", err.Error())
}
// TestNewMissingSourceErrorType ensures NewMissingSourceError returns a
// non-nil error carrying the expected message and that the error unwraps
// to the concrete *MissingSourceError type.
func TestNewMissingSourceErrorType(t *testing.T) {
	err := NewMissingSourceError()
	assert.NotNil(t, err)
	assert.Equal(t, "source directory is required", err.Error())
	var target *MissingSourceError
	assert.True(t, errors.As(err, &target))
	assert.NotNil(t, target)
}
// TestFormatErrorWithColors exercises FormatError with ANSI colors enabled.
// It temporarily forces fatih/color to emit escape codes and restores the
// prior global state via t.Cleanup.
func TestFormatErrorWithColors(t *testing.T) {
	buf := &bytes.Buffer{}
	ui := &UIManager{
		enableColors: true,
		output: buf,
	}
	// color.NoColor is package-global; save and restore it so this test
	// does not leak color state into other tests.
	prev := color.NoColor
	color.NoColor = false
	t.Cleanup(func() { color.NoColor = prev })
	ef := NewErrorFormatter(ui)
	err := gibidiutils.NewStructuredError(
		gibidiutils.ErrorTypeValidation,
		gibidiutils.CodeValidationFormat,
		testErrInvalidFormat,
		"",
		nil,
	)
	ef.FormatError(err)
	output := buf.String()
	// When colors are enabled, some output may go directly to stdout
	// Check for suggestions that are captured in the buffer
	assert.Contains(t, output, testSuggestFormat)
	assert.Contains(t, output, testSuggestFormatEx)
}
// TestFormatWrappedError checks that FormatError surfaces the wrapper
// message of a structured error that wraps an inner error.
func TestFormatWrappedError(t *testing.T) {
	var buf bytes.Buffer
	formatter := NewErrorFormatter(&UIManager{
		enableColors: false,
		output:       &buf,
	})
	wrapped := gibidiutils.WrapError(
		errors.New("inner error"),
		gibidiutils.ErrorTypeProcessing,
		gibidiutils.CodeProcessingFileRead,
		"wrapper message",
	)
	formatter.FormatError(wrapped)
	assert.Contains(t, buf.String(), "wrapper message")
}
// TestSuggestionPathCoverage drives provideSuggestions through every known
// error type and verifies each one produces at least some suggestion text.
func TestSuggestionPathCoverage(t *testing.T) {
	var buf bytes.Buffer
	formatter := &ErrorFormatter{ui: &UIManager{
		enableColors: false,
		output:       &buf,
	}}
	for _, errType := range []gibidiutils.ErrorType{
		gibidiutils.ErrorTypeFileSystem,
		gibidiutils.ErrorTypeValidation,
		gibidiutils.ErrorTypeProcessing,
		gibidiutils.ErrorTypeIO,
		gibidiutils.ErrorTypeConfiguration,
		gibidiutils.ErrorTypeUnknown,
	} {
		t.Run(errType.String(), func(t *testing.T) {
			buf.Reset()
			structured := gibidiutils.NewStructuredError(
				errType,
				"TEST_CODE",
				"test error",
				"/test/path",
				nil,
			)
			formatter.provideSuggestions(structured)
			// Every error type should yield at least one suggestion line.
			assert.NotEmpty(t, buf.String())
		})
	}
}
// TestSuggestHelpers drives each suggestion helper with a representative
// path and verifies it writes at least one bullet-pointed suggestion.
func TestSuggestHelpers(t *testing.T) {
	tests := []struct {
		name string
		testFunc func(*ErrorFormatter)
	}{
		{
			name: "suggestFileAccess",
			testFunc: func(ef *ErrorFormatter) {
				ef.suggestFileAccess("/root/file")
			},
		},
		{
			name: "suggestPathResolution",
			testFunc: func(ef *ErrorFormatter) {
				ef.suggestPathResolution("../../../etc")
			},
		},
		{
			name: "suggestFileNotFound",
			testFunc: func(ef *ErrorFormatter) {
				ef.suggestFileNotFound("/missing")
			},
		},
		{
			name: "suggestFileSystemGeneral",
			testFunc: func(ef *ErrorFormatter) {
				ef.suggestFileSystemGeneral("/path")
			},
		},
		{
			name: "provideDefaultSuggestions",
			testFunc: func(ef *ErrorFormatter) {
				ef.provideDefaultSuggestions()
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Colors disabled so assertions match plain, uncolored text.
			buf := &bytes.Buffer{}
			ui := &UIManager{
				enableColors: false,
				output: buf,
			}
			ef := &ErrorFormatter{ui: ui}
			tt.testFunc(ef)
			output := buf.String()
			// Each should produce some output
			assert.NotEmpty(t, output)
			// Should contain bullet point
			assert.Contains(t, output, gibidiutils.IconBullet)
		})
	}
}
// TestGenericSuggestionsEdgeCases feeds unusual error messages into
// provideGenericSuggestions and verifies it never panics and always emits
// some output.
func TestGenericSuggestionsEdgeCases(t *testing.T) {
	cases := []struct {
		name string
		err  error
	}{
		{"empty message", errors.New("")},
		{"very long message", errors.New(strings.Repeat("error ", 100))},
		{"special characters", errors.New("error!@#$%^&*()")},
		{"newlines", errors.New("error\nwith\nnewlines")},
		{"unicode", errors.New("error with 中文 characters")},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var buf bytes.Buffer
			formatter := &ErrorFormatter{ui: &UIManager{
				enableColors: false,
				output:       &buf,
			}}
			// Must not panic regardless of message content.
			formatter.provideGenericSuggestions(tc.err)
			assert.NotEmpty(t, buf.String())
		})
	}
}

View File

@@ -5,7 +5,7 @@ import (
"runtime"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// Flags holds CLI flags values.
@@ -39,8 +39,10 @@ func ParseFlags() (*Flags, error) {
flag.StringVar(&flags.Prefix, "prefix", "", "Text to add at the beginning of the output file")
flag.StringVar(&flags.Suffix, "suffix", "", "Text to add at the end of the output file")
flag.StringVar(&flags.Format, "format", "markdown", "Output format (json, markdown, yaml)")
flag.IntVar(&flags.Concurrency, "concurrency", runtime.NumCPU(),
"Number of concurrent workers (default: number of CPU cores)")
flag.IntVar(
&flags.Concurrency, "concurrency", runtime.NumCPU(),
"Number of concurrent workers (default: number of CPU cores)",
)
flag.BoolVar(&flags.NoColors, "no-colors", false, "Disable colored output")
flag.BoolVar(&flags.NoProgress, "no-progress", false, "Disable progress bars")
flag.BoolVar(&flags.Verbose, "verbose", false, "Enable verbose output")
@@ -63,11 +65,11 @@ func ParseFlags() (*Flags, error) {
// validate validates the CLI flags.
func (f *Flags) validate() error {
if f.SourceDir == "" {
return NewCLIMissingSourceError()
return NewMissingSourceError()
}
// Validate source path for security
if err := utils.ValidateSourcePath(f.SourceDir); err != nil {
if err := gibidiutils.ValidateSourcePath(f.SourceDir); err != nil {
return err
}
@@ -77,28 +79,20 @@ func (f *Flags) validate() error {
}
// Validate concurrency
if err := config.ValidateConcurrency(f.Concurrency); err != nil {
return err
}
return nil
return config.ValidateConcurrency(f.Concurrency)
}
// setDefaultDestination sets the default destination if not provided.
func (f *Flags) setDefaultDestination() error {
if f.Destination == "" {
absRoot, err := utils.GetAbsolutePath(f.SourceDir)
absRoot, err := gibidiutils.GetAbsolutePath(f.SourceDir)
if err != nil {
return err
}
baseName := utils.GetBaseName(absRoot)
baseName := gibidiutils.GetBaseName(absRoot)
f.Destination = baseName + "." + f.Format
}
// Validate destination path for security
if err := utils.ValidateDestinationPath(f.Destination); err != nil {
return err
}
return nil
return gibidiutils.ValidateDestinationPath(f.Destination)
}

366
cli/flags_test.go Normal file
View File

@@ -0,0 +1,366 @@
package cli
import (
"errors"
"flag"
"os"
"runtime"
"strings"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
// TestParseFlags covers end-to-end CLI flag parsing: the happy path with
// every option set, validation failures (missing source, invalid format,
// out-of-range concurrency, path traversal), and default value population.
//
// Package-global flag state (flag.CommandLine, flagsParsed, globalFlags)
// is reset per subtest and restored when the test completes.
func TestParseFlags(t *testing.T) {
	// Save original command line args and restore after test
	oldArgs := os.Args
	oldFlagsParsed := flagsParsed
	defer func() {
		os.Args = oldArgs
		flagsParsed = oldFlagsParsed
		flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	}()
	tests := []struct {
		name string
		args []string
		expectedError string
		validate func(t *testing.T, f *Flags)
		setup func(t *testing.T)
	}{
		{
			name: "valid flags with all options",
			args: []string{
				"gibidify",
				testFlagSource, "", // will set to tempDir in test body
				"-destination", "output.md",
				"-format", "json",
				testFlagConcurrency, "4",
				"-prefix", "prefix",
				"-suffix", "suffix",
				"-no-colors",
				"-no-progress",
				"-verbose",
			},
			validate: nil, // set in test body using closure
		},
		{
			name: "missing source directory",
			args: []string{"gibidify"},
			expectedError: testErrSourceRequired,
		},
		{
			name: "invalid format",
			args: []string{
				"gibidify",
				testFlagSource, "", // will set to tempDir in test body
				"-format", "invalid",
			},
			expectedError: "unsupported output format: invalid",
		},
		{
			name: "invalid concurrency (zero)",
			args: []string{
				"gibidify",
				testFlagSource, "", // will set to tempDir in test body
				testFlagConcurrency, "0",
			},
			expectedError: "concurrency (0) must be at least 1",
		},
		{
			name: "invalid concurrency (too high)",
			args: []string{
				"gibidify",
				testFlagSource, "", // will set to tempDir in test body
				testFlagConcurrency, "200",
			},
			// Set maxConcurrency so the upper bound is enforced
			expectedError: "concurrency (200) exceeds maximum (128)",
			setup: func(t *testing.T) {
				orig := viper.Get("maxConcurrency")
				viper.Set("maxConcurrency", 128)
				t.Cleanup(func() { viper.Set("maxConcurrency", orig) })
			},
		},
		{
			name: "path traversal in source",
			args: []string{
				"gibidify",
				testFlagSource, testPathTraversalPath,
			},
			expectedError: testErrPathTraversal,
		},
		{
			name: "default values",
			args: []string{
				"gibidify",
				testFlagSource, "", // will set to tempDir in test body
			},
			validate: nil, // set in test body using closure
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Reset flags for each test
			flagsParsed = false
			globalFlags = nil
			flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
			// Create a local copy of args to avoid corrupting shared test data
			args := append([]string{}, tt.args...)
			// Use t.TempDir for source directory if needed
			// (an empty value following -source is a placeholder).
			tempDir := ""
			for i := range args {
				if i > 0 && args[i-1] == testFlagSource && args[i] == "" {
					tempDir = t.TempDir()
					args[i] = tempDir
				}
			}
			os.Args = args
			// Set validate closure if needed (for tempDir)
			if tt.name == "valid flags with all options" {
				tt.validate = func(t *testing.T, f *Flags) {
					assert.Equal(t, tempDir, f.SourceDir)
					assert.Equal(t, "output.md", f.Destination)
					assert.Equal(t, "json", f.Format)
					assert.Equal(t, 4, f.Concurrency)
					assert.Equal(t, "prefix", f.Prefix)
					assert.Equal(t, "suffix", f.Suffix)
					assert.True(t, f.NoColors)
					assert.True(t, f.NoProgress)
					assert.True(t, f.Verbose)
				}
			}
			if tt.name == "default values" {
				tt.validate = func(t *testing.T, f *Flags) {
					assert.Equal(t, tempDir, f.SourceDir)
					assert.Equal(t, "markdown", f.Format)
					assert.Equal(t, runtime.NumCPU(), f.Concurrency)
					assert.Equal(t, "", f.Prefix)
					assert.Equal(t, "", f.Suffix)
					assert.False(t, f.NoColors)
					assert.False(t, f.NoProgress)
					assert.False(t, f.Verbose)
					// Destination should be set by setDefaultDestination
					assert.NotEmpty(t, f.Destination)
				}
			}
			// Call setup if present (e.g. for maxConcurrency)
			if tt.setup != nil {
				tt.setup(t)
			}
			flags, err := ParseFlags()
			if tt.expectedError != "" {
				if assert.Error(t, err) {
					assert.Contains(t, err.Error(), tt.expectedError)
				}
				assert.Nil(t, flags)
			} else {
				assert.NoError(t, err)
				assert.NotNil(t, flags)
				if tt.validate != nil {
					tt.validate(t, flags)
				}
			}
		})
	}
}
// TestFlagsValidate exercises Flags.validate directly: missing source,
// invalid format, invalid concurrency, path traversal rejection, and a
// fully valid configuration.
func TestFlagsValidate(t *testing.T) {
	tests := []struct {
		name string
		flags *Flags
		setupFunc func(t *testing.T, f *Flags)
		expectedError string
	}{
		{
			name: "missing source directory",
			flags: &Flags{},
			expectedError: testErrSourceRequired,
		},
		{
			name: "invalid format",
			flags: &Flags{
				Format: "invalid",
			},
			// setupFunc supplies a real directory so validation reaches
			// the format check.
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
			expectedError: "unsupported output format: invalid",
		},
		{
			name: "invalid concurrency",
			flags: &Flags{
				Format: "markdown",
				Concurrency: 0,
			},
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
			expectedError: "concurrency (0) must be at least 1",
		},
		{
			name: "path traversal attempt",
			flags: &Flags{
				SourceDir: testPathTraversalPath,
				Format: "markdown",
			},
			expectedError: testErrPathTraversal,
		},
		{
			name: "valid flags",
			flags: &Flags{
				Format: "json",
				Concurrency: 4,
			},
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.setupFunc != nil {
				tt.setupFunc(t, tt.flags)
			}
			err := tt.flags.validate()
			if tt.expectedError != "" {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.expectedError)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestSetDefaultDestination checks that setDefaultDestination derives a
// destination named after the source directory with the format as its
// extension, leaves a provided destination untouched, and rejects path
// traversal in the destination.
func TestSetDefaultDestination(t *testing.T) {
	tests := []struct {
		name string
		flags *Flags
		setupFunc func(t *testing.T, f *Flags)
		expectedDest string
		expectedError string
	}{
		{
			name: "default destination for directory",
			flags: &Flags{
				Format: "markdown",
			},
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
			expectedDest: "", // will check suffix below
		},
		{
			name: "default destination for json format",
			flags: &Flags{
				Format: "json",
			},
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
			expectedDest: "", // will check suffix below
		},
		{
			name: "provided destination unchanged",
			flags: &Flags{
				Format: "markdown",
				Destination: "custom-output.txt",
			},
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
			expectedDest: "custom-output.txt",
		},
		{
			name: "path traversal in destination",
			flags: &Flags{
				Format: "markdown",
				Destination: testPathTraversalPath,
			},
			setupFunc: func(t *testing.T, f *Flags) {
				f.SourceDir = t.TempDir()
			},
			expectedError: testErrPathTraversal,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.setupFunc != nil {
				tt.setupFunc(t, tt.flags)
			}
			err := tt.flags.setDefaultDestination()
			if tt.expectedError != "" {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.expectedError)
			} else {
				assert.NoError(t, err)
				// Derived destinations use the format name as extension;
				// only the suffix is deterministic (base comes from the
				// temp directory name).
				switch {
				case tt.expectedDest != "":
					assert.Equal(t, tt.expectedDest, tt.flags.Destination)
				case tt.flags.Format == "json":
					assert.True(
						t, strings.HasSuffix(tt.flags.Destination, ".json"),
						"expected %q to have suffix .json", tt.flags.Destination,
					)
				case tt.flags.Format == "markdown":
					assert.True(
						t, strings.HasSuffix(tt.flags.Destination, ".markdown"),
						"expected %q to have suffix .markdown", tt.flags.Destination,
					)
				}
			}
		})
	}
}
// TestFlagsSingleton verifies that once flags have been parsed, ParseFlags
// returns the cached instance instead of re-parsing the command line.
func TestFlagsSingleton(t *testing.T) {
	savedParsed := flagsParsed
	savedFlags := globalFlags
	defer func() {
		flagsParsed = savedParsed
		globalFlags = savedFlags
	}()
	cached := &Flags{
		SourceDir:   "/test",
		Format:      "json",
		Concurrency: 2,
	}
	// Mark parsing as already done so ParseFlags takes the cached path.
	flagsParsed = true
	globalFlags = cached
	got, err := ParseFlags()
	assert.NoError(t, err)
	assert.Equal(t, cached, got)
	assert.Same(t, globalFlags, got)
}
// TestNewMissingSourceError confirms NewMissingSourceError returns an
// error with the expected message and concrete *MissingSourceError type.
func TestNewMissingSourceError(t *testing.T) {
	err := NewMissingSourceError()
	assert.Error(t, err)
	assert.Equal(t, testErrSourceRequired, err.Error())
	var typed *MissingSourceError
	assert.True(t, errors.As(err, &typed))
}

View File

@@ -8,14 +8,19 @@ import (
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// collectFiles collects all files to be processed.
func (p *Processor) collectFiles() ([]string, error) {
files, err := fileproc.CollectFiles(p.flags.SourceDir)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "error collecting files")
return nil, gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"error collecting files",
)
}
logrus.Infof("Found %d files to process", len(files))
return files, nil
@@ -30,9 +35,9 @@ func (p *Processor) validateFileCollection(files []string) error {
// Check file count limit
maxFiles := config.GetMaxFiles()
if len(files) > maxFiles {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitFiles,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitFiles,
fmt.Sprintf("file count (%d) exceeds maximum limit (%d)", len(files), maxFiles),
"",
map[string]interface{}{
@@ -51,10 +56,14 @@ func (p *Processor) validateFileCollection(files []string) error {
if fileInfo, err := os.Stat(filePath); err == nil {
totalSize += fileInfo.Size()
if totalSize > maxTotalSize {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitTotalSize,
fmt.Sprintf("total file size (%d bytes) would exceed maximum limit (%d bytes)", totalSize, maxTotalSize),
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitTotalSize,
fmt.Sprintf(
"total file size (%d bytes) would exceed maximum limit (%d bytes)",
totalSize,
maxTotalSize,
),
"",
map[string]interface{}{
"total_size": totalSize,

View File

@@ -6,7 +6,7 @@ import (
"sync"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// Process executes the main file processing workflow.
@@ -16,7 +16,9 @@ func (p *Processor) Process(ctx context.Context) error {
defer overallCancel()
// Configure file type registry
p.configureFileTypes()
if err := p.configureFileTypes(); err != nil {
return err
}
// Print startup info with colors
p.ui.PrintHeader("🚀 Starting gibidify")
@@ -55,7 +57,7 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
return err
}
defer func() {
utils.LogError("Error closing output file", outFile.Close())
gibidiutils.LogError("Error closing output file", outFile.Close())
}()
// Initialize back-pressure and channels
@@ -65,7 +67,11 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
writerDone := make(chan struct{})
// Start writer
go fileproc.StartWriter(outFile, writeCh, writerDone, p.flags.Format, p.flags.Prefix, p.flags.Suffix)
go fileproc.StartWriter(outFile, writeCh, writerDone, fileproc.WriterConfig{
Format: p.flags.Format,
Prefix: p.flags.Prefix,
Suffix: p.flags.Suffix,
})
// Start workers
var wg sync.WaitGroup
@@ -92,9 +98,13 @@ func (p *Processor) processFiles(ctx context.Context, files []string) error {
// createOutputFile creates the output file.
func (p *Processor) createOutputFile() (*os.File, error) {
// Destination path has been validated in CLI flags validation for path traversal attempts
outFile, err := os.Create(p.flags.Destination) // #nosec G304 - destination is validated in flags.validate()
// #nosec G304 - destination is validated in flags.validate()
outFile, err := os.Create(p.flags.Destination)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileCreate, "failed to create output file").WithFilePath(p.flags.Destination)
return nil, gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOFileCreate,
"failed to create output file",
).WithFilePath(p.flags.Destination)
}
return outFile, nil
}

View File

@@ -0,0 +1,265 @@
package cli
import (
"context"
"os"
"path/filepath"
"sync"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ivuorinen/gibidify/fileproc"
)
// TestProcessorSimple covers the happy paths of the Processor helpers:
// construction, file-type configuration, output file creation, collection
// validation, file collection, back-pressure sending, writer completion,
// and final stats logging.
func TestProcessorSimple(t *testing.T) {
	t.Run("NewProcessor", func(t *testing.T) {
		flags := &Flags{
			SourceDir: "/tmp/test",
			Destination: "output.md",
			Format: "markdown",
			Concurrency: 2,
			NoColors: true,
			NoProgress: true,
			Verbose: false,
		}
		p := NewProcessor(flags)
		assert.NotNil(t, p)
		assert.Equal(t, flags, p.flags)
		assert.NotNil(t, p.ui)
		assert.NotNil(t, p.backpressure)
		assert.NotNil(t, p.resourceMonitor)
		// NoColors/NoProgress flags must propagate into the UI manager.
		assert.False(t, p.ui.enableColors)
		assert.False(t, p.ui.enableProgress)
	})
	t.Run("ConfigureFileTypes", func(t *testing.T) {
		p := &Processor{
			flags: &Flags{},
			ui: NewUIManager(),
		}
		// Should not panic or error
		err := p.configureFileTypes()
		assert.NoError(t, err)
		assert.NotNil(t, p)
	})
	t.Run("CreateOutputFile", func(t *testing.T) {
		// Create temp file path
		tempDir := t.TempDir()
		outputPath := filepath.Join(tempDir, "output.txt")
		p := &Processor{
			flags: &Flags{
				Destination: outputPath,
			},
			ui: NewUIManager(),
		}
		file, err := p.createOutputFile()
		assert.NoError(t, err)
		assert.NotNil(t, file)
		// Clean up
		err = file.Close()
		require.NoError(t, err)
		err = os.Remove(outputPath)
		require.NoError(t, err)
	})
	t.Run("ValidateFileCollection", func(t *testing.T) {
		p := &Processor{
			ui: NewUIManager(),
		}
		// Empty collection should be valid (just checks limits)
		err := p.validateFileCollection([]string{})
		assert.NoError(t, err)
		// Small collection should be valid
		err = p.validateFileCollection([]string{
			testFilePath1,
			testFilePath2,
		})
		assert.NoError(t, err)
	})
	t.Run("CollectFiles_EmptyDir", func(t *testing.T) {
		tempDir := t.TempDir()
		p := &Processor{
			flags: &Flags{
				SourceDir: tempDir,
			},
			ui: NewUIManager(),
		}
		files, err := p.collectFiles()
		assert.NoError(t, err)
		assert.Empty(t, files)
	})
	t.Run("CollectFiles_WithFiles", func(t *testing.T) {
		tempDir := t.TempDir()
		// Create test files
		require.NoError(t, os.WriteFile(filepath.Join(tempDir, "test1.go"), []byte("package main"), 0o600))
		require.NoError(t, os.WriteFile(filepath.Join(tempDir, "test2.go"), []byte("package test"), 0o600))
		// Set config so no files are ignored, and restore after test
		origIgnoreDirs := viper.Get("ignoreDirectories")
		origFileSizeLimit := viper.Get("fileSizeLimit")
		viper.Set("ignoreDirectories", []string{})
		viper.Set("fileSizeLimit", 1024*1024*10) // 10MB
		t.Cleanup(func() {
			viper.Set("ignoreDirectories", origIgnoreDirs)
			viper.Set("fileSizeLimit", origFileSizeLimit)
		})
		p := &Processor{
			flags: &Flags{
				SourceDir: tempDir,
			},
			ui: NewUIManager(),
		}
		files, err := p.collectFiles()
		assert.NoError(t, err)
		assert.Len(t, files, 2)
	})
	t.Run("SendFiles", func(t *testing.T) {
		p := &Processor{
			backpressure: fileproc.NewBackpressureManager(),
			ui: NewUIManager(),
		}
		ctx := context.Background()
		fileCh := make(chan string, 3)
		files := []string{
			testFilePath1,
			testFilePath2,
		}
		var wg sync.WaitGroup
		wg.Add(1)
		// Send files in a goroutine since it might block
		go func() {
			defer wg.Done()
			err := p.sendFiles(ctx, files, fileCh)
			assert.NoError(t, err)
		}()
		// Drain exactly one value per input file.
		received := make([]string, 0, len(files))
		for range files {
			received = append(received, <-fileCh)
		}
		assert.Len(t, received, len(files))
		// Wait for sendFiles goroutine to finish (and close fileCh)
		wg.Wait()
		// Now channel should be closed
		_, ok := <-fileCh
		assert.False(t, ok, "channel should be closed")
	})
	t.Run("WaitForCompletion", func(t *testing.T) {
		p := &Processor{
			ui: NewUIManager(),
		}
		writeCh := make(chan fileproc.WriteRequest)
		writerDone := make(chan struct{})
		// Simulate writer finishing: the receive returns when writeCh is
		// closed by waitForCompletion.
		go func() {
			<-writeCh // Wait for close
			close(writerDone)
		}()
		var wg sync.WaitGroup
		// Start and finish immediately
		wg.Add(1)
		wg.Done()
		// Should complete without hanging
		p.waitForCompletion(&wg, writeCh, writerDone)
		assert.NotNil(t, p)
	})
	t.Run("LogFinalStats", func(t *testing.T) {
		p := &Processor{
			flags: &Flags{
				Verbose: true,
			},
			ui: NewUIManager(),
			resourceMonitor: fileproc.NewResourceMonitor(),
			backpressure: fileproc.NewBackpressureManager(),
		}
		// Should not panic
		p.logFinalStats()
		assert.NotNil(t, p)
	})
}
// TestProcessorErrors covers failure paths of the Processor helpers:
// unwritable output destination, non-existent source directory, and
// context cancellation during file sending.
func TestProcessorErrors(t *testing.T) {
	t.Run("CreateOutputFile_InvalidPath", func(t *testing.T) {
		// When running as root (common in CI containers), /root is
		// writable and the expected failure would not occur.
		if os.Geteuid() == 0 {
			t.Skip("running as root; /root is writable")
		}
		p := &Processor{
			flags: &Flags{
				Destination: "/root/cannot-write-here.txt",
			},
			ui: NewUIManager(),
		}
		file, err := p.createOutputFile()
		assert.Error(t, err)
		assert.Nil(t, file)
	})
	t.Run("CollectFiles_NonExistentDir", func(t *testing.T) {
		p := &Processor{
			flags: &Flags{
				SourceDir: "/non/existent/path",
			},
			ui: NewUIManager(),
		}
		files, err := p.collectFiles()
		assert.Error(t, err)
		assert.Nil(t, files)
	})
	t.Run("SendFiles_WithCancellation", func(t *testing.T) {
		p := &Processor{
			backpressure: fileproc.NewBackpressureManager(),
			ui: NewUIManager(),
		}
		ctx, cancel := context.WithCancel(context.Background())
		fileCh := make(chan string) // Unbuffered to force blocking
		files := []string{
			testFilePath1,
			testFilePath2,
			"/test/file3.go",
		}
		// Cancel immediately so sendFiles observes ctx.Done before sending.
		cancel()
		err := p.sendFiles(ctx, files, fileCh)
		assert.Error(t, err)
		// ErrorIs is robust to future error wrapping, unlike strict equality.
		assert.ErrorIs(t, err, context.Canceled)
	})
}

View File

@@ -11,8 +11,12 @@ func (p *Processor) logFinalStats() {
// Log back-pressure stats
backpressureStats := p.backpressure.GetStats()
if backpressureStats.Enabled {
logrus.Infof("Back-pressure stats: processed=%d files, memory=%dMB/%dMB",
backpressureStats.FilesProcessed, backpressureStats.CurrentMemoryUsage/1024/1024, backpressureStats.MaxMemoryUsage/1024/1024)
logrus.Infof(
"Back-pressure stats: processed=%d files, memory=%dMB/%dMB",
backpressureStats.FilesProcessed,
backpressureStats.CurrentMemoryUsage/1024/1024,
backpressureStats.MaxMemoryUsage/1024/1024,
)
}
// Log resource monitoring stats

View File

@@ -30,15 +30,18 @@ func NewProcessor(flags *Flags) *Processor {
}
// configureFileTypes configures the file type registry.
func (p *Processor) configureFileTypes() {
func (p *Processor) configureFileTypes() error {
if config.GetFileTypesEnabled() {
fileproc.ConfigureFromSettings(
config.GetCustomImageExtensions(),
config.GetCustomBinaryExtensions(),
config.GetCustomLanguages(),
config.GetDisabledImageExtensions(),
config.GetDisabledBinaryExtensions(),
config.GetDisabledLanguageExtensions(),
)
if err := fileproc.ConfigureFromSettings(fileproc.RegistryConfig{
CustomImages: config.GetCustomImageExtensions(),
CustomBinary: config.GetCustomBinaryExtensions(),
CustomLanguages: config.GetCustomLanguages(),
DisabledImages: config.GetDisabledImageExtensions(),
DisabledBinary: config.GetDisabledBinaryExtensions(),
DisabledLanguages: config.GetDisabledLanguageExtensions(),
}); err != nil {
return err
}
}
return nil
}

View File

@@ -7,11 +7,16 @@ import (
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/fileproc"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// startWorkers starts the worker goroutines.
func (p *Processor) startWorkers(ctx context.Context, wg *sync.WaitGroup, fileCh chan string, writeCh chan fileproc.WriteRequest) {
func (p *Processor) startWorkers(
ctx context.Context,
wg *sync.WaitGroup,
fileCh chan string,
writeCh chan fileproc.WriteRequest,
) {
for range p.flags.Concurrency {
wg.Add(1)
go p.worker(ctx, wg, fileCh, writeCh)
@@ -19,7 +24,12 @@ func (p *Processor) startWorkers(ctx context.Context, wg *sync.WaitGroup, fileCh
}
// worker is the worker goroutine function.
func (p *Processor) worker(ctx context.Context, wg *sync.WaitGroup, fileCh chan string, writeCh chan fileproc.WriteRequest) {
func (p *Processor) worker(
ctx context.Context,
wg *sync.WaitGroup,
fileCh chan string,
writeCh chan fileproc.WriteRequest,
) {
defer wg.Done()
for {
select {
@@ -42,9 +52,9 @@ func (p *Processor) processFile(ctx context.Context, filePath string, writeCh ch
return
}
absRoot, err := utils.GetAbsolutePath(p.flags.SourceDir)
absRoot, err := gibidiutils.GetAbsolutePath(p.flags.SourceDir)
if err != nil {
utils.LogError("Failed to get absolute path", err)
gibidiutils.LogError("Failed to get absolute path", err)
return
}
@@ -78,7 +88,11 @@ func (p *Processor) sendFiles(ctx context.Context, files []string, fileCh chan s
}
// waitForCompletion waits for all workers to complete.
func (p *Processor) waitForCompletion(wg *sync.WaitGroup, writeCh chan fileproc.WriteRequest, writerDone chan struct{}) {
func (p *Processor) waitForCompletion(
wg *sync.WaitGroup,
writeCh chan fileproc.WriteRequest,
writerDone chan struct{},
) {
wg.Wait()
close(writeCh)
<-writerDone

View File

@@ -0,0 +1,68 @@
package cli
import "testing"
// terminalEnvSetup defines environment variables for terminal detection tests.
// Each field holds the value to assign to one environment variable consulted
// by terminal/color detection; an empty string assigns an empty value (see
// apply for the exact semantics).
type terminalEnvSetup struct {
	Term string // value for TERM
	CI string // value for CI
	GitHubActions string // value for GITHUB_ACTIONS
	NoColor string // value for NO_COLOR
	ForceColor string // value for FORCE_COLOR
}
// apply sets all terminal-related environment variables for the duration of
// the test via t.Setenv, which restores the previous values on cleanup.
//
// Every variable is always set so each test is isolated from the ambient
// environment. Note: t.Setenv with "" assigns an empty value rather than
// unsetting the variable — the detection code is assumed to treat empty as
// unset (TODO confirm against the detection logic).
func (e terminalEnvSetup) apply(t *testing.T) {
	t.Helper()
	t.Setenv("TERM", e.Term)
	t.Setenv("CI", e.CI)
	t.Setenv("GITHUB_ACTIONS", e.GitHubActions)
	t.Setenv("NO_COLOR", e.NoColor)
	t.Setenv("FORCE_COLOR", e.ForceColor)
}
// Common terminal environment setups for reuse across tests.
var (
	// A plain interactive 256-color terminal; not CI, no color overrides.
	envDefaultTerminal = terminalEnvSetup{
		Term: "xterm-256color",
		CI: "",
		NoColor: "",
		ForceColor: "",
	}
	// TERM=dumb, as set by terminals advertising no capabilities.
	envDumbTerminal = terminalEnvSetup{
		Term: "dumb",
	}
	// A generic CI environment that is not GitHub Actions.
	envCIWithoutGitHub = terminalEnvSetup{
		Term: "xterm",
		CI: "true",
		GitHubActions: "",
	}
	// A GitHub Actions CI environment (CI and GITHUB_ACTIONS both set).
	envGitHubActions = terminalEnvSetup{
		Term: "xterm",
		CI: "true",
		GitHubActions: "true",
		NoColor: "",
	}
	// NO_COLOR set: the user has explicitly disabled colored output.
	envNoColor = terminalEnvSetup{
		Term: "xterm-256color",
		CI: "",
		NoColor: "1",
		ForceColor: "",
	}
	// FORCE_COLOR set on an otherwise dumb terminal.
	envForceColor = terminalEnvSetup{
		Term: "dumb",
		ForceColor: "1",
	}
	// Empty TERM, as seen in some minimal or non-interactive environments.
	envEmptyTerm = terminalEnvSetup{
		Term: "",
	}
)

42
cli/test_constants.go Normal file
View File

@@ -0,0 +1,42 @@
package cli
// Test constants to avoid duplication in test files.
// These constants are used across multiple test files in the cli package.
const (
	// Error messages expected from validation and formatting code paths.
	testErrFileNotFound = "file not found"
	testErrPermissionDenied = "permission denied"
	testErrInvalidFormat = "invalid format"
	testErrOther = "other error"
	testErrEncoding = "encoding error"
	testErrSourceRequired = "source directory is required"
	testErrPathTraversal = "path traversal attempt detected"
	testPathTraversalPath = "../../../etc/passwd"
	// Suggestion messages emitted by the error formatter.
	testSuggestionsHeader = "Suggestions:"
	testSuggestCheckPerms = "Check file/directory permissions"
	testSuggestVerifyPath = "Verify the path is correct"
	testSuggestFormat = "Use a supported format: markdown, json, yaml"
	testSuggestFormatEx = "Example: -format markdown"
	testSuggestCheckArgs = "Check your command line arguments"
	testSuggestHelp = "Run with --help for usage information"
	testSuggestDiskSpace = "Verify available disk space"
	testSuggestReduceConcur = "Try with -concurrency 1 to reduce resource usage"
	// UI test strings used when exercising colored/uncolored output.
	testWithColors = "with colors"
	testWithoutColors = "without colors"
	testProcessingMsg = "Processing files"
	// Flag names shared by flag-parsing tests.
	testFlagSource = "-source"
	testFlagConcurrency = "-concurrency"
	// Test file paths used as placeholder inputs.
	testFilePath1 = "/test/file1.go"
	testFilePath2 = "/test/file2.go"
	// Output markers appended to formatted error headers.
	testErrorSuffix = " Error"
)

View File

@@ -8,6 +8,8 @@ import (
"github.com/fatih/color"
"github.com/schollz/progressbar/v3"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// UIManager handles CLI user interface elements.
@@ -44,23 +46,40 @@ func (ui *UIManager) StartProgress(total int, description string) {
return
}
ui.progressBar = progressbar.NewOptions(total,
progressbar.OptionSetWriter(ui.output),
progressbar.OptionSetDescription(description),
progressbar.OptionSetTheme(progressbar.Theme{
// Set progress bar theme based on color support
var theme progressbar.Theme
if ui.enableColors {
theme = progressbar.Theme{
Saucer: color.GreenString("█"),
SaucerHead: color.GreenString("█"),
SaucerPadding: " ",
BarStart: "[",
BarEnd: "]",
}),
}
} else {
theme = progressbar.Theme{
Saucer: "█",
SaucerHead: "█",
SaucerPadding: " ",
BarStart: "[",
BarEnd: "]",
}
}
ui.progressBar = progressbar.NewOptions(
total,
progressbar.OptionSetWriter(ui.output),
progressbar.OptionSetDescription(description),
progressbar.OptionSetTheme(theme),
progressbar.OptionShowCount(),
progressbar.OptionShowIts(),
progressbar.OptionSetWidth(40),
progressbar.OptionThrottle(100*time.Millisecond),
progressbar.OptionOnCompletion(func() {
progressbar.OptionOnCompletion(
func() {
_, _ = fmt.Fprint(ui.output, "\n")
}),
},
),
progressbar.OptionSetRenderBlankState(true),
)
}
@@ -80,40 +99,44 @@ func (ui *UIManager) FinishProgress() {
}
}
// PrintSuccess prints a success message in green.
// writeMessage writes a single formatted line to ui.output, prefixed with the
// given status icon and colorized via colorFunc when colors are enabled.
// Write failures are logged (tagged with methodName) rather than returned.
func (ui *UIManager) writeMessage(
	icon, methodName, format string,
	colorFunc func(string, ...interface{}) string,
	args ...interface{},
) {
	// Pick the renderer: plain Sprintf unless colors are on and a color
	// formatter was supplied.
	render := fmt.Sprintf
	if ui.enableColors && colorFunc != nil {
		render = colorFunc
	}
	line := render(icon+" "+format, args...)
	if _, err := fmt.Fprintf(ui.output, "%s\n", line); err != nil {
		gibidiutils.LogError(fmt.Sprintf("UIManager.%s: failed to write to output", methodName), err)
	}
}
// PrintSuccess prints a success message in green (to ui.output if set).
func (ui *UIManager) PrintSuccess(format string, args ...interface{}) {
if ui.enableColors {
color.Green("✓ "+format, args...)
} else {
ui.printf("✓ "+format+"\n", args...)
}
ui.writeMessage(gibidiutils.IconSuccess, "PrintSuccess", format, color.GreenString, args...)
}
// PrintError prints an error message in red.
// PrintError prints an error message in red (to ui.output if set).
func (ui *UIManager) PrintError(format string, args ...interface{}) {
if ui.enableColors {
color.Red("✗ "+format, args...)
} else {
ui.printf("✗ "+format+"\n", args...)
}
ui.writeMessage(gibidiutils.IconError, "PrintError", format, color.RedString, args...)
}
// PrintWarning prints a warning message in yellow.
// PrintWarning prints a warning message in yellow (to ui.output if set).
func (ui *UIManager) PrintWarning(format string, args ...interface{}) {
if ui.enableColors {
color.Yellow("⚠ "+format, args...)
} else {
ui.printf("⚠ "+format+"\n", args...)
}
ui.writeMessage(gibidiutils.IconWarning, "PrintWarning", format, color.YellowString, args...)
}
// PrintInfo prints an info message in blue.
// PrintInfo prints an info message in blue (to ui.output if set).
func (ui *UIManager) PrintInfo(format string, args ...interface{}) {
if ui.enableColors {
color.Blue(" "+format, args...)
} else {
ui.printf(" "+format+"\n", args...)
}
ui.writeMessage(gibidiutils.IconInfo, "PrintInfo", format, color.BlueString, args...)
}
// PrintHeader prints a header message in bold.
@@ -127,6 +150,11 @@ func (ui *UIManager) PrintHeader(format string, args ...interface{}) {
// isColorTerminal checks if the terminal supports colors.
func isColorTerminal() bool {
// Check if FORCE_COLOR is set
if os.Getenv("FORCE_COLOR") != "" {
return true
}
// Check common environment variables
term := os.Getenv("TERM")
if term == "" || term == "dumb" {
@@ -148,15 +176,9 @@ func isColorTerminal() bool {
return false
}
// Check if FORCE_COLOR is set
if os.Getenv("FORCE_COLOR") != "" {
return true
}
// Default to true for interactive terminals
return isInteractiveTerminal()
}
// isInteractiveTerminal checks if we're running in an interactive terminal.
func isInteractiveTerminal() bool {
// Check if stderr is a terminal (where we output progress/colors)

109
cli/ui_manager_test.go Normal file
View File

@@ -0,0 +1,109 @@
package cli
import (
"bytes"
"os"
"testing"
"github.com/fatih/color"
"github.com/stretchr/testify/assert"
)
// TestNewUIManager verifies that NewUIManager derives its color and progress
// settings from the terminal-related environment variables (TERM, CI,
// GITHUB_ACTIONS, NO_COLOR, FORCE_COLOR) set through the shared fixtures.
func TestNewUIManager(t *testing.T) {
	tests := []struct {
		name             string
		env              terminalEnvSetup
		expectedColors   bool
		expectedProgress bool
	}{
		{
			name:             "default terminal",
			env:              envDefaultTerminal,
			expectedColors:   true,
			expectedProgress: false, // Not a tty in test environment
		},
		{
			name:             "dumb terminal",
			env:              envDumbTerminal,
			expectedColors:   false,
			expectedProgress: false,
		},
		{
			name:             "CI environment without GitHub Actions",
			env:              envCIWithoutGitHub,
			expectedColors:   false,
			expectedProgress: false,
		},
		{
			name:             "GitHub Actions CI",
			env:              envGitHubActions,
			expectedColors:   true,
			expectedProgress: false,
		},
		{
			name:             "NO_COLOR set",
			env:              envNoColor,
			expectedColors:   false,
			expectedProgress: false,
		},
		{
			name:             "FORCE_COLOR set",
			env:              envForceColor,
			expectedColors:   true,
			expectedProgress: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// apply configures the environment for this subtest via t.Setenv.
			tt.env.apply(t)
			ui := NewUIManager()
			assert.NotNil(t, ui)
			assert.NotNil(t, ui.output)
			assert.Equal(t, tt.expectedColors, ui.enableColors, "color state mismatch")
			assert.Equal(t, tt.expectedProgress, ui.enableProgress, "progress state mismatch")
		})
	}
}
// TestSetColorOutput verifies that SetColorOutput toggles both the manager's
// color flag and the package-global fatih/color state, restoring the global
// afterwards so other tests are unaffected.
func TestSetColorOutput(t *testing.T) {
	// Preserve the global color state for the rest of the test run.
	saved := color.NoColor
	defer func() { color.NoColor = saved }()

	ui := &UIManager{output: os.Stderr}

	ui.SetColorOutput(true)
	assert.True(t, ui.enableColors)
	assert.False(t, color.NoColor)

	ui.SetColorOutput(false)
	assert.False(t, ui.enableColors)
	assert.True(t, color.NoColor)
}
// TestSetProgressOutput verifies that SetProgressOutput stores the requested
// enable/disable state on the manager.
func TestSetProgressOutput(t *testing.T) {
	ui := &UIManager{output: os.Stderr}

	for _, enabled := range []bool{true, false} {
		ui.SetProgressOutput(enabled)
		assert.Equal(t, enabled, ui.enableProgress)
	}
}
// TestPrintf verifies that printf formats its arguments and writes the result
// verbatim to the configured output writer.
func TestPrintf(t *testing.T) {
	var sink bytes.Buffer
	ui := &UIManager{output: &sink}

	ui.printf("Test %s %d", "output", 123)

	assert.Equal(t, "Test output 123", sink.String())
}

245
cli/ui_print_test.go Normal file
View File

@@ -0,0 +1,245 @@
package cli
import (
"bytes"
"strings"
"testing"
"github.com/fatih/color"
"github.com/stretchr/testify/assert"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// TestPrintSuccess verifies that PrintSuccess writes the success icon and the
// formatted message, both with and without color output enabled.
func TestPrintSuccess(t *testing.T) {
	tests := []struct {
		name         string
		enableColors bool
		format       string
		args         []interface{}
		expectSymbol string
	}{
		{
			name:         testWithColors,
			enableColors: true,
			format:       "Operation %s",
			args:         []interface{}{"completed"},
			expectSymbol: gibidiutils.IconSuccess,
		},
		{
			name:         testWithoutColors,
			enableColors: false,
			format:       "Operation %s",
			args:         []interface{}{"completed"},
			expectSymbol: gibidiutils.IconSuccess,
		},
		{
			name:         "no arguments",
			enableColors: true,
			format:       "Success",
			args:         nil,
			expectSymbol: gibidiutils.IconSuccess,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				buf := &bytes.Buffer{}
				ui := &UIManager{
					enableColors: tt.enableColors,
					output:       buf,
				}
				// Align the global fatih/color state with the case and
				// restore it so other tests are unaffected.
				prev := color.NoColor
				color.NoColor = !tt.enableColors
				defer func() { color.NoColor = prev }()
				ui.PrintSuccess(tt.format, tt.args...)
				output := buf.String()
				assert.Contains(t, output, tt.expectSymbol)
				if len(tt.args) > 0 {
					assert.Contains(t, output, "completed")
				}
			},
		)
	}
}
// TestPrintError verifies that PrintError writes the error icon and the
// formatted message, both with and without color output enabled.
func TestPrintError(t *testing.T) {
	tests := []struct {
		name         string
		enableColors bool
		format       string
		args         []interface{}
		expectSymbol string
	}{
		{
			name:         testWithColors,
			enableColors: true,
			format:       "Failed to %s",
			args:         []interface{}{"process"},
			expectSymbol: gibidiutils.IconError,
		},
		{
			name:         testWithoutColors,
			enableColors: false,
			format:       "Failed to %s",
			args:         []interface{}{"process"},
			expectSymbol: gibidiutils.IconError,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				buf := &bytes.Buffer{}
				ui := &UIManager{
					enableColors: tt.enableColors,
					output:       buf,
				}
				// Align the global fatih/color state with the case and
				// restore it so other tests are unaffected.
				prev := color.NoColor
				color.NoColor = !tt.enableColors
				defer func() { color.NoColor = prev }()
				ui.PrintError(tt.format, tt.args...)
				output := buf.String()
				assert.Contains(t, output, tt.expectSymbol)
				if len(tt.args) > 0 {
					assert.Contains(t, output, "process")
				}
			},
		)
	}
}
func TestPrintWarning(t *testing.T) {
buf := &bytes.Buffer{}
ui := &UIManager{
enableColors: true,
output: buf,
}
ui.PrintWarning("This is a %s", "warning")
output := buf.String()
assert.Contains(t, output, gibidiutils.IconWarning)
}
// TestPrintInfo checks that PrintInfo renders the info icon together with the
// formatted arguments.
func TestPrintInfo(t *testing.T) {
	// Save and restore the global fatih/color state.
	saved := color.NoColor
	defer func() { color.NoColor = saved }()
	color.NoColor = false

	var out bytes.Buffer
	ui := &UIManager{enableColors: true, output: &out}

	ui.PrintInfo("Information: %d items", 42)

	got := out.String()
	assert.Contains(t, got, gibidiutils.IconInfo)
	assert.Contains(t, got, "42")
}
// TestPrintHeader verifies that PrintHeader renders the formatted header text
// with colors both enabled and disabled.
func TestPrintHeader(t *testing.T) {
	tests := []struct {
		name         string
		enableColors bool
		format       string
		args         []interface{}
	}{
		{
			name:         testWithColors,
			enableColors: true,
			format:       "Header %s",
			args:         []interface{}{"Title"},
		},
		{
			name:         testWithoutColors,
			enableColors: false,
			format:       "Header %s",
			args:         []interface{}{"Title"},
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				// Capture original color.NoColor state and restore after test
				orig := color.NoColor
				defer func() { color.NoColor = orig }()
				buf := &bytes.Buffer{}
				ui := &UIManager{
					enableColors: tt.enableColors,
					output:       buf,
				}
				color.NoColor = !tt.enableColors
				ui.PrintHeader(tt.format, tt.args...)
				output := buf.String()
				// Only the interpolated argument is asserted; the exact
				// decoration depends on the color settings.
				assert.Contains(t, output, "Title")
			},
		)
	}
}
// Test that all print methods handle newlines correctly.
// Colors are disabled so output bytes are deterministic; each Print* method
// must emit its status icon and terminate the line with "\n".
func TestPrintMethodsNewlines(t *testing.T) {
	tests := []struct {
		name   string
		method func(*UIManager, string, ...interface{})
		symbol string
	}{
		{
			name:   "PrintSuccess",
			method: (*UIManager).PrintSuccess,
			symbol: gibidiutils.IconSuccess,
		},
		{
			name:   "PrintError",
			method: (*UIManager).PrintError,
			symbol: gibidiutils.IconError,
		},
		{
			name:   "PrintWarning",
			method: (*UIManager).PrintWarning,
			symbol: gibidiutils.IconWarning,
		},
		{
			name:   "PrintInfo",
			method: (*UIManager).PrintInfo,
			symbol: gibidiutils.IconInfo,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				// Disable colors for consistent testing
				oldNoColor := color.NoColor
				color.NoColor = true
				defer func() { color.NoColor = oldNoColor }()
				buf := &bytes.Buffer{}
				ui := &UIManager{
					enableColors: false,
					output:       buf,
				}
				// Invoke the method value with an explicit receiver.
				tt.method(ui, "Test message")
				output := buf.String()
				assert.True(t, strings.HasSuffix(output, "\n"))
				assert.Contains(t, output, tt.symbol)
			},
		)
	}
}

147
cli/ui_progress_test.go Normal file
View File

@@ -0,0 +1,147 @@
package cli
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
)
// TestStartProgress verifies that a progress bar is created only when progress
// output is enabled and the total is positive.
func TestStartProgress(t *testing.T) {
	tests := []struct {
		name        string
		total       int
		description string
		enabled     bool
		expectBar   bool
	}{
		{
			name:        "progress enabled with valid total",
			total:       100,
			description: testProcessingMsg,
			enabled:     true,
			expectBar:   true,
		},
		{
			name:        "progress disabled",
			total:       100,
			description: testProcessingMsg,
			enabled:     false,
			expectBar:   false,
		},
		{
			name:        "zero total",
			total:       0,
			description: testProcessingMsg,
			enabled:     true,
			expectBar:   false,
		},
		{
			name:        "negative total",
			total:       -5,
			description: testProcessingMsg,
			enabled:     true,
			expectBar:   false,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(t *testing.T) {
				ui := &UIManager{
					enableProgress: tt.enabled,
					output:         &bytes.Buffer{},
				}
				ui.StartProgress(tt.total, tt.description)
				if tt.expectBar {
					assert.NotNil(t, ui.progressBar)
				} else {
					assert.Nil(t, ui.progressBar)
				}
			},
		)
	}
}
// TestUpdateProgress verifies that UpdateProgress is safe to call both with
// and without an active progress bar — it must never panic, including on
// repeated updates.
func TestUpdateProgress(t *testing.T) {
	// The unused expectUpdate field from the original table was removed:
	// it was assigned but never read (dead test data flagged by linters).
	tests := []struct {
		name        string
		setupBar    bool
		enabledProg bool
	}{
		{
			name:        "with progress bar",
			setupBar:    true,
			enabledProg: true,
		},
		{
			name:        "without progress bar",
			setupBar:    false,
			enabledProg: false,
		},
	}
	for _, tt := range tests {
		t.Run(
			tt.name, func(_ *testing.T) {
				ui := &UIManager{
					enableProgress: tt.enabledProg,
					output:         &bytes.Buffer{},
				}
				if tt.setupBar {
					ui.StartProgress(10, "Test")
				}
				// Should not panic, with or without a bar.
				ui.UpdateProgress(1)
				// Multiple updates should not panic either.
				ui.UpdateProgress(2)
				ui.UpdateProgress(3)
			},
		)
	}
}
// TestFinishProgress verifies that FinishProgress never panics and always
// leaves the manager without a progress bar, whether or not one was started.
func TestFinishProgress(t *testing.T) {
	cases := map[string]bool{
		"with progress bar":    true,
		"without progress bar": false,
	}
	for name, setupBar := range cases {
		t.Run(
			name, func(t *testing.T) {
				ui := &UIManager{
					enableProgress: true,
					output:         &bytes.Buffer{},
				}
				if setupBar {
					ui.StartProgress(10, "Test")
				}
				// Must not panic in either state.
				ui.FinishProgress()
				// The bar reference must be cleared afterwards.
				assert.Nil(t, ui.progressBar)
			},
		)
	}
}

62
cli/ui_terminal_test.go Normal file
View File

@@ -0,0 +1,62 @@
package cli
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestIsColorTerminal exercises isColorTerminal against the shared terminal
// environment fixtures: dumb/empty TERM, NO_COLOR, and generic CI are
// expected to disable colors, while GitHub Actions and FORCE_COLOR enable them.
func TestIsColorTerminal(t *testing.T) {
	tests := []struct {
		name     string
		env      terminalEnvSetup
		expected bool
	}{
		{
			name:     "dumb terminal",
			env:      envDumbTerminal,
			expected: false,
		},
		{
			name:     "empty TERM",
			env:      envEmptyTerm,
			expected: false,
		},
		{
			name:     "CI without GitHub Actions",
			env:      envCIWithoutGitHub,
			expected: false,
		},
		{
			name:     "GitHub Actions",
			env:      envGitHubActions,
			expected: true,
		},
		{
			name:     "NO_COLOR set",
			env:      envNoColor,
			expected: false,
		},
		{
			name:     "FORCE_COLOR set",
			env:      envForceColor,
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// apply sets the environment for this subtest via t.Setenv.
			tt.env.apply(t)
			result := isColorTerminal()
			assert.Equal(t, tt.expected, result)
		})
	}
}
func TestIsInteractiveTerminal(t *testing.T) {
// This function checks if stderr is a terminal
// In test environment, it will typically return false
result := isInteractiveTerminal()
assert.False(t, result)
}

View File

@@ -9,7 +9,7 @@ import (
"strings"
"github.com/ivuorinen/gibidify/benchmark"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
var (
@@ -26,7 +26,7 @@ func main() {
flag.Parse()
if err := runBenchmarks(); err != nil {
fmt.Fprintf(os.Stderr, "Benchmark failed: %v\n", err)
_, _ = fmt.Fprintf(os.Stderr, "Benchmark failed: %v\n", err)
os.Exit(1)
}
}
@@ -50,7 +50,10 @@ func runBenchmarks() error {
case "format":
return runFormatBenchmark()
default:
return utils.NewValidationError(utils.CodeValidationFormat, "invalid benchmark type: "+*benchmarkType)
return gibidiutils.NewValidationError(
gibidiutils.CodeValidationFormat,
"invalid benchmark type: "+*benchmarkType,
)
}
}
@@ -58,9 +61,14 @@ func runCollectionBenchmark() error {
fmt.Println("Running file collection benchmark...")
result, err := benchmark.FileCollectionBenchmark(*sourceDir, *numFiles)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "file collection benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"file collection benchmark failed",
)
}
benchmark.PrintBenchmarkResult(result)
benchmark.PrintResult(result)
return nil
}
@@ -68,24 +76,39 @@ func runProcessingBenchmark() error {
fmt.Printf("Running file processing benchmark (format: %s, concurrency: %d)...\n", *format, *concurrency)
result, err := benchmark.FileProcessingBenchmark(*sourceDir, *format, *concurrency)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "file processing benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"file processing benchmark failed",
)
}
benchmark.PrintBenchmarkResult(result)
benchmark.PrintResult(result)
return nil
}
func runConcurrencyBenchmark() error {
concurrencyLevels, err := parseConcurrencyList(*concurrencyList)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeValidation, utils.CodeValidationFormat, "invalid concurrency list")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
"invalid concurrency list",
)
}
fmt.Printf("Running concurrency benchmark (format: %s, levels: %v)...\n", *format, concurrencyLevels)
suite, err := benchmark.ConcurrencyBenchmark(*sourceDir, *format, concurrencyLevels)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "concurrency benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"concurrency benchmark failed",
)
}
benchmark.PrintBenchmarkSuite(suite)
benchmark.PrintSuite(suite)
return nil
}
@@ -94,9 +117,14 @@ func runFormatBenchmark() error {
fmt.Printf("Running format benchmark (formats: %v)...\n", formats)
suite, err := benchmark.FormatBenchmark(*sourceDir, formats)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "format benchmark failed")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeProcessing,
gibidiutils.CodeProcessingCollection,
"format benchmark failed",
)
}
benchmark.PrintBenchmarkSuite(suite)
benchmark.PrintSuite(suite)
return nil
}
@@ -115,16 +143,28 @@ func parseConcurrencyList(list string) ([]int, error) {
part = strings.TrimSpace(part)
var level int
if _, err := fmt.Sscanf(part, "%d", &level); err != nil {
return nil, utils.WrapErrorf(err, utils.ErrorTypeValidation, utils.CodeValidationFormat, "invalid concurrency level: %s", part)
return nil, gibidiutils.WrapErrorf(
err,
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
"invalid concurrency level: %s",
part,
)
}
if level <= 0 {
return nil, utils.NewValidationError(utils.CodeValidationFormat, "concurrency level must be positive: "+part)
return nil, gibidiutils.NewValidationError(
gibidiutils.CodeValidationFormat,
"concurrency level must be positive: "+part,
)
}
levels = append(levels, level)
}
if len(levels) == 0 {
return nil, utils.NewValidationError(utils.CodeValidationFormat, "no valid concurrency levels found")
return nil, gibidiutils.NewValidationError(
gibidiutils.CodeValidationFormat,
"no valid concurrency levels found",
)
}
return levels, nil

View File

@@ -1,13 +1,15 @@
package config
import (
"flag"
"os"
"path/filepath"
"sync/atomic"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// LoadConfig reads configuration from a YAML file.
@@ -15,13 +17,18 @@ import (
// 1. $XDG_CONFIG_HOME/gibidify/config.yaml
// 2. $HOME/.config/gibidify/config.yaml
// 3. The current directory as fallback.
//
// Note: LoadConfig relies on isRunningTest() which requires the testing package
// to have registered its flags (e.g., via flag.Parse() or during test initialization).
// If called too early (e.g., from init() or before TestMain), test detection may not work reliably.
// For explicit control, use SetRunningInTest() before calling LoadConfig.
func LoadConfig() {
viper.SetConfigName("config")
viper.SetConfigType("yaml")
if xdgConfig := os.Getenv("XDG_CONFIG_HOME"); xdgConfig != "" {
// Validate XDG_CONFIG_HOME for path traversal attempts
if err := utils.ValidateConfigPath(xdgConfig); err != nil {
if err := gibidiutils.ValidateConfigPath(xdgConfig); err != nil {
logrus.Warnf("Invalid XDG_CONFIG_HOME path, using default config: %v", err)
} else {
configPath := filepath.Join(xdgConfig, "gibidify")
@@ -37,7 +44,14 @@ func LoadConfig() {
}
if err := viper.ReadInConfig(); err != nil {
// Suppress this info-level log when running tests.
// Prefer an explicit test flag (SetRunningInTest) but fall back to runtime detection.
if runningInTest.Load() || isRunningTest() {
// Keep a debug-level record so tests that enable debug can still see it.
logrus.Debugf("Config file not found (tests): %v", err)
} else {
logrus.Infof("Config file not found, using default values: %v", err)
}
setDefaultConfig()
} else {
logrus.Infof("Using config file: %s", viper.ConfigFileUsed())
@@ -88,3 +102,30 @@ func setDefaultConfig() {
viper.SetDefault("resourceLimits.enableGracefulDegradation", true)
viper.SetDefault("resourceLimits.enableResourceMonitoring", true)
}
// runningInTest records an explicit "running under go test" override set via
// SetRunningInTest; it is read (atomically) in LoadConfig alongside the
// runtime detection in isRunningTest.
var runningInTest atomic.Bool

// SetRunningInTest allows tests to explicitly indicate they are running under `go test`.
// Call this from TestMain in tests to suppress noisy info logs while still allowing
// debug-level output for tests that enable it.
func SetRunningInTest(b bool) {
	runningInTest.Store(b)
}
// isRunningTest attempts to detect if the binary is running under `go test`.
// Prefer checking for standard test flags registered by the testing package.
// This is reliable when `go test` initializes the flag set.
//
// IMPORTANT: This function relies on flag.Lookup which returns nil if the testing
// package hasn't registered test flags yet. Callers must invoke this after flag
// parsing (or test flag registration) has occurred. If invoked too early (e.g.,
// from init() or early in TestMain before flags are parsed), detection will fail.
// For explicit control, use SetRunningInTest() instead.
func isRunningTest() bool {
	// The testing package registers these well-known flags; their presence
	// in the flag registry means we are running under `go test`.
	// Return the condition directly instead of if-return-true/return-false.
	return flag.Lookup("test.v") != nil ||
		flag.Lookup("test.run") != nil ||
		flag.Lookup("test.bench") != nil
}

View File

@@ -87,7 +87,7 @@ ignoreDirectories:
tempDir := t.TempDir()
configFile := tempDir + "/config.yaml"
err := os.WriteFile(configFile, []byte(configContent), 0o644)
err := os.WriteFile(configFile, []byte(configContent), 0o600)
if err != nil {
t.Fatalf("Failed to write config file: %v", err)
}
@@ -104,7 +104,10 @@ ignoreDirectories:
t.Errorf("Expected default file size limit after validation failure, got %d", config.GetFileSizeLimit())
}
if containsString(config.GetIgnoredDirectories(), "") {
t.Errorf("Expected ignored directories not to contain empty string after validation failure, got %v", config.GetIgnoredDirectories())
t.Errorf(
"Expected ignored directories not to contain empty string after validation failure, got %v",
config.GetIgnoredDirectories(),
)
}
}

View File

@@ -6,240 +6,532 @@ import (
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// ValidateConfig validates the loaded configuration.
func ValidateConfig() error {
var validationErrors []string
// Validate file size limit
// validateFileSizeLimit validates the file size limit configuration.
func validateFileSizeLimit() []string {
var errors []string
fileSizeLimit := viper.GetInt64("fileSizeLimit")
if fileSizeLimit < MinFileSizeLimit {
validationErrors = append(validationErrors, fmt.Sprintf("fileSizeLimit (%d) is below minimum (%d)", fileSizeLimit, MinFileSizeLimit))
errors = append(
errors,
fmt.Sprintf("fileSizeLimit (%d) is below minimum (%d)", fileSizeLimit, MinFileSizeLimit),
)
}
if fileSizeLimit > MaxFileSizeLimit {
validationErrors = append(validationErrors, fmt.Sprintf("fileSizeLimit (%d) exceeds maximum (%d)", fileSizeLimit, MaxFileSizeLimit))
errors = append(
errors,
fmt.Sprintf("fileSizeLimit (%d) exceeds maximum (%d)", fileSizeLimit, MaxFileSizeLimit),
)
}
return errors
}
// Validate ignore directories
// validateIgnoreDirectories validates the ignore directories configuration.
func validateIgnoreDirectories() []string {
var errors []string
ignoreDirectories := viper.GetStringSlice("ignoreDirectories")
for i, dir := range ignoreDirectories {
dir = strings.TrimSpace(dir)
if dir == "" {
validationErrors = append(validationErrors, fmt.Sprintf("ignoreDirectories[%d] is empty", i))
errors = append(errors, fmt.Sprintf("ignoreDirectories[%d] is empty", i))
continue
}
if strings.Contains(dir, "/") {
validationErrors = append(validationErrors, fmt.Sprintf("ignoreDirectories[%d] (%s) contains path separator - only directory names are allowed", i, dir))
errors = append(
errors,
fmt.Sprintf(
"ignoreDirectories[%d] (%s) contains path separator - only directory names are allowed",
i,
dir,
),
)
}
if strings.HasPrefix(dir, ".") && dir != ".git" && dir != ".vscode" && dir != ".idea" {
validationErrors = append(validationErrors, fmt.Sprintf("ignoreDirectories[%d] (%s) starts with dot - this may cause unexpected behavior", i, dir))
errors = append(
errors,
fmt.Sprintf("ignoreDirectories[%d] (%s) starts with dot - this may cause unexpected behavior", i, dir),
)
}
}
return errors
}
// Validate supported output formats if configured
// validateSupportedFormats validates the supported output formats configuration.
func validateSupportedFormats() []string {
var errors []string
if viper.IsSet("supportedFormats") {
supportedFormats := viper.GetStringSlice("supportedFormats")
validFormats := map[string]bool{"json": true, "yaml": true, "markdown": true}
for i, format := range supportedFormats {
format = strings.ToLower(strings.TrimSpace(format))
if !validFormats[format] {
validationErrors = append(validationErrors, fmt.Sprintf("supportedFormats[%d] (%s) is not a valid format (json, yaml, markdown)", i, format))
errors = append(
errors,
fmt.Sprintf("supportedFormats[%d] (%s) is not a valid format (json, yaml, markdown)", i, format),
)
}
}
}
return errors
}
// Validate concurrency settings if configured
// validateConcurrencySettings validates the concurrency settings configuration.
func validateConcurrencySettings() []string {
var errors []string
if viper.IsSet("maxConcurrency") {
maxConcurrency := viper.GetInt("maxConcurrency")
if maxConcurrency < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("maxConcurrency (%d) must be at least 1", maxConcurrency))
errors = append(
errors,
fmt.Sprintf("maxConcurrency (%d) must be at least 1", maxConcurrency),
)
}
if maxConcurrency > 100 {
validationErrors = append(validationErrors, fmt.Sprintf("maxConcurrency (%d) is unreasonably high (max 100)", maxConcurrency))
errors = append(
errors,
fmt.Sprintf("maxConcurrency (%d) is unreasonably high (max 100)", maxConcurrency),
)
}
}
return errors
}
// Validate file patterns if configured
// validateFilePatterns validates the file patterns configuration.
func validateFilePatterns() []string {
var errors []string
if viper.IsSet("filePatterns") {
filePatterns := viper.GetStringSlice("filePatterns")
for i, pattern := range filePatterns {
pattern = strings.TrimSpace(pattern)
if pattern == "" {
validationErrors = append(validationErrors, fmt.Sprintf("filePatterns[%d] is empty", i))
errors = append(errors, fmt.Sprintf("filePatterns[%d] is empty", i))
continue
}
// Basic validation - patterns should contain at least one alphanumeric character
if !strings.ContainsAny(pattern, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") {
validationErrors = append(validationErrors, fmt.Sprintf("filePatterns[%d] (%s) appears to be invalid", i, pattern))
errors = append(
errors,
fmt.Sprintf("filePatterns[%d] (%s) appears to be invalid", i, pattern),
)
}
}
}
return errors
}
// validateFileTypes validates the FileTypeRegistry configuration.
// validateCustomImageExtensions validates custom image extensions configuration.
func validateCustomImageExtensions() []string {
var errors []string
if !viper.IsSet("fileTypes.customImageExtensions") {
return errors
}
// Validate FileTypeRegistry configuration
if viper.IsSet("fileTypes.customImageExtensions") {
customImages := viper.GetStringSlice("fileTypes.customImageExtensions")
for i, ext := range customImages {
ext = strings.TrimSpace(ext)
if ext == "" {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customImageExtensions[%d] is empty", i))
errors = append(
errors,
fmt.Sprintf("fileTypes.customImageExtensions[%d] is empty", i),
)
continue
}
if !strings.HasPrefix(ext, ".") {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customImageExtensions[%d] (%s) must start with a dot", i, ext))
errors = append(
errors,
fmt.Sprintf("fileTypes.customImageExtensions[%d] (%s) must start with a dot", i, ext),
)
}
}
return errors
}
// validateCustomBinaryExtensions validates custom binary extensions configuration.
func validateCustomBinaryExtensions() []string {
var errors []string
if !viper.IsSet("fileTypes.customBinaryExtensions") {
return errors
}
if viper.IsSet("fileTypes.customBinaryExtensions") {
customBinary := viper.GetStringSlice("fileTypes.customBinaryExtensions")
for i, ext := range customBinary {
ext = strings.TrimSpace(ext)
if ext == "" {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customBinaryExtensions[%d] is empty", i))
errors = append(
errors,
fmt.Sprintf("fileTypes.customBinaryExtensions[%d] is empty", i),
)
continue
}
if !strings.HasPrefix(ext, ".") {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customBinaryExtensions[%d] (%s) must start with a dot", i, ext))
errors = append(
errors,
fmt.Sprintf("fileTypes.customBinaryExtensions[%d] (%s) must start with a dot", i, ext),
)
}
}
return errors
}
// validateCustomLanguages validates custom languages configuration.
func validateCustomLanguages() []string {
var errors []string
if !viper.IsSet("fileTypes.customLanguages") {
return errors
}
if viper.IsSet("fileTypes.customLanguages") {
customLangs := viper.GetStringMapString("fileTypes.customLanguages")
for ext, lang := range customLangs {
ext = strings.TrimSpace(ext)
lang = strings.TrimSpace(lang)
if ext == "" {
validationErrors = append(validationErrors, "fileTypes.customLanguages contains empty extension key")
errors = append(errors, "fileTypes.customLanguages contains empty extension key")
continue
}
if !strings.HasPrefix(ext, ".") {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customLanguages extension (%s) must start with a dot", ext))
errors = append(
errors,
fmt.Sprintf("fileTypes.customLanguages extension (%s) must start with a dot", ext),
)
}
if lang == "" {
validationErrors = append(validationErrors, fmt.Sprintf("fileTypes.customLanguages[%s] has empty language value", ext))
errors = append(
errors,
fmt.Sprintf("fileTypes.customLanguages[%s] has empty language value", ext),
)
}
}
return errors
}
// validateFileTypes validates the FileTypeRegistry configuration.
func validateFileTypes() []string {
var errors []string
errors = append(errors, validateCustomImageExtensions()...)
errors = append(errors, validateCustomBinaryExtensions()...)
errors = append(errors, validateCustomLanguages()...)
return errors
}
// validateBackpressureConfig validates the back-pressure configuration.
// validateBackpressureMaxPendingFiles validates max pending files configuration.
func validateBackpressureMaxPendingFiles() []string {
var errors []string
if !viper.IsSet("backpressure.maxPendingFiles") {
return errors
}
// Validate back-pressure configuration
if viper.IsSet("backpressure.maxPendingFiles") {
maxPendingFiles := viper.GetInt("backpressure.maxPendingFiles")
if maxPendingFiles < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingFiles (%d) must be at least 1", maxPendingFiles))
errors = append(
errors,
fmt.Sprintf("backpressure.maxPendingFiles (%d) must be at least 1", maxPendingFiles),
)
}
if maxPendingFiles > 100000 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingFiles (%d) is unreasonably high (max 100000)", maxPendingFiles))
errors = append(
errors,
fmt.Sprintf("backpressure.maxPendingFiles (%d) is unreasonably high (max 100000)", maxPendingFiles),
)
}
return errors
}
// validateBackpressureMaxPendingWrites validates max pending writes configuration.
func validateBackpressureMaxPendingWrites() []string {
var errors []string
if !viper.IsSet("backpressure.maxPendingWrites") {
return errors
}
if viper.IsSet("backpressure.maxPendingWrites") {
maxPendingWrites := viper.GetInt("backpressure.maxPendingWrites")
if maxPendingWrites < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingWrites (%d) must be at least 1", maxPendingWrites))
errors = append(
errors,
fmt.Sprintf("backpressure.maxPendingWrites (%d) must be at least 1", maxPendingWrites),
)
}
if maxPendingWrites > 10000 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxPendingWrites (%d) is unreasonably high (max 10000)", maxPendingWrites))
errors = append(
errors,
fmt.Sprintf("backpressure.maxPendingWrites (%d) is unreasonably high (max 10000)", maxPendingWrites),
)
}
return errors
}
// validateBackpressureMaxMemoryUsage validates max memory usage configuration.
func validateBackpressureMaxMemoryUsage() []string {
var errors []string
if !viper.IsSet("backpressure.maxMemoryUsage") {
return errors
}
if viper.IsSet("backpressure.maxMemoryUsage") {
maxMemoryUsage := viper.GetInt64("backpressure.maxMemoryUsage")
if maxMemoryUsage < 1048576 { // 1MB minimum
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxMemoryUsage (%d) must be at least 1MB (1048576 bytes)", maxMemoryUsage))
errors = append(
errors,
fmt.Sprintf("backpressure.maxMemoryUsage (%d) must be at least 1MB (1048576 bytes)", maxMemoryUsage),
)
}
if maxMemoryUsage > 10737418240 { // 10GB maximum
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.maxMemoryUsage (%d) is unreasonably high (max 10GB)", maxMemoryUsage))
if maxMemoryUsage > 104857600 { // 100MB maximum
errors = append(
errors,
fmt.Sprintf("backpressure.maxMemoryUsage (%d) is unreasonably high (max 100MB)", maxMemoryUsage),
)
}
return errors
}
// validateBackpressureMemoryCheckInterval validates memory check interval configuration.
func validateBackpressureMemoryCheckInterval() []string {
var errors []string
if !viper.IsSet("backpressure.memoryCheckInterval") {
return errors
}
if viper.IsSet("backpressure.memoryCheckInterval") {
interval := viper.GetInt("backpressure.memoryCheckInterval")
if interval < 1 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.memoryCheckInterval (%d) must be at least 1", interval))
errors = append(
errors,
fmt.Sprintf("backpressure.memoryCheckInterval (%d) must be at least 1", interval),
)
}
if interval > 100000 {
validationErrors = append(validationErrors, fmt.Sprintf("backpressure.memoryCheckInterval (%d) is unreasonably high (max 100000)", interval))
errors = append(
errors,
fmt.Sprintf("backpressure.memoryCheckInterval (%d) is unreasonably high (max 100000)", interval),
)
}
return errors
}
// validateBackpressureConfig validates the back-pressure configuration.
// It aggregates the validation errors from each back-pressure sub-validator.
func validateBackpressureConfig() []string {
	var errs []string
	for _, validate := range []func() []string{
		validateBackpressureMaxPendingFiles,
		validateBackpressureMaxPendingWrites,
		validateBackpressureMaxMemoryUsage,
		validateBackpressureMemoryCheckInterval,
	} {
		errs = append(errs, validate()...)
	}
	return errs
}
// validateResourceLimits validates the resource limits configuration.
// validateResourceLimitsMaxFiles validates max files configuration.
func validateResourceLimitsMaxFiles() []string {
var errors []string
if !viper.IsSet("resourceLimits.maxFiles") {
return errors
}
// Validate resource limits configuration
if viper.IsSet("resourceLimits.maxFiles") {
maxFiles := viper.GetInt("resourceLimits.maxFiles")
if maxFiles < MinMaxFiles {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxFiles (%d) must be at least %d", maxFiles, MinMaxFiles))
errors = append(
errors,
fmt.Sprintf("resourceLimits.maxFiles (%d) must be at least %d", maxFiles, MinMaxFiles),
)
}
if maxFiles > MaxMaxFiles {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxFiles (%d) exceeds maximum (%d)", maxFiles, MaxMaxFiles))
errors = append(
errors,
fmt.Sprintf("resourceLimits.maxFiles (%d) exceeds maximum (%d)", maxFiles, MaxMaxFiles),
)
}
return errors
}
// validateResourceLimitsMaxTotalSize validates max total size configuration.
func validateResourceLimitsMaxTotalSize() []string {
var errors []string
if !viper.IsSet("resourceLimits.maxTotalSize") {
return errors
}
if viper.IsSet("resourceLimits.maxTotalSize") {
maxTotalSize := viper.GetInt64("resourceLimits.maxTotalSize")
if maxTotalSize < MinMaxTotalSize {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxTotalSize (%d) must be at least %d", maxTotalSize, MinMaxTotalSize))
errors = append(
errors,
fmt.Sprintf("resourceLimits.maxTotalSize (%d) must be at least %d", maxTotalSize, MinMaxTotalSize),
)
}
if maxTotalSize > MaxMaxTotalSize {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxTotalSize (%d) exceeds maximum (%d)", maxTotalSize, MaxMaxTotalSize))
errors = append(
errors,
fmt.Sprintf("resourceLimits.maxTotalSize (%d) exceeds maximum (%d)", maxTotalSize, MaxMaxTotalSize),
)
}
return errors
}
// validateResourceLimitsTimeouts validates timeout configurations.
// Checks both the per-file processing timeout and the overall timeout against
// their respective [Min, Max] bounds; unset keys are skipped.
func validateResourceLimitsTimeouts() []string {
	var errors []string
	if viper.IsSet("resourceLimits.fileProcessingTimeoutSec") {
		timeout := viper.GetInt("resourceLimits.fileProcessingTimeoutSec")
		if timeout < MinFileProcessingTimeoutSec {
			errors = append(
				errors,
				fmt.Sprintf(
					"resourceLimits.fileProcessingTimeoutSec (%d) must be at least %d",
					timeout,
					MinFileProcessingTimeoutSec,
				),
			)
		}
		if timeout > MaxFileProcessingTimeoutSec {
			errors = append(
				errors,
				fmt.Sprintf(
					"resourceLimits.fileProcessingTimeoutSec (%d) exceeds maximum (%d)",
					timeout,
					MaxFileProcessingTimeoutSec,
				),
			)
		}
	}
	if viper.IsSet("resourceLimits.overallTimeoutSec") {
		timeout := viper.GetInt("resourceLimits.overallTimeoutSec")
		if timeout < MinOverallTimeoutSec {
			errors = append(
				errors,
				fmt.Sprintf("resourceLimits.overallTimeoutSec (%d) must be at least %d", timeout, MinOverallTimeoutSec),
			)
		}
		if timeout > MaxOverallTimeoutSec {
			errors = append(
				errors,
				fmt.Sprintf(
					"resourceLimits.overallTimeoutSec (%d) exceeds maximum (%d)",
					timeout,
					MaxOverallTimeoutSec,
				),
			)
		}
	}
	return errors
}
// validateResourceLimitsConcurrency validates concurrency configurations.
func validateResourceLimitsConcurrency() []string {
var errors []string
if viper.IsSet("resourceLimits.maxConcurrentReads") {
maxReads := viper.GetInt("resourceLimits.maxConcurrentReads")
if maxReads < MinMaxConcurrentReads {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxConcurrentReads (%d) must be at least %d", maxReads, MinMaxConcurrentReads))
errors = append(
errors,
fmt.Sprintf(
"resourceLimits.maxConcurrentReads (%d) must be at least %d",
maxReads,
MinMaxConcurrentReads,
),
)
}
if maxReads > MaxMaxConcurrentReads {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.maxConcurrentReads (%d) exceeds maximum (%d)", maxReads, MaxMaxConcurrentReads))
errors = append(
errors,
fmt.Sprintf(
"resourceLimits.maxConcurrentReads (%d) exceeds maximum (%d)",
maxReads,
MaxMaxConcurrentReads,
),
)
}
}
if viper.IsSet("resourceLimits.rateLimitFilesPerSec") {
rateLimit := viper.GetInt("resourceLimits.rateLimitFilesPerSec")
if rateLimit < MinRateLimitFilesPerSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.rateLimitFilesPerSec (%d) must be at least %d", rateLimit, MinRateLimitFilesPerSec))
errors = append(
errors,
fmt.Sprintf(
"resourceLimits.rateLimitFilesPerSec (%d) must be at least %d",
rateLimit,
MinRateLimitFilesPerSec,
),
)
}
if rateLimit > MaxRateLimitFilesPerSec {
validationErrors = append(validationErrors, fmt.Sprintf("resourceLimits.rateLimitFilesPerSec (%d) exceeds maximum (%d)", rateLimit, MaxRateLimitFilesPerSec))
errors = append(
errors,
fmt.Sprintf(
"resourceLimits.rateLimitFilesPerSec (%d) exceeds maximum (%d)",
rateLimit,
MaxRateLimitFilesPerSec,
),
)
}
}
if viper.IsSet("resourceLimits.hardMemoryLimitMB") {
return errors
}
// validateResourceLimitsMemory validates memory limit configuration.
// Returns a list of human-readable validation errors; empty when the key is
// unset or the value is within [MinHardMemoryLimitMB, MaxHardMemoryLimitMB].
func validateResourceLimitsMemory() []string {
	var errors []string
	if !viper.IsSet("resourceLimits.hardMemoryLimitMB") {
		// Unset means the default applies; nothing to validate.
		return errors
	}
	memLimit := viper.GetInt("resourceLimits.hardMemoryLimitMB")
	if memLimit < MinHardMemoryLimitMB {
		errors = append(
			errors,
			fmt.Sprintf(
				"resourceLimits.hardMemoryLimitMB (%d) must be at least %d",
				memLimit,
				MinHardMemoryLimitMB,
			),
		)
	}
	if memLimit > MaxHardMemoryLimitMB {
		errors = append(
			errors,
			fmt.Sprintf(
				"resourceLimits.hardMemoryLimitMB (%d) exceeds maximum (%d)",
				memLimit,
				MaxHardMemoryLimitMB,
			),
		)
	}
	return errors
}
// validateResourceLimits validates the resource limits configuration.
// It aggregates the validation errors from each resource-limit sub-validator.
func validateResourceLimits() []string {
	var errs []string
	for _, validate := range []func() []string{
		validateResourceLimitsMaxFiles,
		validateResourceLimitsMaxTotalSize,
		validateResourceLimitsTimeouts,
		validateResourceLimitsConcurrency,
		validateResourceLimitsMemory,
	} {
		errs = append(errs, validate()...)
	}
	return errs
}
// ValidateConfig validates the loaded configuration.
func ValidateConfig() error {
var validationErrors []string
// Collect validation errors from all validation helpers
validationErrors = append(validationErrors, validateFileSizeLimit()...)
validationErrors = append(validationErrors, validateIgnoreDirectories()...)
validationErrors = append(validationErrors, validateSupportedFormats()...)
validationErrors = append(validationErrors, validateConcurrencySettings()...)
validationErrors = append(validationErrors, validateFilePatterns()...)
validationErrors = append(validationErrors, validateFileTypes()...)
validationErrors = append(validationErrors, validateBackpressureConfig()...)
validationErrors = append(validationErrors, validateResourceLimits()...)
if len(validationErrors) > 0 {
return utils.NewStructuredError(
utils.ErrorTypeConfiguration,
utils.CodeConfigValidation,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeConfiguration,
gibidiutils.CodeConfigValidation,
"configuration validation failed: "+strings.Join(validationErrors, "; "),
"",
map[string]interface{}{"validation_errors": validationErrors},
@@ -253,9 +545,9 @@ func ValidateConfig() error {
func ValidateFileSize(size int64) error {
limit := GetFileSizeLimit()
if size > limit {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationSize,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationSize,
fmt.Sprintf("file size (%d bytes) exceeds limit (%d bytes)", size, limit),
"",
map[string]interface{}{"file_size": size, "size_limit": limit},
@@ -267,9 +559,9 @@ func ValidateFileSize(size int64) error {
// ValidateOutputFormat checks if an output format is valid.
func ValidateOutputFormat(format string) error {
if !IsValidFormat(format) {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
fmt.Sprintf("unsupported output format: %s (supported: json, yaml, markdown)", format),
"",
map[string]interface{}{"format": format},
@@ -281,9 +573,9 @@ func ValidateOutputFormat(format string) error {
// ValidateConcurrency checks if a concurrency level is valid.
func ValidateConcurrency(concurrency int) error {
if concurrency < 1 {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
fmt.Sprintf("concurrency (%d) must be at least 1", concurrency),
"",
map[string]interface{}{"concurrency": concurrency},
@@ -293,9 +585,9 @@ func ValidateConcurrency(concurrency int) error {
if viper.IsSet("maxConcurrency") {
maxConcurrency := GetMaxConcurrency()
if concurrency > maxConcurrency {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
fmt.Sprintf("concurrency (%d) exceeds maximum (%d)", concurrency, maxConcurrency),
"",
map[string]interface{}{"concurrency": concurrency, "max_concurrency": maxConcurrency},

View File

@@ -1,13 +1,14 @@
package config_test
import (
"errors"
"strings"
"testing"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// TestValidateConfig tests the configuration validation functionality.
@@ -112,22 +113,20 @@ func TestValidateConfig(t *testing.T) {
}
// Check that it's a structured error
var structErr *utils.StructuredError
var structErr *gibidiutils.StructuredError
if !errorAs(err, &structErr) {
t.Errorf("Expected structured error, got %T", err)
return
}
if structErr.Type != utils.ErrorTypeConfiguration {
t.Errorf("Expected error type %v, got %v", utils.ErrorTypeConfiguration, structErr.Type)
if structErr.Type != gibidiutils.ErrorTypeConfiguration {
t.Errorf("Expected error type %v, got %v", gibidiutils.ErrorTypeConfiguration, structErr.Type)
}
if structErr.Code != utils.CodeConfigValidation {
t.Errorf("Expected error code %v, got %v", utils.CodeConfigValidation, structErr.Code)
if structErr.Code != gibidiutils.CodeConfigValidation {
t.Errorf("Expected error code %v, got %v", gibidiutils.CodeConfigValidation, structErr.Code)
}
} else {
if err != nil {
} else if err != nil {
t.Errorf("Expected no error but got: %v", err)
}
}
})
}
}
@@ -235,8 +234,9 @@ func errorAs(err error, target interface{}) bool {
if err == nil {
return false
}
if structErr, ok := err.(*utils.StructuredError); ok {
if ptr, ok := target.(**utils.StructuredError); ok {
var structErr *gibidiutils.StructuredError
if errors.As(err, &structErr) {
if ptr, ok := target.(**gibidiutils.StructuredError); ok {
*ptr = structErr
return true
}

View File

@@ -3,6 +3,7 @@ package fileproc
import (
"context"
"math"
"runtime"
"sync"
"sync/atomic"
@@ -11,6 +12,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// BackpressureManager manages memory usage and applies back-pressure when needed.
@@ -59,21 +61,22 @@ func (bp *BackpressureManager) CreateChannels() (chan string, chan WriteRequest)
}
// ShouldApplyBackpressure checks if back-pressure should be applied.
func (bp *BackpressureManager) ShouldApplyBackpressure(ctx context.Context) bool {
func (bp *BackpressureManager) ShouldApplyBackpressure(_ context.Context) bool {
if !bp.enabled {
return false
}
// Check if we should evaluate memory usage
filesProcessed := atomic.AddInt64(&bp.filesProcessed, 1)
if int(filesProcessed)%bp.memoryCheckInterval != 0 {
// Avoid divide by zero - if interval is 0, check every file
if bp.memoryCheckInterval > 0 && int(filesProcessed)%bp.memoryCheckInterval != 0 {
return false
}
// Get current memory usage
var m runtime.MemStats
runtime.ReadMemStats(&m)
currentMemory := int64(m.Alloc)
currentMemory := gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, math.MaxInt64)
bp.mu.Lock()
defer bp.mu.Unlock()
@@ -133,7 +136,7 @@ func (bp *BackpressureManager) GetStats() BackpressureStats {
return BackpressureStats{
Enabled: bp.enabled,
FilesProcessed: atomic.LoadInt64(&bp.filesProcessed),
CurrentMemoryUsage: int64(m.Alloc),
CurrentMemoryUsage: gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, math.MaxInt64),
MaxMemoryUsage: bp.maxMemoryUsage,
MemoryWarningActive: bp.memoryWarningLogged,
LastMemoryCheck: bp.lastMemoryCheck,
@@ -160,8 +163,8 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
return
}
// Check if file channel is getting full (>90% capacity)
if len(fileCh) > bp.maxPendingFiles*9/10 {
// Check if file channel is getting full (>=90% capacity)
if bp.maxPendingFiles > 0 && len(fileCh) >= bp.maxPendingFiles*9/10 {
logrus.Debugf("File channel is %d%% full, waiting for space", len(fileCh)*100/bp.maxPendingFiles)
// Wait a bit for the channel to drain
@@ -172,8 +175,8 @@ func (bp *BackpressureManager) WaitForChannelSpace(ctx context.Context, fileCh c
}
}
// Check if write channel is getting full (>90% capacity)
if len(writeCh) > bp.maxPendingWrites*9/10 {
// Check if write channel is getting full (>=90% capacity)
if bp.maxPendingWrites > 0 && len(writeCh) >= bp.maxPendingWrites*9/10 {
logrus.Debugf("Write channel is %d%% full, waiting for space", len(writeCh)*100/bp.maxPendingWrites)
// Wait a bit for the channel to drain

View File

@@ -0,0 +1,177 @@
package fileproc
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestBackpressureManagerShouldApplyBackpressure covers the enabled flag,
// interval-based memory checks, and the high-memory detection path.
func TestBackpressureManagerShouldApplyBackpressure(t *testing.T) {
	ctx := context.Background()

	t.Run("returns false when disabled", func(t *testing.T) {
		mgr := NewBackpressureManager()
		mgr.enabled = false
		assert.False(t, mgr.ShouldApplyBackpressure(ctx))
	})

	t.Run("checks memory at intervals", func(_ *testing.T) {
		mgr := NewBackpressureManager()
		mgr.enabled = true
		mgr.memoryCheckInterval = 10
		// Calls 1-9 fall between check intervals; the 10th triggers a memory
		// check. The result depends on live memory usage, so we only require
		// that none of the calls panic.
		for call := 1; call <= 10; call++ {
			_ = mgr.ShouldApplyBackpressure(ctx)
		}
	})

	t.Run("detects high memory usage", func(t *testing.T) {
		mgr := NewBackpressureManager()
		mgr.enabled = true
		mgr.memoryCheckInterval = 1
		mgr.maxMemoryUsage = 1 // any real allocation exceeds this limit
		assert.True(t, mgr.ShouldApplyBackpressure(ctx))
	})
}
// TestBackpressureManagerApplyBackpressure verifies that ApplyBackpressure is a
// no-op when disabled, blocks for a measurable delay when enabled, and returns
// promptly when the context is cancelled.
func TestBackpressureManagerApplyBackpressure(t *testing.T) {
	ctx := context.Background()
	t.Run("does nothing when disabled", func(t *testing.T) {
		bm := NewBackpressureManager()
		bm.enabled = false
		// Use a channel to verify the function returns quickly
		done := make(chan struct{})
		go func() {
			bm.ApplyBackpressure(ctx)
			close(done)
		}()
		// Should complete quickly when disabled
		select {
		case <-done:
			// Success - function returned
		case <-time.After(50 * time.Millisecond):
			t.Fatal("ApplyBackpressure did not return quickly when disabled")
		}
	})
	t.Run("applies delay when enabled", func(t *testing.T) {
		bm := NewBackpressureManager()
		bm.enabled = true
		// Use a channel to verify the function blocks for some time
		done := make(chan struct{})
		started := make(chan struct{})
		go func() {
			close(started)
			bm.ApplyBackpressure(ctx)
			close(done)
		}()
		// Wait for goroutine to start
		<-started
		// Should NOT complete immediately - verify it blocks for at least 5ms
		select {
		case <-done:
			t.Fatal("ApplyBackpressure returned too quickly when enabled")
		case <-time.After(5 * time.Millisecond):
			// Good - it's blocking as expected
		}
		// Now wait for it to complete (should finish within reasonable time)
		select {
		case <-done:
			// Success - function eventually returned
		case <-time.After(500 * time.Millisecond):
			t.Fatal("ApplyBackpressure did not complete within timeout")
		}
	})
	t.Run("respects context cancellation", func(t *testing.T) {
		bm := NewBackpressureManager()
		bm.enabled = true
		ctx, cancel := context.WithCancel(context.Background())
		cancel() // Cancel immediately
		start := time.Now()
		bm.ApplyBackpressure(ctx)
		duration := time.Since(start)
		// Should return quickly when context is cancelled
		assert.Less(t, duration, 5*time.Millisecond)
	})
}
// TestBackpressureManagerLogBackpressureInfo exercises LogBackpressureInfo
// after some activity and checks the processed-files counter advanced.
func TestBackpressureManagerLogBackpressureInfo(t *testing.T) {
	ctx := context.Background()

	mgr := NewBackpressureManager()
	mgr.enabled = true // filesProcessed is only incremented while enabled

	// Generate some activity before logging.
	mgr.ShouldApplyBackpressure(ctx)
	mgr.ApplyBackpressure(ctx)

	// Logging must not panic.
	mgr.LogBackpressureInfo()

	assert.Greater(t, mgr.GetStats().FilesProcessed, int64(0))
}
// TestBackpressureManagerMemoryLimiting verifies that the memory warning is
// raised when usage exceeds the configured limit and cleared once usage is
// back under the limit.
func TestBackpressureManagerMemoryLimiting(t *testing.T) {
	t.Run("triggers on low memory limit", func(t *testing.T) {
		ctx := context.Background()
		mgr := NewBackpressureManager()
		mgr.enabled = true
		mgr.memoryCheckInterval = 1 // evaluate memory on every call
		mgr.maxMemoryUsage = 1      // any real allocation exceeds this

		assert.True(t, mgr.ShouldApplyBackpressure(ctx))
		assert.True(t, mgr.GetStats().MemoryWarningActive)
	})

	t.Run("resets warning when memory normalizes", func(t *testing.T) {
		ctx := context.Background()
		mgr := NewBackpressureManager()
		mgr.enabled = true
		mgr.memoryCheckInterval = 1

		// First trip the warning with an impossibly low limit.
		mgr.maxMemoryUsage = 1
		_ = mgr.ShouldApplyBackpressure(ctx)
		assert.True(t, mgr.GetStats().MemoryWarningActive)

		// Then raise the limit far above any plausible usage.
		mgr.maxMemoryUsage = 10 * 1024 * 1024 * 1024 // 10GB
		assert.False(t, mgr.ShouldApplyBackpressure(ctx))

		// The warning should have been cleared (observed via public API).
		assert.False(t, mgr.GetStats().MemoryWarningActive)
	})
}

View File

@@ -0,0 +1,262 @@
package fileproc
import (
"context"
"testing"
"time"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
const (
// CI-safe timeout constants
fastOpTimeout = 100 * time.Millisecond // Operations that should complete quickly
slowOpMinTime = 10 * time.Millisecond // Minimum time for blocking operations
)
// cleanupViperConfig is a test helper that captures and restores viper configuration.
// It takes a testing.T and a list of config keys to save/restore.
// Returns a cleanup function that should be called via t.Cleanup.
// cleanupViperConfig is a test helper that captures and restores viper configuration.
// It takes a testing.T and a list of config keys to save/restore, and registers
// a t.Cleanup that restores each key to its pre-test state.
//
// Keys that were not set before the test are overridden with nil on cleanup
// (viper has no Unset); the previous behavior of skipping nil originals left
// values set during the test visible to later tests.
func cleanupViperConfig(t *testing.T, keys ...string) {
	t.Helper()
	// Capture original values and whether each key was set at all.
	origValues := make(map[string]interface{}, len(keys))
	wasSet := make(map[string]bool, len(keys))
	for _, key := range keys {
		origValues[key] = viper.Get(key)
		wasSet[key] = viper.IsSet(key)
	}
	// Register cleanup to restore values
	t.Cleanup(func() {
		for _, key := range keys {
			if wasSet[key] {
				viper.Set(key, origValues[key])
			} else {
				// Best-effort removal: setting nil is the closest viper
				// offers to unsetting an override added during the test.
				viper.Set(key, nil)
			}
		}
	})
}
// TestBackpressureManagerCreateChannels verifies that CreateChannels returns
// buffered channels when back-pressure is enabled and unbuffered channels
// (capacity 0) when it is disabled.
func TestBackpressureManagerCreateChannels(t *testing.T) {
	t.Run("creates buffered channels when enabled", func(t *testing.T) {
		// Capture and restore viper config
		cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxFiles, testBackpressureMaxWrites)
		viper.Set(testBackpressureEnabled, true)
		viper.Set(testBackpressureMaxFiles, 10)
		viper.Set(testBackpressureMaxWrites, 10)
		bm := NewBackpressureManager()
		fileCh, writeCh := bm.CreateChannels()
		assert.NotNil(t, fileCh)
		assert.NotNil(t, writeCh)
		// Test that channels have buffer capacity
		assert.Greater(t, cap(fileCh), 0)
		assert.Greater(t, cap(writeCh), 0)
		// Test sending and receiving
		fileCh <- "test.go"
		val := <-fileCh
		assert.Equal(t, "test.go", val)
		writeCh <- WriteRequest{Content: "test content"}
		writeReq := <-writeCh
		assert.Equal(t, "test content", writeReq.Content)
		close(fileCh)
		close(writeCh)
	})
	t.Run("creates unbuffered channels when disabled", func(t *testing.T) {
		// Use viper to configure instead of direct field access
		cleanupViperConfig(t, testBackpressureEnabled)
		viper.Set(testBackpressureEnabled, false)
		bm := NewBackpressureManager()
		fileCh, writeCh := bm.CreateChannels()
		assert.NotNil(t, fileCh)
		assert.NotNil(t, writeCh)
		// Unbuffered channels have capacity 0
		assert.Equal(t, 0, cap(fileCh))
		assert.Equal(t, 0, cap(writeCh))
		close(fileCh)
		close(writeCh)
	})
}
// TestBackpressureManagerWaitForChannelSpace verifies that WaitForChannelSpace
// returns immediately when back-pressure is disabled, blocks while either
// channel is near capacity (>=90% full), and honors context cancellation.
func TestBackpressureManagerWaitForChannelSpace(t *testing.T) {
	t.Run("does nothing when disabled", func(t *testing.T) {
		// Use viper to configure instead of direct field access
		cleanupViperConfig(t, testBackpressureEnabled)
		viper.Set(testBackpressureEnabled, false)
		bm := NewBackpressureManager()
		fileCh := make(chan string, 1)
		writeCh := make(chan WriteRequest, 1)
		// Use context with timeout instead of measuring elapsed time
		ctx, cancel := context.WithTimeout(context.Background(), fastOpTimeout)
		defer cancel()
		done := make(chan struct{})
		go func() {
			bm.WaitForChannelSpace(ctx, fileCh, writeCh)
			close(done)
		}()
		// Should return immediately (before timeout)
		select {
		case <-done:
			// Success - operation completed quickly
		case <-ctx.Done():
			t.Fatal("WaitForChannelSpace should return immediately when disabled")
		}
		close(fileCh)
		close(writeCh)
	})
	t.Run("waits when file channel is nearly full", func(t *testing.T) {
		// Use viper to configure instead of direct field access
		cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxFiles)
		viper.Set(testBackpressureEnabled, true)
		viper.Set(testBackpressureMaxFiles, 10)
		bm := NewBackpressureManager()
		// Create channel with exact capacity
		fileCh := make(chan string, 10)
		writeCh := make(chan WriteRequest, 10)
		// Fill file channel to >90% (with minimum of 1)
		target := max(1, int(float64(cap(fileCh))*0.9))
		for i := 0; i < target; i++ {
			fileCh <- "file.txt"
		}
		// Test that it blocks by verifying it doesn't complete immediately
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		done := make(chan struct{})
		start := time.Now()
		go func() {
			bm.WaitForChannelSpace(ctx, fileCh, writeCh)
			close(done)
		}()
		// Verify it doesn't complete immediately (within first millisecond)
		select {
		case <-done:
			t.Fatal("WaitForChannelSpace should block when channel is nearly full")
		case <-time.After(1 * time.Millisecond):
			// Good - it's blocking as expected
		}
		// Wait for it to complete
		<-done
		duration := time.Since(start)
		// Just verify it took some measurable time (very lenient for CI)
		assert.GreaterOrEqual(t, duration, 1*time.Millisecond)
		// Clean up: drain what we queued so the channels can be closed empty
		for i := 0; i < target; i++ {
			<-fileCh
		}
		close(fileCh)
		close(writeCh)
	})
	t.Run("waits when write channel is nearly full", func(t *testing.T) {
		// Use viper to configure instead of direct field access
		cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxWrites)
		viper.Set(testBackpressureEnabled, true)
		viper.Set(testBackpressureMaxWrites, 10)
		bm := NewBackpressureManager()
		fileCh := make(chan string, 10)
		writeCh := make(chan WriteRequest, 10)
		// Fill write channel to >90% (with minimum of 1)
		target := max(1, int(float64(cap(writeCh))*0.9))
		for i := 0; i < target; i++ {
			writeCh <- WriteRequest{}
		}
		// Test that it blocks by verifying it doesn't complete immediately
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		done := make(chan struct{})
		start := time.Now()
		go func() {
			bm.WaitForChannelSpace(ctx, fileCh, writeCh)
			close(done)
		}()
		// Verify it doesn't complete immediately (within first millisecond)
		select {
		case <-done:
			t.Fatal("WaitForChannelSpace should block when channel is nearly full")
		case <-time.After(1 * time.Millisecond):
			// Good - it's blocking as expected
		}
		// Wait for it to complete
		<-done
		duration := time.Since(start)
		// Just verify it took some measurable time (very lenient for CI)
		assert.GreaterOrEqual(t, duration, 1*time.Millisecond)
		// Clean up: drain what we queued so the channels can be closed empty
		for i := 0; i < target; i++ {
			<-writeCh
		}
		close(fileCh)
		close(writeCh)
	})
	t.Run("respects context cancellation", func(t *testing.T) {
		// Use viper to configure instead of direct field access
		cleanupViperConfig(t, testBackpressureEnabled, testBackpressureMaxFiles)
		viper.Set(testBackpressureEnabled, true)
		viper.Set(testBackpressureMaxFiles, 10)
		bm := NewBackpressureManager()
		fileCh := make(chan string, 10)
		writeCh := make(chan WriteRequest, 10)
		// Fill channel completely so WaitForChannelSpace would normally block
		for i := 0; i < 10; i++ {
			fileCh <- "file.txt"
		}
		ctx, cancel := context.WithCancel(context.Background())
		cancel() // Cancel immediately
		// Use timeout to verify it returns quickly
		done := make(chan struct{})
		go func() {
			bm.WaitForChannelSpace(ctx, fileCh, writeCh)
			close(done)
		}()
		// Should return quickly when context is cancelled
		select {
		case <-done:
			// Success - returned due to cancellation
		case <-time.After(fastOpTimeout):
			t.Fatal("WaitForChannelSpace should return immediately when context is cancelled")
		}
		// Clean up: drain the channel before closing
		for i := 0; i < 10; i++ {
			<-fileCh
		}
		close(fileCh)
		close(writeCh)
	})
}

View File

@@ -0,0 +1,195 @@
package fileproc
import (
"context"
"sync"
"testing"
"time"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestBackpressureManagerConcurrency hammers a single BackpressureManager from
// many goroutines at once (checking, applying, reading stats, creating
// channels) to surface data races; run with -race for full value.
func TestBackpressureManagerConcurrency(t *testing.T) {
	// Configure via viper instead of direct field access
	origEnabled := viper.Get(testBackpressureEnabled)
	t.Cleanup(func() {
		if origEnabled != nil {
			viper.Set(testBackpressureEnabled, origEnabled)
		}
	})
	viper.Set(testBackpressureEnabled, true)
	bm := NewBackpressureManager()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var wg sync.WaitGroup
	// Multiple goroutines checking backpressure
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			bm.ShouldApplyBackpressure(ctx)
		}()
	}
	// Multiple goroutines applying backpressure
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			bm.ApplyBackpressure(ctx)
		}()
	}
	// Multiple goroutines getting stats
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			bm.GetStats()
		}()
	}
	// Multiple goroutines creating channels
	// Note: CreateChannels returns new channels each time, caller owns them
	type channelResult struct {
		fileCh  chan string
		writeCh chan WriteRequest
	}
	results := make(chan channelResult, 3)
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fileCh, writeCh := bm.CreateChannels()
			results <- channelResult{fileCh, writeCh}
		}()
	}
	wg.Wait()
	close(results)
	// Verify channels are created and have expected properties
	for result := range results {
		assert.NotNil(t, result.fileCh)
		assert.NotNil(t, result.writeCh)
		// Close channels to prevent resource leak (caller owns them)
		close(result.fileCh)
		close(result.writeCh)
	}
	// Verify stats are consistent: at least the 10 checker goroutines
	// incremented the processed-files counter.
	stats := bm.GetStats()
	assert.GreaterOrEqual(t, stats.FilesProcessed, int64(10))
}
// TestBackpressureManagerIntegration drives a full producer/consumer cycle of
// 100 files through manager-created channels, exercising back-pressure checks,
// channel-space waits, and final stats reporting end to end.
func TestBackpressureManagerIntegration(t *testing.T) {
	// Configure via viper instead of direct field access
	origEnabled := viper.Get(testBackpressureEnabled)
	origMaxFiles := viper.Get(testBackpressureMaxFiles)
	origMaxWrites := viper.Get(testBackpressureMaxWrites)
	origCheckInterval := viper.Get(testBackpressureMemoryCheck)
	origMaxMemory := viper.Get(testBackpressureMaxMemory)
	t.Cleanup(func() {
		if origEnabled != nil {
			viper.Set(testBackpressureEnabled, origEnabled)
		}
		if origMaxFiles != nil {
			viper.Set(testBackpressureMaxFiles, origMaxFiles)
		}
		if origMaxWrites != nil {
			viper.Set(testBackpressureMaxWrites, origMaxWrites)
		}
		if origCheckInterval != nil {
			viper.Set(testBackpressureMemoryCheck, origCheckInterval)
		}
		if origMaxMemory != nil {
			viper.Set(testBackpressureMaxMemory, origMaxMemory)
		}
	})
	viper.Set(testBackpressureEnabled, true)
	viper.Set(testBackpressureMaxFiles, 10)
	viper.Set(testBackpressureMaxWrites, 10)
	viper.Set(testBackpressureMemoryCheck, 10)
	viper.Set(testBackpressureMaxMemory, 100*1024*1024) // 100MB
	bm := NewBackpressureManager()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Create channels - caller owns these channels and is responsible for closing them
	fileCh, writeCh := bm.CreateChannels()
	require.NotNil(t, fileCh)
	require.NotNil(t, writeCh)
	require.Greater(t, cap(fileCh), 0, "fileCh should be buffered")
	require.Greater(t, cap(writeCh), 0, "writeCh should be buffered")
	// Simulate file processing
	var wg sync.WaitGroup
	// Producer: sends 100 file names, cooperating with back-pressure
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			// Check for backpressure
			if bm.ShouldApplyBackpressure(ctx) {
				bm.ApplyBackpressure(ctx)
			}
			// Wait for channel space if needed
			bm.WaitForChannelSpace(ctx, fileCh, writeCh)
			select {
			case fileCh <- "file.txt":
				// File sent
			case <-ctx.Done():
				return
			}
		}
	}()
	// Consumer: drains the 100 file names
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			select {
			case <-fileCh:
				// Process file (do not manually increment filesProcessed)
			case <-ctx.Done():
				return
			}
		}
	}()
	// Wait for completion with a timeout so a deadlock fails the test
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		// Success
	case <-time.After(5 * time.Second):
		t.Fatal("Integration test timeout")
	}
	// Log final info
	bm.LogBackpressureInfo()
	// Check final stats: every produced file triggered a back-pressure check
	stats := bm.GetStats()
	assert.GreaterOrEqual(t, stats.FilesProcessed, int64(100))
	// Clean up - caller owns the channels, safe to close now that goroutines have finished
	close(fileCh)
	close(writeCh)
}

View File

@@ -0,0 +1,151 @@
package fileproc
import (
"context"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
// setupViperCleanup is a test helper that captures and restores viper configuration.
// It snapshots the given config keys and registers a t.Cleanup that restores
// them, so tests can mutate viper freely without leaking state across tests.
func setupViperCleanup(t *testing.T, keys []string) {
	t.Helper()
	// Capture original values and track which keys existed.
	origValues := make(map[string]interface{}, len(keys))
	keysExisted := make(map[string]bool, len(keys))
	for _, key := range keys {
		origValues[key] = viper.Get(key)
		keysExisted[key] = viper.IsSet(key)
	}
	// Register cleanup to restore values.
	t.Cleanup(func() {
		for _, key := range keys {
			if keysExisted[key] {
				viper.Set(key, origValues[key])
				continue
			}
			// The key did not exist before the test, so it must be removed.
			// BUG FIX: viper.AllSettings() returns a *nested* map, so deleting
			// a dotted key (e.g. "backpressure.enabled") from its top level is
			// a no-op and the key silently survived the reset round-trip.
			// Rebuild from the flattened AllKeys() list instead, skipping the
			// key that should disappear.
			remaining := make(map[string]interface{})
			for _, k := range viper.AllKeys() {
				if k != key {
					remaining[k] = viper.Get(k)
				}
			}
			viper.Reset()
			for k, v := range remaining {
				viper.Set(k, v)
			}
		}
	})
}
func TestNewBackpressureManager(t *testing.T) {
keys := []string{
testBackpressureEnabled,
testBackpressureMaxMemory,
testBackpressureMemoryCheck,
testBackpressureMaxFiles,
testBackpressureMaxWrites,
}
setupViperCleanup(t, keys)
viper.Set(testBackpressureEnabled, true)
viper.Set(testBackpressureMaxMemory, 100)
viper.Set(testBackpressureMemoryCheck, 10)
viper.Set(testBackpressureMaxFiles, 10)
viper.Set(testBackpressureMaxWrites, 10)
bm := NewBackpressureManager()
assert.NotNil(t, bm)
assert.True(t, bm.enabled)
assert.Greater(t, bm.maxMemoryUsage, int64(0))
assert.Greater(t, bm.memoryCheckInterval, 0)
assert.Greater(t, bm.maxPendingFiles, 0)
assert.Greater(t, bm.maxPendingWrites, 0)
assert.Equal(t, int64(0), bm.filesProcessed)
}
func TestBackpressureStatsStructure(t *testing.T) {
// Behavioral test that exercises BackpressureManager and validates stats
keys := []string{
testBackpressureEnabled,
testBackpressureMaxMemory,
testBackpressureMemoryCheck,
testBackpressureMaxFiles,
testBackpressureMaxWrites,
}
setupViperCleanup(t, keys)
// Configure backpressure with realistic settings
viper.Set(testBackpressureEnabled, true)
viper.Set(testBackpressureMaxMemory, 100*1024*1024) // 100MB
viper.Set(testBackpressureMemoryCheck, 1) // Check every file
viper.Set(testBackpressureMaxFiles, 1000)
viper.Set(testBackpressureMaxWrites, 500)
bm := NewBackpressureManager()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Simulate processing files
initialStats := bm.GetStats()
assert.True(t, initialStats.Enabled, "backpressure should be enabled")
assert.Equal(t, int64(0), initialStats.FilesProcessed, "initially no files processed")
// Capture initial timestamp to verify it gets updated
initialLastCheck := initialStats.LastMemoryCheck
// Process some files to trigger memory checks
for i := 0; i < 5; i++ {
bm.ShouldApplyBackpressure(ctx)
}
// Verify stats reflect the operations
stats := bm.GetStats()
assert.True(t, stats.Enabled, "enabled flag should be set")
assert.Equal(t, int64(5), stats.FilesProcessed, "should have processed 5 files")
assert.Greater(t, stats.CurrentMemoryUsage, int64(0), "memory usage should be tracked")
assert.Equal(t, int64(100*1024*1024), stats.MaxMemoryUsage, "max memory should match config")
assert.Equal(t, 1000, stats.MaxPendingFiles, "maxPendingFiles should match config")
assert.Equal(t, 500, stats.MaxPendingWrites, "maxPendingWrites should match config")
assert.True(t, stats.LastMemoryCheck.After(initialLastCheck) || stats.LastMemoryCheck.Equal(initialLastCheck),
"lastMemoryCheck should be updated or remain initialized")
}
func TestBackpressureManagerGetStats(t *testing.T) {
keys := []string{
testBackpressureEnabled,
testBackpressureMemoryCheck,
}
setupViperCleanup(t, keys)
// Ensure config enables backpressure and checks every call
viper.Set(testBackpressureEnabled, true)
viper.Set(testBackpressureMemoryCheck, 1)
bm := NewBackpressureManager()
// Capture initial timestamp to verify it gets updated
initialStats := bm.GetStats()
initialLastCheck := initialStats.LastMemoryCheck
// Process some files to update stats
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i := 0; i < 5; i++ {
bm.ShouldApplyBackpressure(ctx)
}
stats := bm.GetStats()
assert.True(t, stats.Enabled)
assert.Equal(t, int64(5), stats.FilesProcessed)
assert.Greater(t, stats.CurrentMemoryUsage, int64(0))
assert.Equal(t, bm.maxMemoryUsage, stats.MaxMemoryUsage)
assert.Equal(t, bm.maxPendingFiles, stats.MaxPendingFiles)
assert.Equal(t, bm.maxPendingWrites, stats.MaxPendingWrites)
// LastMemoryCheck should be updated after processing files (memoryCheckInterval=1)
assert.True(t, stats.LastMemoryCheck.After(initialLastCheck),
"lastMemoryCheck should be updated after memory checks")
}

View File

@@ -1,9 +1,162 @@
package fileproc
import "strings"
import (
"fmt"
"path/filepath"
"strings"
)
const (
	// MaxRegistryEntries is the maximum number of entries allowed in registry config slices/maps.
	// Guards against pathological configs inflating validation time or memory.
	MaxRegistryEntries = 1000

	// MaxExtensionLength is the maximum length for a single extension string.
	// Also applied to CustomLanguages values (language names).
	MaxExtensionLength = 100
)
// RegistryConfig holds configuration for file type registry.
// All paths must be relative without path traversal (no ".." or leading "/").
// Extensions in CustomLanguages keys must start with "." or be alphanumeric with underscore/hyphen.
// Use Validate to check a config before applying it to a registry.
type RegistryConfig struct {
	// CustomImages: file extensions to treat as images (e.g., ".svg", ".webp").
	// Must be relative paths without ".." or leading separators.
	CustomImages []string

	// CustomBinary: file extensions to treat as binary (e.g., ".bin", ".dat").
	// Must be relative paths without ".." or leading separators.
	CustomBinary []string

	// CustomLanguages: maps file extensions to language names (e.g., {".tsx": "TypeScript"}).
	// Keys must start with "." or be alphanumeric with underscore/hyphen.
	CustomLanguages map[string]string

	// DisabledImages: image extensions to disable from default registry.
	DisabledImages []string

	// DisabledBinary: binary extensions to disable from default registry.
	DisabledBinary []string

	// DisabledLanguages: language extensions to disable from default registry.
	DisabledLanguages []string
}
// Validate checks the RegistryConfig for invalid entries and enforces limits.
// Fields are validated in declaration order (custom slices, custom languages,
// then disabled slices), so the first offending field determines the error.
func (c *RegistryConfig) Validate() error {
	if err := validateExtensionSlice(c.CustomImages, "CustomImages"); err != nil {
		return err
	}
	if err := validateExtensionSlice(c.CustomBinary, "CustomBinary"); err != nil {
		return err
	}
	if err := c.validateCustomLanguages(); err != nil {
		return err
	}
	if err := validateExtensionSlice(c.DisabledImages, "DisabledImages"); err != nil {
		return err
	}
	if err := validateExtensionSlice(c.DisabledBinary, "DisabledBinary"); err != nil {
		return err
	}
	return validateExtensionSlice(c.DisabledLanguages, "DisabledLanguages")
}

// validateCustomLanguages enforces the entry-count limit on the
// CustomLanguages map, key-format rules, and value-length limits.
func (c *RegistryConfig) validateCustomLanguages() error {
	if len(c.CustomLanguages) > MaxRegistryEntries {
		return fmt.Errorf(
			"CustomLanguages exceeds maximum entries (%d > %d)",
			len(c.CustomLanguages),
			MaxRegistryEntries,
		)
	}
	for ext, lang := range c.CustomLanguages {
		if err := validateExtension(ext, "CustomLanguages key"); err != nil {
			return err
		}
		if len(lang) > MaxExtensionLength {
			return fmt.Errorf(
				"CustomLanguages value %q exceeds maximum length (%d > %d)",
				lang,
				len(lang),
				MaxExtensionLength,
			)
		}
	}
	return nil
}
// validateExtensionSlice validates a slice of extensions for path safety and limits.
// fieldName is used only to label error messages.
func validateExtensionSlice(slice []string, fieldName string) error {
	if n := len(slice); n > MaxRegistryEntries {
		return fmt.Errorf("%s exceeds maximum entries (%d > %d)", fieldName, n, MaxRegistryEntries)
	}
	for _, entry := range slice {
		if err := validateExtension(entry, fieldName); err != nil {
			return err
		}
	}
	return nil
}
// validateExtension validates a single extension (or bare name) for path safety.
// context labels the originating config field in error messages. Entries must
// be non-empty, within MaxExtensionLength, relative, traversal-free, and
// either a dotted extension without separators or a purely alphanumeric/_/-
// bare name.
//
//revive:disable-next-line:cyclomatic
func validateExtension(ext, context string) error {
	switch {
	case ext == "":
		return fmt.Errorf("%s entry cannot be empty", context)
	case len(ext) > MaxExtensionLength:
		return fmt.Errorf(
			"%s entry %q exceeds maximum length (%d > %d)",
			context, ext, len(ext), MaxExtensionLength,
		)
	case filepath.IsAbs(ext):
		return fmt.Errorf("%s entry %q is an absolute path (not allowed)", context, ext)
	case strings.Contains(ext, ".."):
		return fmt.Errorf("%s entry %q contains path traversal (not allowed)", context, ext)
	}

	if strings.HasPrefix(ext, ".") {
		// Dotted extensions may not contain any separator character
		// (platform separator, forward slash, or backslash).
		if strings.ContainsRune(ext, filepath.Separator) || strings.ContainsAny(ext, `/\`) {
			return fmt.Errorf("%s entry %q contains path separators (not allowed)", context, ext)
		}
		return nil
	}

	// Bare names must consist solely of alphanumerics, underscore, or hyphen.
	for _, r := range ext {
		switch {
		case r >= 'a' && r <= 'z',
			r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9',
			r == '_', r == '-':
			// allowed character
		default:
			return fmt.Errorf(
				"%s entry %q contains invalid characters (must start with '.' or be alphanumeric/_/-)",
				context,
				ext,
			)
		}
	}
	return nil
}
// ApplyCustomExtensions applies custom extensions from configuration.
func (r *FileTypeRegistry) ApplyCustomExtensions(customImages, customBinary []string, customLanguages map[string]string) {
func (r *FileTypeRegistry) ApplyCustomExtensions(
customImages, customBinary []string,
customLanguages map[string]string,
) {
// Add custom image extensions
r.addExtensions(customImages, r.AddImageExtension)
@@ -29,12 +182,24 @@ func (r *FileTypeRegistry) addExtensions(extensions []string, adder func(string)
// ConfigureFromSettings applies configuration settings to the registry.
// This function is called from main.go after config is loaded to avoid circular imports.
func ConfigureFromSettings(
customImages, customBinary []string,
customLanguages map[string]string,
disabledImages, disabledBinary, disabledLanguages []string,
) {
registry := GetDefaultRegistry()
registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)
registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)
// It validates the configuration before applying it.
func ConfigureFromSettings(config RegistryConfig) error {
// Validate configuration first
if err := config.Validate(); err != nil {
return err
}
registry := GetDefaultRegistry()
// Only apply custom extensions if they are non-empty (len() for nil slices/maps is zero)
if len(config.CustomImages) > 0 || len(config.CustomBinary) > 0 || len(config.CustomLanguages) > 0 {
registry.ApplyCustomExtensions(config.CustomImages, config.CustomBinary, config.CustomLanguages)
}
// Only disable extensions if they are non-empty
if len(config.DisabledImages) > 0 || len(config.DisabledBinary) > 0 || len(config.DisabledLanguages) > 0 {
registry.DisableExtensions(config.DisabledImages, config.DisabledBinary, config.DisabledLanguages)
}
return nil
}

View File

@@ -14,10 +14,10 @@ func TestFileTypeRegistry_ThreadSafety(t *testing.T) {
var wg sync.WaitGroup
// Test concurrent read operations
t.Run("ConcurrentReads", func(t *testing.T) {
t.Run("ConcurrentReads", func(_ *testing.T) {
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(id int) {
go func(_ int) {
defer wg.Done()
registry := GetDefaultRegistry()

View File

@@ -1,8 +1,9 @@
package fileproc
import (
"sync"
"testing"
"github.com/stretchr/testify/require"
)
// TestFileTypeRegistry_Configuration tests the configuration functionality.
@@ -142,7 +143,7 @@ func TestFileTypeRegistry_Configuration(t *testing.T) {
}
})
// Test case insensitive handling
// Test case-insensitive handling
t.Run("CaseInsensitiveHandling", func(t *testing.T) {
registry := &FileTypeRegistry{
imageExts: make(map[string]bool),
@@ -184,8 +185,9 @@ func TestFileTypeRegistry_Configuration(t *testing.T) {
// TestConfigureFromSettings tests the global configuration function.
func TestConfigureFromSettings(t *testing.T) {
// Reset registry to ensure clean state
registryOnce = sync.Once{}
registry = nil
ResetRegistryForTesting()
// Ensure cleanup runs even if test fails
t.Cleanup(ResetRegistryForTesting)
// Test configuration application
customImages := []string{".webp", ".avif"}
@@ -195,14 +197,15 @@ func TestConfigureFromSettings(t *testing.T) {
disabledBinary := []string{".exe"} // Disable default extension
disabledLanguages := []string{".rb"} // Disable default extension
ConfigureFromSettings(
customImages,
customBinary,
customLanguages,
disabledImages,
disabledBinary,
disabledLanguages,
)
err := ConfigureFromSettings(RegistryConfig{
CustomImages: customImages,
CustomBinary: customBinary,
CustomLanguages: customLanguages,
DisabledImages: disabledImages,
DisabledBinary: disabledBinary,
DisabledLanguages: disabledLanguages,
})
require.NoError(t, err)
// Test that custom extensions work
if !IsImage("test.webp") {
@@ -238,14 +241,15 @@ func TestConfigureFromSettings(t *testing.T) {
}
// Test multiple calls don't override previous configuration
ConfigureFromSettings(
[]string{".extra"},
[]string{},
map[string]string{},
[]string{},
[]string{},
[]string{},
)
err = ConfigureFromSettings(RegistryConfig{
CustomImages: []string{".extra"},
CustomBinary: []string{},
CustomLanguages: map[string]string{},
DisabledImages: []string{},
DisabledBinary: []string{},
DisabledLanguages: []string{},
})
require.NoError(t, err)
// Previous configuration should still work
if !IsImage("test.webp") {

View File

@@ -4,9 +4,21 @@ import (
"testing"
)
// newTestRegistry creates a fresh registry instance for testing to avoid global state pollution.
// It mirrors the default registry's construction, including cache pre-sizing.
func newTestRegistry() *FileTypeRegistry {
	reg := &FileTypeRegistry{
		imageExts:    getImageExtensions(),
		binaryExts:   getBinaryExtensions(),
		languageMap:  getLanguageMap(),
		maxCacheSize: 500,
	}
	// Pre-size caches to match production defaults.
	reg.extCache = make(map[string]string, 1000)
	reg.resultCache = make(map[string]FileTypeResult, 500)
	return reg
}
// TestFileTypeRegistry_LanguageDetection tests the language detection functionality.
func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
registry := GetDefaultRegistry()
registry := newTestRegistry()
tests := []struct {
filename string
@@ -94,7 +106,7 @@ func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
// TestFileTypeRegistry_ImageDetection tests the image detection functionality.
func TestFileTypeRegistry_ImageDetection(t *testing.T) {
registry := GetDefaultRegistry()
registry := newTestRegistry()
tests := []struct {
filename string
@@ -144,7 +156,7 @@ func TestFileTypeRegistry_ImageDetection(t *testing.T) {
// TestFileTypeRegistry_BinaryDetection tests the binary detection functionality.
func TestFileTypeRegistry_BinaryDetection(t *testing.T) {
registry := GetDefaultRegistry()
registry := newTestRegistry()
tests := []struct {
filename string

View File

@@ -31,7 +31,7 @@ func TestFileTypeRegistry_EdgeCases(t *testing.T) {
}
for _, tc := range edgeCases {
t.Run(tc.name, func(t *testing.T) {
t.Run(tc.name, func(_ *testing.T) {
// These should not panic
_ = registry.IsImage(tc.filename)
_ = registry.IsBinary(tc.filename)

View File

@@ -21,7 +21,7 @@ func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
t.Errorf("Expected .webp to be recognized as image after adding")
}
// Test case insensitive addition
// Test case-insensitive addition
registry.AddImageExtension(".AVIF")
if !registry.IsImage("test.avif") {
t.Errorf("Expected .avif to be recognized as image after adding .AVIF")
@@ -51,7 +51,7 @@ func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
t.Errorf("Expected .custom to be recognized as binary after adding")
}
// Test case insensitive addition
// Test case-insensitive addition
registry.AddBinaryExtension(".SPECIAL")
if !registry.IsBinary("file.special") {
t.Errorf("Expected .special to be recognized as binary after adding .SPECIAL")
@@ -81,7 +81,7 @@ func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
t.Errorf("Expected CustomLang, got %s", lang)
}
// Test case insensitive addition
// Test case-insensitive addition
registry.AddLanguageMapping(".ABC", "UpperLang")
if lang := registry.GetLanguage("file.abc"); lang != "UpperLang" {
t.Errorf("Expected UpperLang, got %s", lang)

View File

@@ -6,7 +6,7 @@ import (
"io"
"os"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// JSONWriter handles JSON format output with streaming support.
@@ -27,27 +27,42 @@ func NewJSONWriter(outFile *os.File) *JSONWriter {
func (w *JSONWriter) Start(prefix, suffix string) error {
// Start JSON structure
if _, err := w.outFile.WriteString(`{"prefix":"`); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON start")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write JSON start",
)
}
// Write escaped prefix
escapedPrefix := utils.EscapeForJSON(prefix)
if err := utils.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
escapedPrefix := gibidiutils.EscapeForJSON(prefix)
if err := gibidiutils.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
return err
}
if _, err := w.outFile.WriteString(`","suffix":"`); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON middle")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write JSON middle",
)
}
// Write escaped suffix
escapedSuffix := utils.EscapeForJSON(suffix)
if err := utils.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
escapedSuffix := gibidiutils.EscapeForJSON(suffix)
if err := gibidiutils.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
return err
}
if _, err := w.outFile.WriteString(`","files":[`); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON files start")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write JSON files start",
)
}
return nil
@@ -57,7 +72,12 @@ func (w *JSONWriter) Start(prefix, suffix string) error {
func (w *JSONWriter) WriteFile(req WriteRequest) error {
if !w.firstFile {
if _, err := w.outFile.WriteString(","); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON separator")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write JSON separator",
)
}
}
w.firstFile = false
@@ -72,21 +92,24 @@ func (w *JSONWriter) WriteFile(req WriteRequest) error {
func (w *JSONWriter) Close() error {
// Close JSON structure
if _, err := w.outFile.WriteString("]}"); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON end")
return gibidiutils.WrapError(err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite, "failed to write JSON end")
}
return nil
}
// writeStreaming writes a large file as JSON in streaming chunks.
func (w *JSONWriter) writeStreaming(req WriteRequest) error {
defer utils.SafeCloseReader(req.Reader, req.Path)
defer gibidiutils.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write file start
escapedPath := utils.EscapeForJSON(req.Path)
escapedPath := gibidiutils.EscapeForJSON(req.Path)
if _, err := fmt.Fprintf(w.outFile, `{"path":"%s","language":"%s","content":"`, escapedPath, language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file start").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write JSON file start",
).WithFilePath(req.Path)
}
// Stream content with JSON escaping
@@ -96,7 +119,10 @@ func (w *JSONWriter) writeStreaming(req WriteRequest) error {
// Write file end
if _, err := w.outFile.WriteString(`"}`); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file end").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write JSON file end",
).WithFilePath(req.Path)
}
return nil
@@ -113,25 +139,29 @@ func (w *JSONWriter) writeInline(req WriteRequest) error {
encoded, err := json.Marshal(fileData)
if err != nil {
return utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingEncode, "failed to marshal JSON").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingEncode,
"failed to marshal JSON",
).WithFilePath(req.Path)
}
if _, err := w.outFile.Write(encoded); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write JSON file",
).WithFilePath(req.Path)
}
return nil
}
// streamJSONContent streams content with JSON escaping.
func (w *JSONWriter) streamJSONContent(reader io.Reader, path string) error {
return utils.StreamContent(reader, w.outFile, StreamChunkSize, path, func(chunk []byte) []byte {
escaped := utils.EscapeForJSON(string(chunk))
return gibidiutils.StreamContent(reader, w.outFile, StreamChunkSize, path, func(chunk []byte) []byte {
escaped := gibidiutils.EscapeForJSON(string(chunk))
return []byte(escaped)
})
}
// startJSONWriter handles JSON format output with streaming support.
func startJSONWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
defer close(done)
@@ -140,19 +170,19 @@ func startJSONWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<-
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
utils.LogError("Failed to write JSON start", err)
gibidiutils.LogError("Failed to write JSON start", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
utils.LogError("Failed to write JSON file", err)
gibidiutils.LogError("Failed to write JSON file", err)
}
}
// Close writer
if err := writer.Close(); err != nil {
utils.LogError("Failed to write JSON end", err)
gibidiutils.LogError("Failed to write JSON end", err)
}
}

View File

@@ -4,11 +4,13 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// MarkdownWriter handles markdown format output with streaming support.
// MarkdownWriter handles Markdown format output with streaming support.
type MarkdownWriter struct {
outFile *os.File
}
@@ -19,16 +21,21 @@ func NewMarkdownWriter(outFile *os.File) *MarkdownWriter {
}
// Start writes the markdown header.
func (w *MarkdownWriter) Start(prefix, suffix string) error {
func (w *MarkdownWriter) Start(prefix, _ string) error {
if prefix != "" {
if _, err := fmt.Fprintf(w.outFile, "# %s\n\n", prefix); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write prefix")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write prefix",
)
}
}
return nil
}
// WriteFile writes a file entry in markdown format.
// WriteFile writes a file entry in Markdown format.
func (w *MarkdownWriter) WriteFile(req WriteRequest) error {
if req.IsStream {
return w.writeStreaming(req)
@@ -40,21 +47,99 @@ func (w *MarkdownWriter) WriteFile(req WriteRequest) error {
func (w *MarkdownWriter) Close(suffix string) error {
if suffix != "" {
if _, err := fmt.Fprintf(w.outFile, "\n# %s\n", suffix); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write suffix")
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write suffix",
)
}
}
return nil
}
// validateMarkdownPath validates a file path for markdown output.
// It rejects empty, absolute, and traversal-containing paths so that emitted
// file headings cannot reference locations outside the scanned tree.
func validateMarkdownPath(path string) error {
	trimmed := strings.TrimSpace(path)
	if trimmed == "" {
		return gibidiutils.NewStructuredError(
			gibidiutils.ErrorTypeValidation,
			gibidiutils.CodeValidationRequired,
			"file path cannot be empty",
			"",
			nil,
		)
	}

	// Absolute paths are rejected outright, before any normalization.
	if filepath.IsAbs(trimmed) {
		return gibidiutils.NewStructuredError(
			gibidiutils.ErrorTypeValidation,
			gibidiutils.CodeValidationPath,
			"absolute paths are not allowed",
			trimmed,
			map[string]any{"path": trimmed},
		)
	}

	// Normalize, then re-check: Clean can expose a leading separator.
	cleaned := filepath.Clean(trimmed)
	details := map[string]any{"path": trimmed, "cleaned": cleaned}
	if filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, "/") {
		return gibidiutils.NewStructuredError(
			gibidiutils.ErrorTypeValidation,
			gibidiutils.CodeValidationPath,
			"path must be relative",
			trimmed,
			details,
		)
	}

	// After Clean, any remaining ".." component indicates attempted traversal.
	for _, part := range strings.Split(filepath.ToSlash(cleaned), "/") {
		if part == ".." {
			return gibidiutils.NewStructuredError(
				gibidiutils.ErrorTypeValidation,
				gibidiutils.CodeValidationPath,
				"path traversal not allowed",
				trimmed,
				details,
			)
		}
	}
	return nil
}
// writeStreaming writes a large file in streaming chunks.
func (w *MarkdownWriter) writeStreaming(req WriteRequest) error {
defer w.closeReader(req.Reader, req.Path)
// Validate path before use
if err := validateMarkdownPath(req.Path); err != nil {
return err
}
// Check for nil reader
if req.Reader == nil {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationRequired,
"nil reader in write request",
"",
nil,
).WithFilePath(req.Path)
}
defer gibidiutils.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write file header
if _, err := fmt.Fprintf(w.outFile, "## File: `%s`\n```%s\n", req.Path, language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write file header").WithFilePath(req.Path)
safePath := gibidiutils.EscapeForMarkdown(req.Path)
if _, err := fmt.Fprintf(w.outFile, "## File: `%s`\n```%s\n", safePath, language); err != nil {
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write file header",
).WithFilePath(req.Path)
}
// Stream file content in chunks
@@ -64,7 +149,10 @@ func (w *MarkdownWriter) writeStreaming(req WriteRequest) error {
// Write file footer
if _, err := w.outFile.WriteString("\n```\n\n"); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write file footer").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write file footer",
).WithFilePath(req.Path)
}
return nil
@@ -72,68 +160,55 @@ func (w *MarkdownWriter) writeStreaming(req WriteRequest) error {
// writeInline writes a small file directly from content.
func (w *MarkdownWriter) writeInline(req WriteRequest) error {
// Validate path before use
if err := validateMarkdownPath(req.Path); err != nil {
return err
}
language := detectLanguage(req.Path)
formatted := fmt.Sprintf("## File: `%s`\n```%s\n%s\n```\n\n", req.Path, language, req.Content)
safePath := gibidiutils.EscapeForMarkdown(req.Path)
formatted := fmt.Sprintf("## File: `%s`\n```%s\n%s\n```\n\n", safePath, language, req.Content)
if _, err := w.outFile.WriteString(formatted); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write inline content").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write inline content",
).WithFilePath(req.Path)
}
return nil
}
// streamContent streams file content in chunks.
func (w *MarkdownWriter) streamContent(reader io.Reader, path string) error {
buf := make([]byte, StreamChunkSize)
for {
n, err := reader.Read(buf)
if n > 0 {
if _, writeErr := w.outFile.Write(buf[:n]); writeErr != nil {
return utils.WrapError(writeErr, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write chunk").WithFilePath(path)
}
}
if err == io.EOF {
break
}
if err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIORead, "failed to read chunk").WithFilePath(path)
}
}
return nil
return gibidiutils.StreamContent(reader, w.outFile, StreamChunkSize, path, nil)
}
// closeReader safely closes a reader if it implements io.Closer.
// Readers without a Close method are ignored. Close failures are logged (with
// the file path attached) rather than propagated, since by the time this runs
// the file's content has already been written to output.
func (w *MarkdownWriter) closeReader(reader io.Reader, path string) {
	if closer, ok := reader.(io.Closer); ok {
		if err := closer.Close(); err != nil {
			utils.LogError(
				"Failed to close file reader",
				utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOClose, "failed to close file reader").WithFilePath(path),
			)
		}
	}
}
// startMarkdownWriter handles markdown format output with streaming support.
func startMarkdownWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
// startMarkdownWriter handles Markdown format output with streaming support.
func startMarkdownWriter(
outFile *os.File,
writeCh <-chan WriteRequest,
done chan<- struct{},
prefix, suffix string,
) {
defer close(done)
writer := NewMarkdownWriter(outFile)
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
utils.LogError("Failed to write markdown prefix", err)
gibidiutils.LogError("Failed to write markdown prefix", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
utils.LogError("Failed to write markdown file", err)
gibidiutils.LogError("Failed to write markdown file", err)
}
}
// Close writer
if err := writer.Close(suffix); err != nil {
utils.LogError("Failed to write markdown suffix", err)
gibidiutils.LogError("Failed to write markdown suffix", err)
}
}

View File

@@ -3,6 +3,7 @@ package fileproc
import (
"context"
"errors"
"fmt"
"io"
"os"
@@ -13,7 +14,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
const (
@@ -33,6 +34,26 @@ type WriteRequest struct {
Reader io.Reader
}
// multiReaderCloser wraps an io.Reader with a Close method that closes underlying closers.
type multiReaderCloser struct {
reader io.Reader
closers []io.Closer
}
func (m *multiReaderCloser) Read(p []byte) (n int, err error) {
return m.reader.Read(p)
}
func (m *multiReaderCloser) Close() error {
var firstErr error
for _, c := range m.closers {
if err := c.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// FileProcessor handles file processing operations.
type FileProcessor struct {
rootPath string
@@ -58,6 +79,34 @@ func NewFileProcessorWithMonitor(rootPath string, monitor *ResourceMonitor) *Fil
}
}
// checkContextCancellation checks if context is cancelled and logs an error if so.
// stage optionally names the processing phase and is embedded in the log
// message. Returns true if the context is cancelled, false otherwise.
func (p *FileProcessor) checkContextCancellation(ctx context.Context, filePath, stage string) bool {
	if ctx.Err() == nil {
		return false
	}
	// Prefix the stage with a space so it slots into the message cleanly.
	stageMsg := stage
	if stage != "" {
		stageMsg = " " + stage
	}
	gibidiutils.LogErrorf(
		gibidiutils.NewStructuredError(
			gibidiutils.ErrorTypeValidation,
			gibidiutils.CodeResourceLimitTimeout,
			fmt.Sprintf("file processing cancelled%s", stageMsg),
			filePath,
			nil,
		),
		"File processing cancelled%s: %s",
		stageMsg,
		filePath,
	)
	return true
}
// ProcessFile reads the file at filePath and sends a formatted output to outCh.
// It automatically chooses between loading the entire file or streaming based on file size.
func ProcessFile(filePath string, outCh chan<- WriteRequest, rootPath string) {
@@ -67,7 +116,13 @@ func ProcessFile(filePath string, outCh chan<- WriteRequest, rootPath string) {
}
// ProcessFileWithMonitor processes a file using a shared resource monitor.
func ProcessFileWithMonitor(ctx context.Context, filePath string, outCh chan<- WriteRequest, rootPath string, monitor *ResourceMonitor) {
func ProcessFileWithMonitor(
ctx context.Context,
filePath string,
outCh chan<- WriteRequest,
rootPath string,
monitor *ResourceMonitor,
) {
processor := NewFileProcessorWithMonitor(rootPath, monitor)
processor.ProcessWithContext(ctx, filePath, outCh)
}
@@ -86,10 +141,17 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
// Wait for rate limiting
if err := p.resourceMonitor.WaitForRateLimit(fileCtx); err != nil {
if err == context.DeadlineExceeded {
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing timeout during rate limiting", filePath, nil),
"File processing timeout during rate limiting: %s", filePath,
if errors.Is(err, context.DeadlineExceeded) {
gibidiutils.LogErrorf(
gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitTimeout,
"file processing timeout during rate limiting",
filePath,
nil,
),
"File processing timeout during rate limiting: %s",
filePath,
)
}
return
@@ -103,10 +165,17 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
// Acquire read slot for concurrent processing
if err := p.resourceMonitor.AcquireReadSlot(fileCtx); err != nil {
if err == context.DeadlineExceeded {
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing timeout waiting for read slot", filePath, nil),
"File processing timeout waiting for read slot: %s", filePath,
if errors.Is(err, context.DeadlineExceeded) {
gibidiutils.LogErrorf(
gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitTimeout,
"file processing timeout waiting for read slot",
filePath,
nil,
),
"File processing timeout waiting for read slot: %s",
filePath,
)
}
return
@@ -115,7 +184,7 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
// Check hard memory limits before processing
if err := p.resourceMonitor.CheckHardMemoryLimit(); err != nil {
utils.LogErrorf(err, "Hard memory limit check failed for file: %s", filePath)
gibidiutils.LogErrorf(err, "Hard memory limit check failed for file: %s", filePath)
return
}
@@ -138,7 +207,6 @@ func (p *FileProcessor) ProcessWithContext(ctx context.Context, filePath string,
}
}
// validateFileWithLimits checks if the file can be processed with resource limits.
func (p *FileProcessor) validateFileWithLimits(ctx context.Context, filePath string) (os.FileInfo, error) {
// Check context cancellation
@@ -150,24 +218,27 @@ func (p *FileProcessor) validateFileWithLimits(ctx context.Context, filePath str
fileInfo, err := os.Stat(filePath)
if err != nil {
structErr := utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to stat file").WithFilePath(filePath)
utils.LogErrorf(structErr, "Failed to stat file %s", filePath)
return nil, err
structErr := gibidiutils.WrapError(
err, gibidiutils.ErrorTypeFileSystem, gibidiutils.CodeFSAccess,
"failed to stat file",
).WithFilePath(filePath)
gibidiutils.LogErrorf(structErr, "Failed to stat file %s", filePath)
return nil, structErr
}
// Check traditional size limit
if fileInfo.Size() > p.sizeLimit {
context := map[string]interface{}{
filesizeContext := map[string]interface{}{
"file_size": fileInfo.Size(),
"size_limit": p.sizeLimit,
}
utils.LogErrorf(
utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationSize,
gibidiutils.LogErrorf(
gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationSize,
fmt.Sprintf("file size (%d bytes) exceeds limit (%d bytes)", fileInfo.Size(), p.sizeLimit),
filePath,
context,
filesizeContext,
),
"Skipping large file %s", filePath,
)
@@ -176,7 +247,7 @@ func (p *FileProcessor) validateFileWithLimits(ctx context.Context, filePath str
// Check resource limits
if err := p.resourceMonitor.ValidateFileProcessing(filePath, fileInfo.Size()); err != nil {
utils.LogErrorf(err, "Resource limit validation failed for file: %s", filePath)
gibidiutils.LogErrorf(err, "Resource limit validation failed for file: %s", filePath)
return nil, err
}
@@ -192,66 +263,54 @@ func (p *FileProcessor) getRelativePath(filePath string) string {
return relPath
}
// processInMemoryWithContext loads the entire file into memory with context awareness.
func (p *FileProcessor) processInMemoryWithContext(ctx context.Context, filePath, relPath string, outCh chan<- WriteRequest) {
func (p *FileProcessor) processInMemoryWithContext(
ctx context.Context,
filePath, relPath string,
outCh chan<- WriteRequest,
) {
// Check context before reading
select {
case <-ctx.Done():
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing cancelled", filePath, nil),
"File processing cancelled: %s", filePath,
)
if p.checkContextCancellation(ctx, filePath, "") {
return
default:
}
content, err := os.ReadFile(filePath) // #nosec G304 - filePath is validated by walker
// #nosec G304 - filePath is validated by walker
content, err := os.ReadFile(filePath)
if err != nil {
structErr := utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingFileRead, "failed to read file").WithFilePath(filePath)
utils.LogErrorf(structErr, "Failed to read file %s", filePath)
structErr := gibidiutils.WrapError(
err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingFileRead,
"failed to read file",
).WithFilePath(filePath)
gibidiutils.LogErrorf(structErr, "Failed to read file %s", filePath)
return
}
// Check context again after reading
select {
case <-ctx.Done():
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing cancelled after read", filePath, nil),
"File processing cancelled after read: %s", filePath,
)
if p.checkContextCancellation(ctx, filePath, "after read") {
return
default:
}
// Try to send the result, but respect context cancellation
select {
case <-ctx.Done():
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "file processing cancelled before output", filePath, nil),
"File processing cancelled before output: %s", filePath,
)
// Check context before sending output
if p.checkContextCancellation(ctx, filePath, "before output") {
return
case outCh <- WriteRequest{
}
outCh <- WriteRequest{
Path: relPath,
Content: p.formatContent(relPath, string(content)),
IsStream: false,
}:
}
}
// processStreamingWithContext creates a streaming reader for large files with context awareness.
func (p *FileProcessor) processStreamingWithContext(ctx context.Context, filePath, relPath string, outCh chan<- WriteRequest) {
func (p *FileProcessor) processStreamingWithContext(
ctx context.Context,
filePath, relPath string,
outCh chan<- WriteRequest,
) {
// Check context before creating reader
select {
case <-ctx.Done():
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "streaming processing cancelled", filePath, nil),
"Streaming processing cancelled: %s", filePath,
)
if p.checkContextCancellation(ctx, filePath, "before streaming") {
return
default:
}
reader := p.createStreamReaderWithContext(ctx, filePath, relPath)
@@ -259,43 +318,47 @@ func (p *FileProcessor) processStreamingWithContext(ctx context.Context, filePat
return // Error already logged
}
// Try to send the result, but respect context cancellation
select {
case <-ctx.Done():
utils.LogErrorf(
utils.NewStructuredError(utils.ErrorTypeValidation, utils.CodeResourceLimitTimeout, "streaming processing cancelled before output", filePath, nil),
"Streaming processing cancelled before output: %s", filePath,
)
// Check context before sending output
if p.checkContextCancellation(ctx, filePath, "before streaming output") {
// Close the reader to prevent file descriptor leak
if closer, ok := reader.(io.Closer); ok {
_ = closer.Close()
}
return
case outCh <- WriteRequest{
}
outCh <- WriteRequest{
Path: relPath,
Content: "", // Empty since content is in Reader
IsStream: true,
Reader: reader,
}:
}
}
// createStreamReaderWithContext creates a reader that combines header and file content with context awareness.
func (p *FileProcessor) createStreamReaderWithContext(ctx context.Context, filePath, relPath string) io.Reader {
// Check context before opening file
select {
case <-ctx.Done():
if p.checkContextCancellation(ctx, filePath, "before opening file") {
return nil
default:
}
file, err := os.Open(filePath) // #nosec G304 - filePath is validated by walker
// #nosec G304 - filePath is validated by walker
file, err := os.Open(filePath)
if err != nil {
structErr := utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingFileRead, "failed to open file for streaming").WithFilePath(filePath)
utils.LogErrorf(structErr, "Failed to open file for streaming %s", filePath)
structErr := gibidiutils.WrapError(
err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingFileRead,
"failed to open file for streaming",
).WithFilePath(filePath)
gibidiutils.LogErrorf(structErr, "Failed to open file for streaming %s", filePath)
return nil
}
// Note: file will be closed by the writer
header := p.formatHeader(relPath)
return io.MultiReader(header, file)
// Wrap in multiReaderCloser to ensure file is closed even on cancellation
return &multiReaderCloser{
reader: io.MultiReader(header, file),
closers: []io.Closer{file},
}
}
// formatContent formats the file content with header.

View File

@@ -51,7 +51,9 @@ func (rm *ResourceMonitor) CreateFileProcessingContext(parent context.Context) (
}
// CreateOverallProcessingContext creates a context with overall processing timeout.
func (rm *ResourceMonitor) CreateOverallProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
func (rm *ResourceMonitor) CreateOverallProcessingContext(
parent context.Context,
) (context.Context, context.CancelFunc) {
if !rm.enabled || rm.overallTimeout <= 0 {
return parent, func() {}
}

View File

@@ -6,6 +6,8 @@ import (
"time"
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// RecordFileProcessed records that a file has been successfully processed.
@@ -55,7 +57,7 @@ func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
ProcessingDuration: duration,
AverageFileSize: avgFileSize,
ProcessingRate: processingRate,
MemoryUsageMB: int64(m.Alloc) / 1024 / 1024,
MemoryUsageMB: gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, 0) / 1024 / 1024,
MaxMemoryUsageMB: int64(rm.hardMemoryLimitMB),
ViolationsDetected: violations,
DegradationActive: rm.degradationActive,
@@ -67,8 +69,13 @@ func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
// LogResourceInfo logs current resource limit configuration.
func (rm *ResourceMonitor) LogResourceInfo() {
if rm.enabled {
logrus.Infof("Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
rm.maxFiles, rm.maxTotalSize/1024/1024, int(rm.fileProcessingTimeout.Seconds()), int(rm.overallTimeout.Seconds()))
logrus.Infof(
"Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
rm.maxFiles,
rm.maxTotalSize/1024/1024,
int(rm.fileProcessingTimeout.Seconds()),
int(rm.overallTimeout.Seconds()),
)
logrus.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
rm.maxConcurrentReads, rm.rateLimitFilesPerSec, rm.hardMemoryLimitMB)
logrus.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",

View File

@@ -7,7 +7,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// ValidateFileProcessing checks if a file can be processed based on resource limits.
@@ -21,9 +21,9 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check if emergency stop is active
if rm.emergencyStopRequested {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitMemory,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitMemory,
"processing stopped due to emergency memory condition",
filePath,
map[string]interface{}{
@@ -35,9 +35,9 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check file count limit
currentFiles := atomic.LoadInt64(&rm.filesProcessed)
if int(currentFiles) >= rm.maxFiles {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitFiles,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitFiles,
"maximum file count limit exceeded",
filePath,
map[string]interface{}{
@@ -50,9 +50,9 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check total size limit
currentTotalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
if currentTotalSize+fileSize > rm.maxTotalSize {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitTotalSize,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitTotalSize,
"maximum total size limit would be exceeded",
filePath,
map[string]interface{}{
@@ -65,9 +65,9 @@ func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int6
// Check overall timeout
if time.Since(rm.startTime) > rm.overallTimeout {
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitTimeout,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitTimeout,
"overall processing timeout exceeded",
filePath,
map[string]interface{}{
@@ -88,7 +88,7 @@ func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
var m runtime.MemStats
runtime.ReadMemStats(&m)
currentMemory := int64(m.Alloc)
currentMemory := gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, 0)
if currentMemory > rm.hardMemoryLimitBytes {
rm.mu.Lock()
@@ -108,14 +108,14 @@ func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
// Check again after GC
runtime.ReadMemStats(&m)
currentMemory = int64(m.Alloc)
currentMemory = gibidiutils.SafeUint64ToInt64WithDefault(m.Alloc, 0)
if currentMemory > rm.hardMemoryLimitBytes {
// Still over limit, activate emergency stop
rm.emergencyStopRequested = true
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitMemory,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitMemory,
"hard memory limit exceeded, emergency stop activated",
"",
map[string]interface{}{
@@ -124,16 +124,15 @@ func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
"emergency_stop": true,
},
)
} else {
}
// Memory freed by GC, continue with degradation
rm.degradationActive = true
logrus.Info("Memory freed by garbage collection, continuing with degradation mode")
}
} else {
// No graceful degradation, hard stop
return utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeResourceLimitMemory,
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeResourceLimitMemory,
"hard memory limit exceeded",
"",
map[string]interface{}{

View File

@@ -1,12 +1,13 @@
package fileproc
import (
"errors"
"testing"
"github.com/spf13/viper"
"github.com/ivuorinen/gibidify/gibidiutils"
"github.com/ivuorinen/gibidify/testutil"
"github.com/ivuorinen/gibidify/utils"
)
func TestResourceMonitor_FileCountLimit(t *testing.T) {
@@ -40,11 +41,12 @@ func TestResourceMonitor_FileCountLimit(t *testing.T) {
}
// Verify it's the correct error type
structErr, ok := err.(*utils.StructuredError)
var structErr *gibidiutils.StructuredError
ok := errors.As(err, &structErr)
if !ok {
t.Errorf("Expected StructuredError, got %T", err)
} else if structErr.Code != utils.CodeResourceLimitFiles {
t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitFiles, structErr.Code)
} else if structErr.Code != gibidiutils.CodeResourceLimitFiles {
t.Errorf("Expected error code %s, got %s", gibidiutils.CodeResourceLimitFiles, structErr.Code)
}
}
@@ -79,10 +81,11 @@ func TestResourceMonitor_TotalSizeLimit(t *testing.T) {
}
// Verify it's the correct error type
structErr, ok := err.(*utils.StructuredError)
var structErr *gibidiutils.StructuredError
ok := errors.As(err, &structErr)
if !ok {
t.Errorf("Expected StructuredError, got %T", err)
} else if structErr.Code != utils.CodeResourceLimitTotalSize {
t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitTotalSize, structErr.Code)
} else if structErr.Code != gibidiutils.CodeResourceLimitTotalSize {
t.Errorf("Expected error code %s, got %s", gibidiutils.CodeResourceLimitTotalSize, structErr.Code)
}
}

View File

@@ -0,0 +1,12 @@
package fileproc
// Test constants to avoid duplication in test files.
// These constants are used across multiple test files in the fileproc package.
const (
// Backpressure configuration keys
testBackpressureEnabled = "backpressure.enabled"
testBackpressureMaxMemory = "backpressure.maxMemoryUsage"
testBackpressureMemoryCheck = "backpressure.memoryCheckInterval"
testBackpressureMaxFiles = "backpressure.maxPendingFiles"
testBackpressureMaxWrites = "backpressure.maxPendingWrites"
)

View File

@@ -5,7 +5,7 @@ import (
"os"
"path/filepath"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// Walker defines an interface for scanning directories.
@@ -30,9 +30,12 @@ func NewProdWalker() *ProdWalker {
// Walk scans the given root directory recursively and returns a slice of file paths
// that are not ignored based on .gitignore/.ignore files, the configuration, or the default binary/image filter.
func (w *ProdWalker) Walk(root string) ([]string, error) {
absRoot, err := utils.GetAbsolutePath(root)
absRoot, err := gibidiutils.GetAbsolutePath(root)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSPathResolution, "failed to resolve root path").WithFilePath(root)
return nil, gibidiutils.WrapError(
err, gibidiutils.ErrorTypeFileSystem, gibidiutils.CodeFSPathResolution,
"failed to resolve root path",
).WithFilePath(root)
}
return w.walkDir(absRoot, []ignoreRule{})
}
@@ -47,7 +50,10 @@ func (w *ProdWalker) walkDir(currentDir string, parentRules []ignoreRule) ([]str
entries, err := os.ReadDir(currentDir)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeFileSystem, utils.CodeFSAccess, "failed to read directory").WithFilePath(currentDir)
return nil, gibidiutils.WrapError(
err, gibidiutils.ErrorTypeFileSystem, gibidiutils.CodeFSAccess,
"failed to read directory",
).WithFilePath(currentDir)
}
rules := loadIgnoreRules(currentDir, parentRules)
@@ -63,7 +69,10 @@ func (w *ProdWalker) walkDir(currentDir string, parentRules []ignoreRule) ([]str
if entry.IsDir() {
subFiles, err := w.walkDir(fullPath, rules)
if err != nil {
return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingTraversal, "failed to traverse subdirectory").WithFilePath(fullPath)
return nil, gibidiutils.WrapError(
err, gibidiutils.ErrorTypeProcessing, gibidiutils.CodeProcessingTraversal,
"failed to traverse subdirectory",
).WithFilePath(fullPath)
}
results = append(results, subFiles...)
} else {

View File

@@ -61,6 +61,8 @@ func TestProdWalkerBinaryCheck(t *testing.T) {
// Reset FileTypeRegistry to ensure clean state
fileproc.ResetRegistryForTesting()
// Ensure cleanup runs even if test fails
t.Cleanup(fileproc.ResetRegistryForTesting)
// Run walker
w := fileproc.NewProdWalker()

View File

@@ -5,30 +5,100 @@ import (
"fmt"
"os"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// StartWriter writes the output in the specified format with memory optimization.
func StartWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, format, prefix, suffix string) {
switch format {
case "markdown":
startMarkdownWriter(outFile, writeCh, done, prefix, suffix)
case "json":
startJSONWriter(outFile, writeCh, done, prefix, suffix)
case "yaml":
startYAMLWriter(outFile, writeCh, done, prefix, suffix)
default:
context := map[string]interface{}{
"format": format,
// WriterConfig holds configuration for the writer.
type WriterConfig struct {
Format string
Prefix string
Suffix string
}
err := utils.NewStructuredError(
utils.ErrorTypeValidation,
utils.CodeValidationFormat,
fmt.Sprintf("unsupported format: %s", format),
// Validate checks if the WriterConfig is valid.
func (c WriterConfig) Validate() error {
if c.Format == "" {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
"format cannot be empty",
"",
nil,
)
}
switch c.Format {
case "markdown", "json", "yaml":
return nil
default:
context := map[string]any{
"format": c.Format,
}
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
fmt.Sprintf("unsupported format: %s", c.Format),
"",
context,
)
utils.LogError("Failed to encode output", err)
}
}
// StartWriter writes the output in the specified format with memory optimization.
func StartWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, config WriterConfig) {
// Validate config
if err := config.Validate(); err != nil {
gibidiutils.LogError("Invalid writer configuration", err)
close(done)
return
}
// Validate outFile is not nil
if outFile == nil {
err := gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOFileWrite,
"output file is nil",
"",
nil,
)
gibidiutils.LogError("Failed to write output", err)
close(done)
return
}
// Validate outFile is accessible
if _, err := outFile.Stat(); err != nil {
structErr := gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOFileWrite,
"failed to stat output file",
)
gibidiutils.LogError("Failed to validate output file", structErr)
close(done)
return
}
switch config.Format {
case "markdown":
startMarkdownWriter(outFile, writeCh, done, config.Prefix, config.Suffix)
case "json":
startJSONWriter(outFile, writeCh, done, config.Prefix, config.Suffix)
case "yaml":
startYAMLWriter(outFile, writeCh, done, config.Prefix, config.Suffix)
default:
context := map[string]interface{}{
"format": config.Format,
}
err := gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationFormat,
fmt.Sprintf("unsupported format: %s", config.Format),
"",
context,
)
gibidiutils.LogError("Failed to encode output", err)
close(done)
}
}

View File

@@ -68,7 +68,11 @@ func runWriterTest(t *testing.T, format string) []byte {
wg.Add(1)
go func() {
defer wg.Done()
fileproc.StartWriter(outFile, writeCh, doneCh, format, "PREFIX", "SUFFIX")
fileproc.StartWriter(outFile, writeCh, doneCh, fileproc.WriterConfig{
Format: format,
Prefix: "PREFIX",
Suffix: "SUFFIX",
})
}()
// Wait until writer signals completion

View File

@@ -5,9 +5,10 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/ivuorinen/gibidify/utils"
"github.com/ivuorinen/gibidify/gibidiutils"
)
// YAMLWriter handles YAML format output with streaming support.
@@ -20,11 +21,151 @@ func NewYAMLWriter(outFile *os.File) *YAMLWriter {
return &YAMLWriter{outFile: outFile}
}
const (
maxPathLength = 4096 // Maximum total path length
maxFilenameLength = 255 // Maximum individual filename component length
)
// validatePathComponents validates individual path components for security issues.
func validatePathComponents(trimmed, cleaned string, components []string) error {
for i, component := range components {
// Reject path components that are exactly ".." (path traversal)
if component == ".." {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"path traversal not allowed",
trimmed,
map[string]any{
"path": trimmed,
"cleaned": cleaned,
"invalid_component": component,
"component_index": i,
},
)
}
// Reject empty components (e.g., from "foo//bar")
if component == "" && i > 0 && i < len(components)-1 {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"path contains empty component",
trimmed,
map[string]any{
"path": trimmed,
"cleaned": cleaned,
"component_index": i,
},
)
}
// Enforce maximum filename length for each component
if len(component) > maxFilenameLength {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"path component exceeds maximum length",
trimmed,
map[string]any{
"component": component,
"component_length": len(component),
"max_length": maxFilenameLength,
"component_index": i,
},
)
}
}
return nil
}
// validatePath validates and sanitizes a file path for safe output.
// It rejects absolute paths, path traversal attempts, empty paths, and overly long paths.
func validatePath(path string) error {
// Reject empty paths
trimmed := strings.TrimSpace(path)
if trimmed == "" {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationRequired,
"file path cannot be empty",
"",
nil,
)
}
// Enforce maximum path length to prevent resource abuse
if len(trimmed) > maxPathLength {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"path exceeds maximum length",
trimmed,
map[string]any{
"path_length": len(trimmed),
"max_length": maxPathLength,
},
)
}
// Reject absolute paths
if filepath.IsAbs(trimmed) {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"absolute paths are not allowed",
trimmed,
map[string]any{"path": trimmed},
)
}
// Validate original trimmed path components before cleaning
origComponents := strings.Split(filepath.ToSlash(trimmed), "/")
for _, comp := range origComponents {
if comp == "" || comp == "." || comp == ".." {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"invalid or traversal path component in original path",
trimmed,
map[string]any{"path": trimmed, "component": comp},
)
}
}
// Clean the path to normalize it
cleaned := filepath.Clean(trimmed)
// After cleaning, ensure it's still relative and doesn't start with /
if filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, "/") {
return gibidiutils.NewStructuredError(
gibidiutils.ErrorTypeValidation,
gibidiutils.CodeValidationPath,
"path must be relative",
trimmed,
map[string]any{"path": trimmed, "cleaned": cleaned},
)
}
// Split into components and validate each one
// Use ToSlash to normalize for cross-platform validation
components := strings.Split(filepath.ToSlash(cleaned), "/")
return validatePathComponents(trimmed, cleaned, components)
}
// Start writes the YAML header.
func (w *YAMLWriter) Start(prefix, suffix string) error {
// Write YAML header
if _, err := fmt.Fprintf(w.outFile, "prefix: %s\nsuffix: %s\nfiles:\n", yamlQuoteString(prefix), yamlQuoteString(suffix)); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML header")
if _, err := fmt.Fprintf(
w.outFile, "prefix: %s\nsuffix: %s\nfiles:\n",
gibidiutils.EscapeForYAML(prefix), gibidiutils.EscapeForYAML(suffix),
); err != nil {
return gibidiutils.WrapError(
err,
gibidiutils.ErrorTypeIO,
gibidiutils.CodeIOWrite,
"failed to write YAML header",
)
}
return nil
}
@@ -44,13 +185,32 @@ func (w *YAMLWriter) Close() error {
// writeStreaming writes a large file as YAML in streaming chunks.
func (w *YAMLWriter) writeStreaming(req WriteRequest) error {
defer w.closeReader(req.Reader, req.Path)
// Validate path before using it
if err := validatePath(req.Path); err != nil {
return err
}
// Check for nil reader
if req.Reader == nil {
return gibidiutils.WrapError(
nil, gibidiutils.ErrorTypeValidation, gibidiutils.CodeValidationRequired,
"nil reader in write request",
).WithFilePath(req.Path)
}
defer gibidiutils.SafeCloseReader(req.Reader, req.Path)
language := detectLanguage(req.Path)
// Write YAML file entry start
if _, err := fmt.Fprintf(w.outFile, " - path: %s\n language: %s\n content: |\n", yamlQuoteString(req.Path), language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML file start").WithFilePath(req.Path)
if _, err := fmt.Fprintf(
w.outFile, " - path: %s\n language: %s\n content: |\n",
gibidiutils.EscapeForYAML(req.Path), language,
); err != nil {
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write YAML file start",
).WithFilePath(req.Path)
}
// Stream content with YAML indentation
@@ -59,6 +219,11 @@ func (w *YAMLWriter) writeStreaming(req WriteRequest) error {
// writeInline writes a small file directly as YAML.
func (w *YAMLWriter) writeInline(req WriteRequest) error {
// Validate path before using it
if err := validatePath(req.Path); err != nil {
return err
}
language := detectLanguage(req.Path)
fileData := FileData{
Path: req.Path,
@@ -67,15 +232,24 @@ func (w *YAMLWriter) writeInline(req WriteRequest) error {
}
// Write YAML entry
if _, err := fmt.Fprintf(w.outFile, " - path: %s\n language: %s\n content: |\n", yamlQuoteString(fileData.Path), fileData.Language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML entry start").WithFilePath(req.Path)
if _, err := fmt.Fprintf(
w.outFile, " - path: %s\n language: %s\n content: |\n",
gibidiutils.EscapeForYAML(fileData.Path), fileData.Language,
); err != nil {
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write YAML entry start",
).WithFilePath(req.Path)
}
// Write indented content
lines := strings.Split(fileData.Content, "\n")
for _, line := range lines {
if _, err := fmt.Fprintf(w.outFile, " %s\n", line); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML content line").WithFilePath(req.Path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write YAML content line",
).WithFilePath(req.Path)
}
}
@@ -85,43 +259,29 @@ func (w *YAMLWriter) writeInline(req WriteRequest) error {
// streamYAMLContent streams content with YAML indentation.
func (w *YAMLWriter) streamYAMLContent(reader io.Reader, path string) error {
scanner := bufio.NewScanner(reader)
// Increase buffer size to handle long lines (up to 10MB per line)
buf := make([]byte, 0, 64*1024)
scanner.Buffer(buf, 10*1024*1024)
for scanner.Scan() {
line := scanner.Text()
if _, err := fmt.Fprintf(w.outFile, " %s\n", line); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write YAML line").WithFilePath(path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOWrite,
"failed to write YAML line",
).WithFilePath(path)
}
}
if err := scanner.Err(); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIORead, "failed to scan YAML content").WithFilePath(path)
return gibidiutils.WrapError(
err, gibidiutils.ErrorTypeIO, gibidiutils.CodeIOFileRead,
"failed to scan YAML content",
).WithFilePath(path)
}
return nil
}
// closeReader safely closes a reader if it implements io.Closer.
func (w *YAMLWriter) closeReader(reader io.Reader, path string) {
if closer, ok := reader.(io.Closer); ok {
if err := closer.Close(); err != nil {
utils.LogError(
"Failed to close file reader",
utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOClose, "failed to close file reader").WithFilePath(path),
)
}
}
}
// yamlQuoteString quotes a string for YAML output if needed.
func yamlQuoteString(s string) string {
if s == "" {
return `""`
}
// Simple YAML quoting - use double quotes if string contains special characters
if strings.ContainsAny(s, "\n\r\t:\"'\\") {
return fmt.Sprintf(`"%s"`, strings.ReplaceAll(s, `"`, `\"`))
}
return s
}
// startYAMLWriter handles YAML format output with streaming support.
func startYAMLWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {
defer close(done)
@@ -130,19 +290,19 @@ func startYAMLWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<-
// Start writing
if err := writer.Start(prefix, suffix); err != nil {
utils.LogError("Failed to write YAML header", err)
gibidiutils.LogError("Failed to write YAML header", err)
return
}
// Process files
for req := range writeCh {
if err := writer.WriteFile(req); err != nil {
utils.LogError("Failed to write YAML file", err)
gibidiutils.LogError("Failed to write YAML file", err)
}
}
// Close writer
if err := writer.Close(); err != nil {
utils.LogError("Failed to write YAML end", err)
gibidiutils.LogError("Failed to write YAML end", err)
}
}

View File

@@ -1,8 +1,11 @@
// Package utils provides common utility functions.
package utils
// Package gibidiutils provides common utility functions for gibidify.
package gibidiutils
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/sirupsen/logrus"
)
@@ -47,6 +50,11 @@ func (e ErrorType) String() string {
}
}
// Error formatting templates.
const (
errorFormatWithCause = "%s: %v"
)
// StructuredError represents a structured error with type, code, and context.
type StructuredError struct {
Type ErrorType
@@ -60,10 +68,25 @@ type StructuredError struct {
// Error implements the error interface.
func (e *StructuredError) Error() string {
if e.Cause != nil {
return fmt.Sprintf("%s [%s]: %s: %v", e.Type, e.Code, e.Message, e.Cause)
base := fmt.Sprintf("%s [%s]: %s", e.Type, e.Code, e.Message)
if len(e.Context) > 0 {
// Sort keys for deterministic output
keys := make([]string, 0, len(e.Context))
for k := range e.Context {
keys = append(keys, k)
}
return fmt.Sprintf("%s [%s]: %s", e.Type, e.Code, e.Message)
sort.Strings(keys)
ctxPairs := make([]string, 0, len(e.Context))
for _, k := range keys {
ctxPairs = append(ctxPairs, fmt.Sprintf("%s=%v", k, e.Context[k]))
}
base = fmt.Sprintf("%s | context: %s", base, strings.Join(ctxPairs, ", "))
}
if e.Cause != nil {
return fmt.Sprintf(errorFormatWithCause, base, e.Cause)
}
return base
}
// Unwrap returns the underlying cause error.
@@ -93,7 +116,11 @@ func (e *StructuredError) WithLine(line int) *StructuredError {
}
// NewStructuredError creates a new structured error.
func NewStructuredError(errorType ErrorType, code, message, filePath string, context map[string]interface{}) *StructuredError {
func NewStructuredError(
errorType ErrorType,
code, message, filePath string,
context map[string]any,
) *StructuredError {
return &StructuredError{
Type: errorType,
Code: code,
@@ -135,34 +162,40 @@ func WrapErrorf(err error, errorType ErrorType, code, format string, args ...any
// Common error codes for each type
const (
// CLI Error Codes
CodeCLIMissingSource = "MISSING_SOURCE"
CodeCLIInvalidArgs = "INVALID_ARGS"
// FileSystem Error Codes
CodeFSPathResolution = "PATH_RESOLUTION"
CodeFSPermission = "PERMISSION_DENIED"
CodeFSNotFound = "NOT_FOUND"
CodeFSAccess = "ACCESS_DENIED"
// Processing Error Codes
CodeProcessingFileRead = "FILE_READ"
CodeProcessingCollection = "COLLECTION"
CodeProcessingTraversal = "TRAVERSAL"
CodeProcessingEncode = "ENCODE"
// Configuration Error Codes
CodeConfigValidation = "VALIDATION"
CodeConfigMissing = "MISSING"
// IO Error Codes
CodeIOFileCreate = "FILE_CREATE"
CodeIOFileWrite = "FILE_WRITE"
CodeIOEncoding = "ENCODING"
CodeIOWrite = "WRITE"
CodeIORead = "READ"
CodeIOFileRead = "FILE_READ"
CodeIOClose = "CLOSE"
// Validation Error Codes
CodeValidationFormat = "FORMAT"
CodeValidationFileType = "FILE_TYPE"
CodeValidationSize = "SIZE_LIMIT"
@@ -170,6 +203,7 @@ const (
CodeValidationPath = "PATH_TRAVERSAL"
// Resource Limit Error Codes
CodeResourceLimitFiles = "FILE_COUNT_LIMIT"
CodeResourceLimitTotalSize = "TOTAL_SIZE_LIMIT"
CodeResourceLimitTimeout = "TIMEOUT"
@@ -180,9 +214,16 @@ const (
// Predefined error constructors for common error scenarios
// NewCLIMissingSourceError creates a CLI error for missing source argument.
func NewCLIMissingSourceError() *StructuredError {
return NewStructuredError(ErrorTypeCLI, CodeCLIMissingSource, "usage: gibidify -source <source_directory> [--destination <output_file>] [--format=json|yaml|markdown]", "", nil)
// NewMissingSourceError creates a CLI error for missing source argument.
func NewMissingSourceError() *StructuredError {
return NewStructuredError(
ErrorTypeCLI,
CodeCLIMissingSource,
"usage: gibidify -source <source_directory> "+
"[--destination <output_file>] [--format=json|yaml|markdown]",
"",
nil,
)
}
// NewFileSystemError creates a file system error.
@@ -217,16 +258,18 @@ func LogError(operation string, err error, args ...any) {
}
// Check if it's a structured error and log with additional context
if structErr, ok := err.(*StructuredError); ok {
var structErr *StructuredError
if errors.As(err, &structErr) {
logrus.WithFields(logrus.Fields{
"error_type": structErr.Type.String(),
"error_code": structErr.Code,
"context": structErr.Context,
"file_path": structErr.FilePath,
"line": structErr.Line,
}).Errorf("%s: %v", msg, err)
}).Errorf(errorFormatWithCause, msg, err)
} else {
logrus.Errorf("%s: %v", msg, err)
// Log regular errors without structured fields
logrus.Errorf(errorFormatWithCause, msg, err)
}
}
}

View File

@@ -0,0 +1,367 @@
package gibidiutils
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// TestErrorTypeString verifies ErrorType.String for every known type and
// confirms out-of-range values fall back to "Unknown".
func TestErrorTypeString(t *testing.T) {
	tests := []struct {
		name     string
		errType  ErrorType
		expected string
	}{
		{
			name:     "CLI error type",
			errType:  ErrorTypeCLI,
			expected: "CLI",
		},
		{
			name:     "FileSystem error type",
			errType:  ErrorTypeFileSystem,
			expected: "FileSystem",
		},
		{
			name:     "Processing error type",
			errType:  ErrorTypeProcessing,
			expected: "Processing",
		},
		{
			name:     "Configuration error type",
			errType:  ErrorTypeConfiguration,
			expected: "Configuration",
		},
		{
			name:     "IO error type",
			errType:  ErrorTypeIO,
			expected: "IO",
		},
		{
			name:     "Validation error type",
			errType:  ErrorTypeValidation,
			expected: "Validation",
		},
		{
			name:     "Unknown error type",
			errType:  ErrorTypeUnknown,
			expected: "Unknown",
		},
		{
			name:     "Invalid error type",
			errType:  ErrorType(999),
			expected: "Unknown",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.errType.String()
			assert.Equal(t, tt.expected, result)
		})
	}
}

// TestStructuredErrorMethods exercises Error() formatting (with and without
// context) and Unwrap() (with and without a cause).
func TestStructuredErrorMethods(t *testing.T) {
	t.Run("Error method", func(t *testing.T) {
		err := &StructuredError{
			Type:    ErrorTypeValidation,
			Code:    CodeValidationRequired,
			Message: "field is required",
		}
		expected := "Validation [REQUIRED]: field is required"
		assert.Equal(t, expected, err.Error())
	})
	t.Run("Error method with context", func(t *testing.T) {
		err := &StructuredError{
			Type:    ErrorTypeFileSystem,
			Code:    CodeFSNotFound,
			Message: testErrFileNotFound,
			Context: map[string]interface{}{
				"path": "/test/file.txt",
			},
		}
		errStr := err.Error()
		assert.Contains(t, errStr, "FileSystem")
		assert.Contains(t, errStr, "NOT_FOUND")
		assert.Contains(t, errStr, testErrFileNotFound)
		assert.Contains(t, errStr, "/test/file.txt")
		assert.Contains(t, errStr, "path")
	})
	t.Run("Unwrap method", func(t *testing.T) {
		innerErr := errors.New("inner error")
		err := &StructuredError{
			Type:    ErrorTypeIO,
			Code:    CodeIOFileWrite,
			Message: testErrWriteFailed,
			Cause:   innerErr,
		}
		assert.Equal(t, innerErr, err.Unwrap())
	})
	t.Run("Unwrap with nil cause", func(t *testing.T) {
		err := &StructuredError{
			Type:    ErrorTypeIO,
			Code:    CodeIOFileWrite,
			Message: testErrWriteFailed,
		}
		assert.Nil(t, err.Unwrap())
	})
}

// TestWithContextMethods covers the fluent WithContext/WithFilePath/WithLine
// mutators on StructuredError.
func TestWithContextMethods(t *testing.T) {
	t.Run("WithContext", func(t *testing.T) {
		err := &StructuredError{
			Type:    ErrorTypeValidation,
			Code:    CodeValidationFormat,
			Message: testErrInvalidFormat,
		}
		err = err.WithContext("format", "xml")
		err = err.WithContext("expected", "json")
		assert.NotNil(t, err.Context)
		assert.Equal(t, "xml", err.Context["format"])
		assert.Equal(t, "json", err.Context["expected"])
	})
	t.Run("WithFilePath", func(t *testing.T) {
		err := &StructuredError{
			Type:    ErrorTypeFileSystem,
			Code:    CodeFSPermission,
			Message: "permission denied",
		}
		err = err.WithFilePath("/etc/passwd")
		assert.Equal(t, "/etc/passwd", err.FilePath)
	})
	t.Run("WithLine", func(t *testing.T) {
		err := &StructuredError{
			Type:    ErrorTypeProcessing,
			Code:    CodeProcessingFileRead,
			Message: "read error",
		}
		err = err.WithLine(42)
		assert.Equal(t, 42, err.Line)
	})
}

// TestNewStructuredError checks that the constructor copies every argument
// into the corresponding field unchanged.
func TestNewStructuredError(t *testing.T) {
	tests := []struct {
		name     string
		errType  ErrorType
		code     string
		message  string
		filePath string
		context  map[string]interface{}
	}{
		{
			name:     "basic error",
			errType:  ErrorTypeValidation,
			code:     CodeValidationRequired,
			message:  "field is required",
			filePath: "",
			context:  nil,
		},
		{
			name:     "error with file path",
			errType:  ErrorTypeFileSystem,
			code:     CodeFSNotFound,
			message:  testErrFileNotFound,
			filePath: "/test/missing.txt",
			context:  nil,
		},
		{
			name:    "error with context",
			errType: ErrorTypeIO,
			code:    CodeIOFileWrite,
			message: testErrWriteFailed,
			context: map[string]interface{}{
				"size":  1024,
				"error": "disk full",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := NewStructuredError(tt.errType, tt.code, tt.message, tt.filePath, tt.context)
			assert.NotNil(t, err)
			assert.Equal(t, tt.errType, err.Type)
			assert.Equal(t, tt.code, err.Code)
			assert.Equal(t, tt.message, err.Message)
			assert.Equal(t, tt.filePath, err.FilePath)
			assert.Equal(t, tt.context, err.Context)
		})
	}
}

// TestNewStructuredErrorf checks printf-style message formatting.
func TestNewStructuredErrorf(t *testing.T) {
	err := NewStructuredErrorf(
		ErrorTypeValidation,
		CodeValidationSize,
		"file size %d exceeds limit %d",
		2048, 1024,
	)
	assert.NotNil(t, err)
	assert.Equal(t, ErrorTypeValidation, err.Type)
	assert.Equal(t, CodeValidationSize, err.Code)
	assert.Equal(t, "file size 2048 exceeds limit 1024", err.Message)
}

// TestWrapError checks that wrapping preserves the cause and attaches
// type/code/message.
func TestWrapError(t *testing.T) {
	innerErr := errors.New("original error")
	wrappedErr := WrapError(
		innerErr,
		ErrorTypeProcessing,
		CodeProcessingFileRead,
		"failed to process file",
	)
	assert.NotNil(t, wrappedErr)
	assert.Equal(t, ErrorTypeProcessing, wrappedErr.Type)
	assert.Equal(t, CodeProcessingFileRead, wrappedErr.Code)
	assert.Equal(t, "failed to process file", wrappedErr.Message)
	assert.Equal(t, innerErr, wrappedErr.Cause)
}

// TestWrapErrorf checks the formatted variant of WrapError.
func TestWrapErrorf(t *testing.T) {
	innerErr := errors.New("original error")
	wrappedErr := WrapErrorf(
		innerErr,
		ErrorTypeIO,
		CodeIOFileCreate,
		"failed to create %s in %s",
		"output.txt", "/tmp",
	)
	assert.NotNil(t, wrappedErr)
	assert.Equal(t, ErrorTypeIO, wrappedErr.Type)
	assert.Equal(t, CodeIOFileCreate, wrappedErr.Code)
	assert.Equal(t, "failed to create output.txt in /tmp", wrappedErr.Message)
	assert.Equal(t, innerErr, wrappedErr.Cause)
}

// TestSpecificErrorConstructors covers the per-category convenience
// constructors.
func TestSpecificErrorConstructors(t *testing.T) {
	t.Run("NewMissingSourceError", func(t *testing.T) {
		err := NewMissingSourceError()
		assert.NotNil(t, err)
		assert.Equal(t, ErrorTypeCLI, err.Type)
		assert.Equal(t, CodeCLIMissingSource, err.Code)
		assert.Contains(t, err.Message, "source")
	})
	t.Run("NewFileSystemError", func(t *testing.T) {
		err := NewFileSystemError(CodeFSPermission, "access denied")
		assert.NotNil(t, err)
		assert.Equal(t, ErrorTypeFileSystem, err.Type)
		assert.Equal(t, CodeFSPermission, err.Code)
		assert.Equal(t, "access denied", err.Message)
	})
	t.Run("NewProcessingError", func(t *testing.T) {
		err := NewProcessingError(CodeProcessingCollection, "collection failed")
		assert.NotNil(t, err)
		assert.Equal(t, ErrorTypeProcessing, err.Type)
		assert.Equal(t, CodeProcessingCollection, err.Code)
		assert.Equal(t, "collection failed", err.Message)
	})
	t.Run("NewIOError", func(t *testing.T) {
		err := NewIOError(CodeIOFileWrite, testErrWriteFailed)
		assert.NotNil(t, err)
		assert.Equal(t, ErrorTypeIO, err.Type)
		assert.Equal(t, CodeIOFileWrite, err.Code)
		assert.Equal(t, testErrWriteFailed, err.Message)
	})
	t.Run("NewValidationError", func(t *testing.T) {
		err := NewValidationError(CodeValidationFormat, testErrInvalidFormat)
		assert.NotNil(t, err)
		assert.Equal(t, ErrorTypeValidation, err.Type)
		assert.Equal(t, CodeValidationFormat, err.Code)
		assert.Equal(t, testErrInvalidFormat, err.Message)
	})
}

// TestLogErrorf is already covered in errors_test.go

// TestStructuredErrorChaining verifies that the With* mutators can be chained
// fluently off the constructor.
func TestStructuredErrorChaining(t *testing.T) {
	// Test method chaining
	err := NewStructuredError(
		ErrorTypeFileSystem,
		CodeFSNotFound,
		testErrFileNotFound,
		"",
		nil,
	).WithFilePath("/test.txt").WithLine(10).WithContext("operation", "read")
	assert.Equal(t, "/test.txt", err.FilePath)
	assert.Equal(t, 10, err.Line)
	assert.Equal(t, "read", err.Context["operation"])
}

// TestErrorCodes sanity-checks that every exported error-code constant is a
// non-empty string.
func TestErrorCodes(t *testing.T) {
	// Test that all error codes are defined
	codes := []string{
		CodeCLIMissingSource,
		CodeCLIInvalidArgs,
		CodeFSPathResolution,
		CodeFSPermission,
		CodeFSNotFound,
		CodeFSAccess,
		CodeProcessingFileRead,
		CodeProcessingCollection,
		CodeProcessingTraversal,
		CodeProcessingEncode,
		CodeConfigValidation,
		CodeConfigMissing,
		CodeIOFileCreate,
		CodeIOFileWrite,
		CodeIOEncoding,
		CodeIOWrite,
		CodeIOFileRead,
		CodeIOClose,
		CodeValidationRequired,
		CodeValidationFormat,
		CodeValidationSize,
		CodeValidationPath,
		CodeResourceLimitFiles,
		CodeResourceLimitTotalSize,
		CodeResourceLimitMemory,
		CodeResourceLimitTimeout,
	}
	// All codes should be non-empty strings
	for _, code := range codes {
		assert.NotEmpty(t, code, "Error code should not be empty")
		assert.NotEqual(t, "", code, "Error code should be defined")
	}
}

// TestErrorUnwrapChain verifies Unwrap through a two-level wrap and that each
// layer's Error() names its own type.
func TestErrorUnwrapChain(t *testing.T) {
	// Test unwrapping through multiple levels
	innermost := errors.New("innermost error")
	middle := WrapError(innermost, ErrorTypeIO, CodeIOFileRead, "read failed")
	outer := WrapError(middle, ErrorTypeProcessing, CodeProcessingFileRead, "processing failed")
	// Test unwrapping
	assert.Equal(t, middle, outer.Unwrap())
	assert.Equal(t, innermost, middle.Unwrap())
	// innermost is a plain error, doesn't have Unwrap() method
	// No need to test it
	// Test error chain messages
	assert.Contains(t, outer.Error(), "Processing")
	assert.Contains(t, middle.Error(), "IO")
}

View File

@@ -1,4 +1,5 @@
package utils
// Package gibidiutils provides common utility functions for gibidify.
package gibidiutils
import (
"bytes"
@@ -175,7 +176,7 @@ func TestLogErrorf(t *testing.T) {
}
}
func TestLogErrorConcurrency(t *testing.T) {
func TestLogErrorConcurrency(_ *testing.T) {
// Test that LogError is safe for concurrent use
done := make(chan bool)
for i := 0; i < 10; i++ {
@@ -191,7 +192,7 @@ func TestLogErrorConcurrency(t *testing.T) {
}
}
func TestLogErrorfConcurrency(t *testing.T) {
func TestLogErrorfConcurrency(_ *testing.T) {
// Test that LogErrorf is safe for concurrent use
done := make(chan bool)
for i := 0; i < 10; i++ {

10
gibidiutils/icons.go Normal file
View File

@@ -0,0 +1,10 @@
package gibidiutils
// Unicode icons and symbols for CLI UI and test output.
// Unicode icons and symbols for CLI UI and test output.
//
// Fix: IconInfo was declared as the empty string although its comment
// documents U+2139 + U+FE0F; the glyph was evidently lost to an encoding
// issue. Restored to the emoji-presentation information source.
const (
	IconSuccess = "✓"  // U+2713 CHECK MARK
	IconError   = "✗"  // U+2717 BALLOT X
	IconWarning = "⚠"  // U+26A0 WARNING SIGN
	IconBullet  = "•"  // U+2022 BULLET
	IconInfo    = "ℹ️" // U+2139 INFORMATION SOURCE + U+FE0F VARIATION SELECTOR-16
)

311
gibidiutils/paths.go Normal file
View File

@@ -0,0 +1,311 @@
// Package gibidiutils provides common utility functions for gibidify.
package gibidiutils
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// EscapeForMarkdown sanitizes a string for safe use in Markdown code-fence
// and header lines. Backticks are escaped with a backslash, and each CR/LF
// character is collapsed to a single space.
func EscapeForMarkdown(s string) string {
	// A single Replacer performs all three substitutions in one pass;
	// the patterns are distinct single characters, so the result matches
	// applying strings.ReplaceAll for each in sequence.
	replacer := strings.NewReplacer(
		"`", "\\`",
		"\n", " ",
		"\r", " ",
	)
	return replacer.Replace(s)
}
// GetAbsolutePath returns the absolute form of path.
// Failures from filepath.Abs are wrapped with the offending path for context.
func GetAbsolutePath(path string) (string, error) {
	resolved, err := filepath.Abs(path)
	if err != nil {
		return "", fmt.Errorf("failed to get absolute path for %s: %w", path, err)
	}
	return resolved, nil
}
// GetBaseName returns the final element of absPath, falling back to the
// literal "output" when the path has no meaningful base ("." or empty).
func GetBaseName(absPath string) string {
	switch base := filepath.Base(absPath); base {
	case ".", "":
		return "output"
	default:
		return base
	}
}
// checkPathTraversal rejects any path containing a ".." component, returning
// a structured validation error that names the offending context.
func checkPathTraversal(path, context string) error {
	// Normalize separators to "/" without cleaning, so ".." components are
	// preserved and the check behaves the same on Windows and POSIX paths.
	for _, part := range strings.Split(filepath.ToSlash(path), "/") {
		if part != ".." {
			continue
		}
		return NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			fmt.Sprintf("path traversal attempt detected in %s", context),
			path,
			map[string]interface{}{
				"original_path": path,
			},
		)
	}
	return nil
}
// cleanAndResolveAbsPath normalizes path with filepath.Clean and converts it
// to an absolute path, reporting failures as structured filesystem errors
// tagged with the given context string.
func cleanAndResolveAbsPath(path, context string) (string, error) {
	resolved, err := filepath.Abs(filepath.Clean(path))
	if err == nil {
		return resolved, nil
	}
	return "", NewStructuredError(
		ErrorTypeFileSystem,
		CodeFSPathResolution,
		fmt.Sprintf("cannot resolve %s", context),
		path,
		map[string]interface{}{
			"error": err.Error(),
		},
	)
}
// evalSymlinksOrStructuredError wraps filepath.EvalSymlinks with structured error handling.
// path is the path to evaluate, context names the role of that path in the
// failing operation (e.g. "source path"), and original is the user-supplied
// path recorded on the returned error for diagnostics.
func evalSymlinksOrStructuredError(path, context, original string) (string, error) {
	eval, err := filepath.EvalSymlinks(path)
	if err != nil {
		// EvalSymlinks fails for non-existent paths as well as broken
		// links; both the attempted path and the original input are
		// preserved in the error context.
		return "", NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			fmt.Sprintf("cannot resolve symlinks for %s", context),
			original,
			map[string]interface{}{
				"resolved_path": path,
				"context":       context,
				"error":         err.Error(),
			},
		)
	}
	return eval, nil
}
// validateWorkingDirectoryBoundary checks if the given absolute path escapes the working directory.
// abs is the already-resolved absolute form of the user-supplied path; both
// abs and the working directory are symlink-resolved before comparison so
// that a symlinked cwd (e.g. /tmp on macOS) does not cause false positives.
// Returns nil when abs is at or below the current working directory.
func validateWorkingDirectoryBoundary(abs, path string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return NewStructuredError(
			ErrorTypeFileSystem,
			CodeFSPathResolution,
			"cannot get current working directory",
			path,
			map[string]interface{}{
				"error": err.Error(),
			},
		)
	}
	cwdAbs, err := filepath.Abs(cwd)
	if err != nil {
		return NewStructuredError(
			ErrorTypeFileSystem,
			CodeFSPathResolution,
			"cannot resolve current working directory",
			path,
			map[string]interface{}{
				"error": err.Error(),
			},
		)
	}
	// Resolve symlinks on both sides so the containment check compares
	// physical locations, not lexical paths.
	absEval, err := evalSymlinksOrStructuredError(abs, "source path", path)
	if err != nil {
		return err
	}
	cwdEval, err := evalSymlinksOrStructuredError(cwdAbs, "working directory", path)
	if err != nil {
		return err
	}
	rel, err := filepath.Rel(cwdEval, absEval)
	if err != nil {
		return NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			"cannot determine relative path",
			path,
			map[string]interface{}{
				"resolved_path": absEval,
				"working_dir":   cwdEval,
				"error":         err.Error(),
			},
		)
	}
	// A relative path of ".." (or one starting with "../") means abs lies
	// outside the working directory tree.
	if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			"source path attempts to access directories outside current working directory",
			path,
			map[string]interface{}{
				"resolved_path": absEval,
				"working_dir":   cwdEval,
				"relative_path": rel,
			},
		)
	}
	return nil
}
// ValidateSourcePath validates a source directory path for security.
// It ensures the path exists, is a directory, and doesn't contain path
// traversal attempts. Relative paths must additionally stay within the
// current working directory.
//
// Fix: the original recomputed filepath.Clean(path) and ran os.Stat on that
// relative cleaned path, while the security checks used the resolved absolute
// path — the Stat now targets the same resolved path, removing the redundant
// clean and the inconsistency.
//
//revive:disable-next-line:function-length
func ValidateSourcePath(path string) error {
	if path == "" {
		return NewValidationError(CodeValidationRequired, "source path is required")
	}
	// Reject ".." components before any cleaning can hide them.
	if err := checkPathTraversal(path, "source path"); err != nil {
		return err
	}
	// Clean and resolve to an absolute path.
	abs, err := cleanAndResolveAbsPath(path, "source path")
	if err != nil {
		return err
	}
	// Relative paths must not escape the current working directory.
	if !filepath.IsAbs(path) {
		if err := validateWorkingDirectoryBoundary(abs, path); err != nil {
			return err
		}
	}
	// The resolved path must exist and be a directory.
	info, err := os.Stat(abs)
	if err != nil {
		if os.IsNotExist(err) {
			return NewFileSystemError(CodeFSNotFound, "source directory does not exist").WithFilePath(path)
		}
		return NewStructuredError(
			ErrorTypeFileSystem,
			CodeFSAccess,
			"cannot access source directory",
			path,
			map[string]interface{}{
				"error": err.Error(),
			},
		)
	}
	if !info.IsDir() {
		return NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			"source path must be a directory",
			path,
			map[string]interface{}{
				"is_file": true,
			},
		)
	}
	return nil
}
// ValidateDestinationPath validates a destination file path for security.
// It ensures the path doesn't contain path traversal attempts and the parent directory exists.
// The destination file itself does not need to exist, but it must not be an
// existing directory, and its parent must exist and be a directory.
func ValidateDestinationPath(path string) error {
	if path == "" {
		return NewValidationError(CodeValidationRequired, "destination path is required")
	}
	// Check for path traversal patterns before cleaning
	if err := checkPathTraversal(path, "destination path"); err != nil {
		return err
	}
	// Get absolute path to ensure it's not trying to escape current working directory
	abs, err := cleanAndResolveAbsPath(path, "destination path")
	if err != nil {
		return err
	}
	// Ensure the destination is not a directory
	// (a Stat error here is fine: a non-existent destination is allowed).
	if info, err := os.Stat(abs); err == nil && info.IsDir() {
		return NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			"destination cannot be a directory",
			path,
			map[string]interface{}{
				"is_directory": true,
			},
		)
	}
	// Check if parent directory exists and is writable
	parentDir := filepath.Dir(abs)
	if parentInfo, err := os.Stat(parentDir); err != nil {
		if os.IsNotExist(err) {
			return NewStructuredError(
				ErrorTypeFileSystem,
				CodeFSNotFound,
				"destination parent directory does not exist",
				path,
				map[string]interface{}{
					"parent_dir": parentDir,
				},
			)
		}
		return NewStructuredError(
			ErrorTypeFileSystem,
			CodeFSAccess,
			"cannot access destination parent directory",
			path,
			map[string]interface{}{
				"parent_dir": parentDir,
				"error":      err.Error(),
			},
		)
	} else if !parentInfo.IsDir() {
		return NewStructuredError(
			ErrorTypeValidation,
			CodeValidationPath,
			"destination parent is not a directory",
			path,
			map[string]interface{}{
				"parent_dir": parentDir,
			},
		)
	}
	return nil
}
// ValidateConfigPath validates a configuration file path for security.
// An empty path is accepted (configuration is optional); otherwise the path
// must be free of ".." traversal components.
func ValidateConfigPath(path string) error {
	if path != "" {
		return checkPathTraversal(path, "config path")
	}
	return nil
}

View File

@@ -0,0 +1,368 @@
package gibidiutils
import (
"errors"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetBaseName covers normal, trailing-slash, root, and degenerate paths.
func TestGetBaseName(t *testing.T) {
	tests := []struct {
		name     string
		absPath  string
		expected string
	}{
		{
			name:     "normal path",
			absPath:  "/home/user/project",
			expected: "project",
		},
		{
			name:     "path with trailing slash",
			absPath:  "/home/user/project/",
			expected: "project",
		},
		{
			name:     "root path",
			absPath:  "/",
			expected: "/",
		},
		{
			name:     "current directory",
			absPath:  ".",
			expected: "output",
		},
		{
			name:     testEmptyPath,
			absPath:  "",
			expected: "output",
		},
		{
			name:     "file path",
			absPath:  "/home/user/file.txt",
			expected: "file.txt",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := GetBaseName(tt.absPath)
			assert.Equal(t, tt.expected, result)
		})
	}
}

// TestValidateSourcePath exercises empty, traversal, missing, non-directory,
// and valid source paths; structured errors must carry a code and type.
func TestValidateSourcePath(t *testing.T) {
	// Create a temp directory for testing
	tempDir := t.TempDir()
	tempFile := filepath.Join(tempDir, "test.txt")
	require.NoError(t, os.WriteFile(tempFile, []byte("test"), 0o600))
	tests := []struct {
		name          string
		path          string
		expectedError string
	}{
		{
			name:          testEmptyPath,
			path:          "",
			expectedError: "source path is required",
		},
		{
			name:          testPathTraversalAttempt,
			path:          "../../../etc/passwd",
			expectedError: testPathTraversalDetected,
		},
		{
			name:          "path with double dots",
			path:          "/home/../etc/passwd",
			expectedError: testPathTraversalDetected,
		},
		{
			name:          "non-existent path",
			path:          "/definitely/does/not/exist",
			expectedError: "does not exist",
		},
		{
			name:          "file instead of directory",
			path:          tempFile,
			expectedError: "must be a directory",
		},
		{
			name:          "valid directory",
			path:          tempDir,
			expectedError: "",
		},
		{
			name:          "valid relative path",
			path:          ".",
			expectedError: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := ValidateSourcePath(tt.path)
			if tt.expectedError != "" {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.expectedError)
				// Check if it's a StructuredError
				var structErr *StructuredError
				if errors.As(err, &structErr) {
					assert.NotEmpty(t, structErr.Code)
					assert.NotEqual(t, ErrorTypeUnknown, structErr.Type)
				}
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

// TestValidateDestinationPath exercises empty, traversal, and valid
// destination paths (the destination file itself need not exist).
func TestValidateDestinationPath(t *testing.T) {
	tempDir := t.TempDir()
	tests := []struct {
		name          string
		path          string
		expectedError string
	}{
		{
			name:          testEmptyPath,
			path:          "",
			expectedError: "destination path is required",
		},
		{
			name:          testPathTraversalAttempt,
			path:          "../../etc/passwd",
			expectedError: testPathTraversalDetected,
		},
		{
			name:          "absolute path traversal",
			path:          "/home/../../../etc/passwd",
			expectedError: testPathTraversalDetected,
		},
		{
			name:          "valid new file",
			path:          filepath.Join(tempDir, "newfile.txt"),
			expectedError: "",
		},
		{
			name:          "valid relative path",
			path:          "output.txt",
			expectedError: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := ValidateDestinationPath(tt.path)
			if tt.expectedError != "" {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.expectedError)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

// TestValidateConfigPath checks that empty paths are allowed and traversal is
// rejected; existence of the file is deliberately not validated.
func TestValidateConfigPath(t *testing.T) {
	tempDir := t.TempDir()
	validConfig := filepath.Join(tempDir, "config.yaml")
	require.NoError(t, os.WriteFile(validConfig, []byte("key: value"), 0o600))
	tests := []struct {
		name          string
		path          string
		expectedError string
	}{
		{
			name:          testEmptyPath,
			path:          "",
			expectedError: "", // Empty config path is allowed
		},
		{
			name:          testPathTraversalAttempt,
			path:          "../../../etc/config.yaml",
			expectedError: testPathTraversalDetected,
		},
		// ValidateConfigPath doesn't check if file exists or is regular file
		// It only checks for path traversal
		{
			name:          "valid config file",
			path:          validConfig,
			expectedError: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := ValidateConfigPath(tt.path)
			if tt.expectedError != "" {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.expectedError)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

// TestGetAbsolutePath is already covered in paths_test.go

// TestValidationErrorTypes asserts the structured type/code pairs produced by
// each validator's failure paths.
func TestValidationErrorTypes(t *testing.T) {
	t.Run("source path validation errors", func(t *testing.T) {
		// Test empty source
		err := ValidateSourcePath("")
		assert.Error(t, err)
		var structErrEmptyPath *StructuredError
		if errors.As(err, &structErrEmptyPath) {
			assert.Equal(t, ErrorTypeValidation, structErrEmptyPath.Type)
			assert.Equal(t, CodeValidationRequired, structErrEmptyPath.Code)
		}
		// Test path traversal
		err = ValidateSourcePath("../../../etc")
		assert.Error(t, err)
		var structErrTraversal *StructuredError
		if errors.As(err, &structErrTraversal) {
			assert.Equal(t, ErrorTypeValidation, structErrTraversal.Type)
			assert.Equal(t, CodeValidationPath, structErrTraversal.Code)
		}
	})
	t.Run("destination path validation errors", func(t *testing.T) {
		// Test empty destination
		err := ValidateDestinationPath("")
		assert.Error(t, err)
		var structErrEmptyDest *StructuredError
		if errors.As(err, &structErrEmptyDest) {
			assert.Equal(t, ErrorTypeValidation, structErrEmptyDest.Type)
			assert.Equal(t, CodeValidationRequired, structErrEmptyDest.Code)
		}
	})
	t.Run("config path validation errors", func(t *testing.T) {
		// Test path traversal in config
		err := ValidateConfigPath("../../etc/config.yaml")
		assert.Error(t, err)
		var structErrTraversalInConfig *StructuredError
		if errors.As(err, &structErrTraversalInConfig) {
			assert.Equal(t, ErrorTypeValidation, structErrTraversalInConfig.Type)
			assert.Equal(t, CodeValidationPath, structErrTraversalInConfig.Code)
		}
	})
}

// TestPathSecurityChecks feeds a set of traversal payloads to all three
// validators; every one must be rejected.
func TestPathSecurityChecks(t *testing.T) {
	// Test various path traversal attempts
	traversalPaths := []string{
		"../etc/passwd",
		"../../root/.ssh/id_rsa",
		"/home/../../../etc/shadow",
		"./../../sensitive/data",
		"foo/../../../bar",
	}
	for _, path := range traversalPaths {
		t.Run("source_"+path, func(t *testing.T) {
			err := ValidateSourcePath(path)
			assert.Error(t, err)
			assert.Contains(t, err.Error(), testPathTraversal)
		})
		t.Run("dest_"+path, func(t *testing.T) {
			err := ValidateDestinationPath(path)
			assert.Error(t, err)
			assert.Contains(t, err.Error(), testPathTraversal)
		})
		t.Run("config_"+path, func(t *testing.T) {
			err := ValidateConfigPath(path)
			assert.Error(t, err)
			assert.Contains(t, err.Error(), testPathTraversal)
		})
	}
}

// TestSpecialPaths pins GetBaseName behavior for edge-case inputs.
func TestSpecialPaths(t *testing.T) {
	t.Run("GetBaseName with special paths", func(t *testing.T) {
		specialPaths := map[string]string{
			"/":   "/",
			"":    "output",
			".":   "output",
			"..":  "..",
			"/.":  "output", // filepath.Base("/.") returns "." which matches the output condition
			"/..": "..",
			"//":  "/",
			"///": "/",
		}
		for path, expected := range specialPaths {
			result := GetBaseName(path)
			assert.Equal(t, expected, result, "Path: %s", path)
		}
	})
}

// TestPathNormalization checks that redundant separators are tolerated by
// ValidateSourcePath.
func TestPathNormalization(t *testing.T) {
	tempDir := t.TempDir()
	t.Run("source path normalization", func(t *testing.T) {
		// Create nested directory
		nestedDir := filepath.Join(tempDir, "a", "b", "c")
		require.NoError(t, os.MkdirAll(nestedDir, 0o750))
		// Test path with redundant separators
		redundantPath := tempDir + string(
			os.PathSeparator,
		) + string(
			os.PathSeparator,
		) + "a" + string(
			os.PathSeparator,
		) + "b" + string(
			os.PathSeparator,
		) + "c"
		err := ValidateSourcePath(redundantPath)
		assert.NoError(t, err)
	})
}

// TestPathValidationConcurrency runs the validators from multiple goroutines;
// it asserts nothing and relies on the race detector to surface issues.
func TestPathValidationConcurrency(t *testing.T) {
	tempDir := t.TempDir()
	// Test concurrent path validation
	paths := []string{
		tempDir,
		".",
		"/tmp",
	}
	errChan := make(chan error, len(paths)*2)
	for _, path := range paths {
		go func(p string) {
			errChan <- ValidateSourcePath(p)
		}(path)
		go func(p string) {
			errChan <- ValidateDestinationPath(p + "/output.txt")
		}(path)
	}
	// Collect results
	for i := 0; i < len(paths)*2; i++ {
		<-errChan
	}
	// No assertions needed - test passes if no panic/race
}

View File

@@ -1,4 +1,5 @@
package utils
// Package gibidiutils provides common utility functions for gibidify.
package gibidiutils
import (
"os"
@@ -138,7 +139,7 @@ func TestGetAbsolutePathSpecialCases(t *testing.T) {
target := filepath.Join(tmpDir, "target")
link := filepath.Join(tmpDir, "link")
if err := os.Mkdir(target, 0o755); err != nil {
if err := os.Mkdir(target, 0o750); err != nil {
t.Fatalf("Failed to create target directory: %v", err)
}
if err := os.Symlink(target, link); err != nil {
@@ -189,7 +190,10 @@ func TestGetAbsolutePathSpecialCases(t *testing.T) {
}
}
func TestGetAbsolutePathConcurrency(t *testing.T) {
// TestGetAbsolutePathConcurrency verifies that GetAbsolutePath is safe for concurrent use.
// The test intentionally does not use assertions - it will panic if there's a race condition.
// Run with -race flag to detect concurrent access issues.
func TestGetAbsolutePathConcurrency(_ *testing.T) {
// Test that GetAbsolutePath is safe for concurrent use
paths := []string{".", "..", "test.go", "subdir/file.txt", "/tmp/test"}
done := make(chan bool)
@@ -224,13 +228,11 @@ func TestGetAbsolutePathErrorFormatting(t *testing.T) {
if !strings.Contains(err.Error(), path) {
t.Errorf("Error message should contain original path: %v", err)
}
} else {
} else if !filepath.IsAbs(got) {
// Normal case - just verify we got a valid absolute path
if !filepath.IsAbs(got) {
t.Errorf("Expected absolute path, got: %v", got)
}
}
}
// BenchmarkGetAbsolutePath benchmarks the GetAbsolutePath function
func BenchmarkGetAbsolutePath(b *testing.B) {

View File

@@ -0,0 +1,18 @@
package gibidiutils
// Test constants to avoid duplication in test files.
// These constants are used across multiple test files in the gibidiutils package.
const (
	// Error messages used as stand-in failure reasons when constructing test errors.
	testErrFileNotFound = "file not found"
	testErrWriteFailed = "write failed"
	testErrInvalidFormat = "invalid format"
	// Path-validation message fragments — presumably matched against the error text
	// produced by the path validators; TODO confirm against the tests that use them.
	testEmptyPath = "empty path"
	testPathTraversal = "path traversal"
	testPathTraversalAttempt = "path traversal attempt"
	testPathTraversalDetected = "path traversal attempt detected"
)

View File

@@ -1,8 +1,10 @@
package utils
// Package gibidiutils provides common utility functions for gibidify.
package gibidiutils
import (
"encoding/json"
"io"
"math"
"strings"
)
@@ -34,7 +36,15 @@ func WriteWithErrorWrap(writer io.Writer, content, errorMsg, filePath string) er
// StreamContent provides a common streaming implementation with chunk processing.
// This eliminates the similar streaming patterns across JSON and Markdown writers.
func StreamContent(reader io.Reader, writer io.Writer, chunkSize int, filePath string, processChunk func([]byte) []byte) error {
//
//revive:disable-next-line:cognitive-complexity
func StreamContent(
reader io.Reader,
writer io.Writer,
chunkSize int,
filePath string,
processChunk func([]byte) []byte,
) error {
buf := make([]byte, chunkSize)
for {
n, err := reader.Read(buf)
@@ -55,7 +65,7 @@ func StreamContent(reader io.Reader, writer io.Writer, chunkSize int, filePath s
break
}
if err != nil {
wrappedErr := WrapError(err, ErrorTypeIO, CodeIORead, "failed to read content chunk")
wrappedErr := WrapError(err, ErrorTypeIO, CodeIOFileRead, "failed to read content chunk")
if filePath != "" {
wrappedErr = wrappedErr.WithFilePath(filePath)
}
@@ -99,13 +109,27 @@ func EscapeForYAML(content string) string {
return content
}
// SafeUint64ToInt64WithDefault safely converts uint64 to int64, returning a default value if overflow would occur.
// When defaultValue is 0 (the safe default), clamps to MaxInt64 on overflow to keep guardrails active.
// This prevents overflow from making monitors think memory usage is zero when it's actually maxed out.
func SafeUint64ToInt64WithDefault(value uint64, defaultValue int64) int64 {
if value > math.MaxInt64 {
// When caller uses 0 as "safe" default, clamp to max so overflow still trips guardrails
if defaultValue == 0 {
return math.MaxInt64
}
return defaultValue
}
return int64(value) //#nosec G115 -- Safe: value <= MaxInt64 checked above
}
// StreamLines provides line-based streaming for YAML content.
// This provides an alternative streaming approach for YAML writers.
func StreamLines(reader io.Reader, writer io.Writer, filePath string, lineProcessor func(string) string) error {
// Read all content first (for small files this is fine)
content, err := io.ReadAll(reader)
if err != nil {
wrappedErr := WrapError(err, ErrorTypeIO, CodeIORead, "failed to read content for line processing")
wrappedErr := WrapError(err, ErrorTypeIO, CodeIOFileRead, "failed to read content for line processing")
if filePath != "" {
wrappedErr = wrappedErr.WithFilePath(filePath)
}

111
gibidiutils/writers_test.go Normal file
View File

@@ -0,0 +1,111 @@
// Package gibidiutils provides common utility functions for gibidify.
package gibidiutils
import (
"math"
"testing"
)
// TestSafeUint64ToInt64WithDefault exercises the uint64 -> int64 conversion helper
// across normal values, exact boundaries, and overflow with zero and custom defaults.
func TestSafeUint64ToInt64WithDefault(t *testing.T) {
	cases := []struct {
		name         string
		value        uint64
		defaultValue int64
		want         int64
	}{
		{name: "normal value within range", value: 1000, defaultValue: 0, want: 1000},
		{name: "zero value", value: 0, defaultValue: 0, want: 0},
		{name: "max int64 exactly", value: math.MaxInt64, defaultValue: 0, want: math.MaxInt64},
		{name: "overflow with zero default clamps to max", value: math.MaxInt64 + 1, defaultValue: 0, want: math.MaxInt64},
		{name: "large overflow with zero default clamps to max", value: math.MaxUint64, defaultValue: 0, want: math.MaxInt64},
		{name: "overflow with custom default returns custom", value: math.MaxInt64 + 1, defaultValue: -1, want: -1},
		{name: "overflow with custom positive default", value: math.MaxUint64, defaultValue: 12345, want: 12345},
		{name: "large value within range", value: uint64(math.MaxInt64 - 1000), defaultValue: 0, want: math.MaxInt64 - 1000},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := SafeUint64ToInt64WithDefault(tc.value, tc.defaultValue); got != tc.want {
				t.Errorf("SafeUint64ToInt64WithDefault(%d, %d) = %d, want %d",
					tc.value, tc.defaultValue, got, tc.want)
			}
		})
	}
}
// TestSafeUint64ToInt64WithDefaultGuardrailsBehavior ensures that an overflowing
// value combined with a zero default clamps to MaxInt64 rather than returning 0.
// Returning 0 would make back-pressure and resource monitors believe usage is nil
// when it is actually maxed out, silently disabling their guardrails.
func TestSafeUint64ToInt64WithDefaultGuardrailsBehavior(t *testing.T) {
	converted := SafeUint64ToInt64WithDefault(math.MaxUint64, 0)
	if converted == 0 {
		t.Error("Overflow with default=0 returned 0, which would disable guardrails")
	}
	if converted != math.MaxInt64 {
		t.Errorf("Overflow with default=0 should clamp to MaxInt64, got %d", converted)
	}
}
// BenchmarkSafeUint64ToInt64WithDefault benchmarks the conversion function on the
// normal path and on both overflow paths (zero default vs. custom default).
//
// Note: the previous version called b.ResetTimer() as the first statement of each
// sub-benchmark; the timer is already fresh when b.Run invokes the function, so
// those calls were no-ops and have been removed.
func BenchmarkSafeUint64ToInt64WithDefault(b *testing.B) {
	b.Run("normal_value", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = SafeUint64ToInt64WithDefault(1000, 0)
		}
	})
	b.Run("overflow_zero_default", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = SafeUint64ToInt64WithDefault(math.MaxUint64, 0)
		}
	})
	b.Run("overflow_custom_default", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = SafeUint64ToInt64WithDefault(math.MaxUint64, -1)
		}
	})
}

18
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/ivuorinen/gibidify
go 1.24.1
go 1.25
require (
github.com/fatih/color v1.18.0
@@ -8,26 +8,28 @@ require (
github.com/schollz/progressbar/v3 v3.18.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)

52
go.sum
View File

@@ -7,12 +7,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -21,17 +17,14 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -42,58 +35,37 @@ github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZV
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
github.com/sagikazarmark/locafero v0.8.0 h1:mXaMVw7IqxNBxfv3LdWt9MDmcWDQ1fagDH918lOdVaQ=
github.com/sagikazarmark/locafero v0.8.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY=
github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@@ -23,14 +23,13 @@ func main() {
if cli.IsUserError(err) {
errorFormatter.FormatError(err)
os.Exit(1)
} else {
}
// System errors still go to logrus for debugging
logrus.Errorf("System error: %v", err)
ui.PrintError("An unexpected error occurred. Please check the logs.")
os.Exit(2)
}
}
}
// Run executes the main logic of the CLI application using the provided context.
func run(ctx context.Context) error {

View File

@@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/ivuorinen/gibidify/config"
"github.com/ivuorinen/gibidify/testutil"
)
@@ -14,13 +15,22 @@ const (
testFileCount = 1000
)
// TestMain configures test-time flags for packages.
func TestMain(m *testing.M) {
// Inform packages that we're running under tests so they can adjust noisy logging.
// The config package will suppress the specific info-level message about missing config
// while still allowing tests to enable debug/info level logging when needed.
config.SetRunningInTest(true)
os.Exit(m.Run())
}
// TestIntegrationFullCLI simulates a full run of the CLI application using adaptive concurrency.
func TestIntegrationFullCLI(t *testing.T) {
srcDir := setupTestFiles(t)
outFilePath := setupOutputFile(t)
setupCLIArgs(srcDir, outFilePath)
// Run the application with a background context.
// Run the application with the test context.
ctx := t.Context()
if runErr := run(ctx); runErr != nil {
t.Fatalf("Run failed: %v", runErr)
@@ -60,7 +70,7 @@ func setupCLIArgs(srcDir, outFilePath string) {
// verifyOutput checks that the output file contains expected content.
func verifyOutput(t *testing.T, outFilePath string) {
t.Helper()
data, err := os.ReadFile(outFilePath)
data, err := os.ReadFile(outFilePath) // #nosec G304 - test file path is controlled
if err != nil {
t.Fatalf("Failed to read output file: %v", err)
}

58
revive.toml Normal file
View File

@@ -0,0 +1,58 @@
# revive configuration for gibidify
# See https://revive.run/ for more information
# Global settings
ignoreGeneratedHeader = false
severity = "warning"
confidence = 0.8
errorCode = 1
warningCode = 0
# Each [rule.*] section below explicitly enables that rule (revive only runs listed
# rules unless enableAllRules is set); configured rules carry their arguments below.
[rule.blank-imports]
[rule.context-as-argument]
[rule.context-keys-type]
[rule.dot-imports]
[rule.error-return]
[rule.error-strings]
[rule.error-naming]
[rule.exported]
[rule.if-return]
[rule.increment-decrement]
[rule.var-naming]
[rule.var-declaration]
[rule.package-comments]
[rule.range]
[rule.receiver-naming]
[rule.time-naming]
[rule.unexported-return]
[rule.indent-error-flow]
[rule.errorf]
[rule.empty-block]
[rule.superfluous-else]
[rule.unused-parameter]
[rule.unreachable-code]
[rule.redefines-builtin-id]
# Configure specific rules
[rule.line-length-limit]
arguments = [120]
Exclude = ["**/*_test.go"]
[rule.function-length]
arguments = [50, 100]
Exclude = ["**/*_test.go"]
[rule.max-public-structs]
arguments = [10]
[rule.cognitive-complexity]
arguments = [15]
Exclude = ["**/*_test.go"]
[rule.cyclomatic]
arguments = [15]
Exclude = ["**/*_test.go"]
[rule.argument-limit]
arguments = [5]

View File

@@ -1,10 +1,11 @@
Available targets:
install-tools - Install required linting and development tools
lint - Run all linters (Go, Makefile, shell, YAML)
lint - Run all linters (Go, EditorConfig, Makefile, shell, YAML)
lint-fix - Run linters with auto-fix enabled
lint-verbose - Run linters with verbose output
test - Run tests
coverage - Run tests with coverage
test-coverage - Run tests with coverage output
coverage - Run tests with coverage and generate HTML report
build - Build the application
clean - Clean build artifacts
all - Run lint, test, and build
@@ -14,6 +15,11 @@ Security targets:
security-full - Run full security analysis with all tools
vuln-check - Check for dependency vulnerabilities
Dependency management:
deps-check - Check for available dependency updates
deps-update - Update all dependencies to latest versions
deps-tidy - Clean up and verify dependencies
Benchmark targets:
build-benchmark - Build the benchmark binary
benchmark - Run all benchmarks

View File

@@ -1,14 +1,23 @@
#!/bin/bash
set -e
#!/bin/sh
set -eu
echo "Running golangci-lint..."
golangci-lint run ./...
echo "Running revive..."
revive -config revive.toml -formatter friendly ./...
echo "Running checkmake..."
checkmake --config=.checkmake Makefile
echo "Running editorconfig-checker..."
editorconfig-checker
echo "Running shellcheck..."
shellcheck scripts/*.sh
echo "Running shfmt check..."
shfmt -d .
shfmt -d -i 0 -ci .
echo "Running yamllint..."
yamllint -c .yamllint .
yamllint .

View File

@@ -1,10 +1,10 @@
#!/bin/bash
set -euo pipefail
#!/bin/sh
set -eu
# Security Scanning Script for gibidify
# This script runs comprehensive security checks locally and in CI
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
@@ -20,63 +20,177 @@ NC='\033[0m' # No Color
# Function to print status
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
printf "${BLUE}[INFO]${NC} %s\n" "$1"
}
print_warning() {
echo -e "${YELLOW}[WARN]${NC} $1"
printf "${YELLOW}[WARN]${NC} %s\n" "$1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
printf "${RED}[ERROR]${NC} %s\n" "$1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
printf "${GREEN}[SUCCESS]${NC} %s\n" "$1"
}
# Run a command under `timeout DURATION` when the timeout utility exists;
# on systems without it, fall back to executing the command directly.
# Usage: run_with_timeout DURATION COMMAND [ARGS...]
run_with_timeout() {
	duration="$1"
	shift
	if ! command -v timeout >/dev/null 2>&1; then
		# No timeout binary available; run the command as-is.
		"$@"
		return
	fi
	timeout "$duration" "$@"
}
# Check if required tools are installed
check_dependencies() {
print_status "Checking security scanning dependencies..."
local missing_tools=()
missing_tools=""
if ! command -v go &>/dev/null; then
missing_tools+=("go")
if ! command -v go >/dev/null 2>&1; then
missing_tools="${missing_tools}go "
print_error "Go is not installed. Please install Go first."
print_error "Visit https://golang.org/doc/install for installation instructions."
exit 1
fi
if ! command -v golangci-lint &>/dev/null; then
if ! command -v golangci-lint >/dev/null 2>&1; then
print_warning "golangci-lint not found, installing..."
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
fi
if ! command -v gosec &>/dev/null; then
if ! command -v gosec >/dev/null 2>&1; then
print_warning "gosec not found, installing..."
go install github.com/securecodewarrior/gosec/v2/cmd/gosec@latest
go install github.com/securego/gosec/v2/cmd/gosec@latest
fi
if ! command -v govulncheck &>/dev/null; then
if ! command -v govulncheck >/dev/null 2>&1; then
print_warning "govulncheck not found, installing..."
go install golang.org/x/vuln/cmd/govulncheck@latest
fi
if ! command -v checkmake &>/dev/null; then
if ! command -v checkmake >/dev/null 2>&1; then
print_warning "checkmake not found, installing..."
go install github.com/mrtazz/checkmake/cmd/checkmake@latest
go install github.com/checkmake/checkmake/cmd/checkmake@latest
fi
if ! command -v shfmt &>/dev/null; then
if ! command -v shfmt >/dev/null 2>&1; then
print_warning "shfmt not found, installing..."
go install mvdan.cc/sh/v3/cmd/shfmt@latest
fi
if ! command -v yamllint &>/dev/null; then
print_warning "yamllint not found, installing..."
go install github.com/excilsploft/yamllint@latest
if ! command -v yamllint >/dev/null 2>&1; then
print_warning "yamllint not found, attempting to install..."
# Update PATH to include common user install directories
export PATH="$HOME/.local/bin:$HOME/.cargo/bin:$PATH"
installed=0
# Try pipx first
if command -v pipx >/dev/null 2>&1; then
print_status "Attempting install with pipx..."
if pipx install yamllint; then
# Update PATH to include pipx bin directory
pipx_bin_dir=$(pipx environment --value PIPX_BIN_DIR 2>/dev/null || echo "$HOME/.local/bin")
export PATH="$pipx_bin_dir:$PATH"
installed=1
else
print_warning "pipx install yamllint failed, trying next method..."
fi
fi
if [ ${#missing_tools[@]} -ne 0 ]; then
print_error "Missing required tools: ${missing_tools[*]}"
# Try pip3 --user if pipx didn't work
if [ "$installed" -eq 0 ] && command -v pip3 >/dev/null 2>&1; then
print_status "Attempting install with pip3 --user..."
if pip3 install --user yamllint; then
installed=1
else
print_warning "pip3 install yamllint failed, trying next method..."
fi
fi
# Try apt-get with smart sudo handling
if [ "$installed" -eq 0 ] && command -v apt-get >/dev/null 2>&1; then
sudo_cmd=""
can_use_apt=false
# Check if running as root
if [ "$(id -u)" -eq 0 ]; then
print_status "Running as root, no sudo needed for apt-get..."
sudo_cmd=""
can_use_apt=true
elif command -v sudo >/dev/null 2>&1; then
# Try non-interactive sudo first
if sudo -n true 2>/dev/null; then
print_status "Attempting install with apt-get (sudo cached)..."
sudo_cmd="sudo"
can_use_apt=true
elif [ -t 0 ]; then
# TTY available, allow interactive sudo
print_status "Attempting install with apt-get (may prompt for sudo)..."
sudo_cmd="sudo"
can_use_apt=true
else
print_warning "apt-get available but sudo not accessible (non-interactive, no cache). Skipping apt-get."
can_use_apt=false
fi
else
print_warning "apt-get available but sudo not found. Skipping apt-get."
can_use_apt=false
fi
# Attempt apt-get only if we have permission to use it
if [ "$can_use_apt" = true ]; then
if [ -n "$sudo_cmd" ]; then
if run_with_timeout 300 ${sudo_cmd:+"$sudo_cmd"} apt-get update; then
if run_with_timeout 300 ${sudo_cmd:+"$sudo_cmd"} apt-get install -y yamllint; then
installed=1
else
print_warning "apt-get install yamllint failed or timed out"
fi
else
print_warning "apt-get update failed or timed out"
fi
else
# Running as root without sudo
if run_with_timeout 300 apt-get update; then
if run_with_timeout 300 apt-get install -y yamllint; then
installed=1
else
print_warning "apt-get install yamllint failed or timed out"
fi
else
print_warning "apt-get update failed or timed out"
fi
fi
fi
fi
# Final check with updated PATH
if ! command -v yamllint >/dev/null 2>&1; then
print_error "yamllint installation failed or yamllint still not found in PATH."
print_error "Please install yamllint manually using one of:"
print_error " - pipx install yamllint"
print_error " - pip3 install --user yamllint"
print_error " - sudo apt-get install yamllint (Debian/Ubuntu)"
print_error " - brew install yamllint (macOS)"
exit 1
fi
print_status "yamllint successfully installed and found in PATH"
fi
if [ -n "$missing_tools" ]; then
print_error "Missing required tools: $missing_tools"
print_error "Please install the missing tools and try again."
exit 1
fi
@@ -103,15 +217,41 @@ run_gosec() {
run_govulncheck() {
print_status "Running govulncheck for dependency vulnerabilities..."
if govulncheck -json ./... >govulncheck-report.json 2>&1; then
print_success "No known vulnerabilities found in dependencies"
else
if grep -q '"finding"' govulncheck-report.json 2>/dev/null; then
# govulncheck with -json always exits 0, so we need to check the output
# Redirect stderr to separate file to avoid corrupting JSON output
govulncheck -json ./... >govulncheck-report.json 2>govulncheck-errors.log
# Check if there were errors during execution
if [ -s govulncheck-errors.log ]; then
print_warning "govulncheck produced errors (see govulncheck-errors.log)"
fi
# Use jq to detect finding entries in the JSON output
# govulncheck emits a stream of Message objects, need to slurp and filter for Finding field
if command -v jq >/dev/null 2>&1; then
# First validate JSON is parseable
if ! jq -s '.' govulncheck-report.json >/dev/null 2>&1; then
print_error "govulncheck report contains malformed JSON"
echo "Unable to parse govulncheck-report.json"
return 1
fi
# JSON is valid, now check for findings
if jq -s -e 'map(select(.Finding)) | length > 0' govulncheck-report.json >/dev/null 2>&1; then
print_error "Vulnerabilities found in dependencies!"
echo "Detailed report saved to govulncheck-report.json"
return 1
else
print_success "No vulnerabilities found"
print_success "No known vulnerabilities found in dependencies"
fi
else
# Fallback to grep if jq is not available (case-insensitive to match "Finding")
if grep -qi '"finding":' govulncheck-report.json 2>/dev/null; then
print_error "Vulnerabilities found in dependencies!"
echo "Detailed report saved to govulncheck-report.json"
return 1
else
print_success "No known vulnerabilities found in dependencies"
fi
fi
}
@@ -120,7 +260,7 @@ run_govulncheck() {
run_security_lint() {
print_status "Running security-focused linting..."
local security_linters="gosec,gocritic,bodyclose,rowserrcheck,misspell,unconvert,unparam,unused,errcheck,ineffassign,staticcheck"
security_linters="gosec,gocritic,bodyclose,rowserrcheck,misspell,unconvert,unparam,unused,errcheck,ineffassign,staticcheck"
if golangci-lint run --enable="$security_linters" --timeout=5m; then
print_success "Security linting passed"
@@ -134,31 +274,47 @@ run_security_lint() {
check_secrets() {
print_status "Scanning for potential secrets and sensitive data..."
local secrets_found=false
# POSIX-compatible secrets_found flag using a temp file
secrets_found_file="$(mktemp)" || {
print_error "Failed to create temporary file with mktemp"
exit 1
}
if [ -z "$secrets_found_file" ]; then
print_error "mktemp returned empty path"
exit 1
fi
# Clean up temp file on exit and signals (POSIX-portable)
trap 'rm -f "$secrets_found_file"' 0 HUP INT TERM
# Common secret patterns
local patterns=(
"password\s*[:=]\s*['\"][^'\"]{3,}['\"]"
"secret\s*[:=]\s*['\"][^'\"]{3,}['\"]"
"key\s*[:=]\s*['\"][^'\"]{8,}['\"]"
"token\s*[:=]\s*['\"][^'\"]{8,}['\"]"
"api_?key\s*[:=]\s*['\"][^'\"]{8,}['\"]"
"aws_?access_?key"
"aws_?secret"
"AKIA[0-9A-Z]{16}" # AWS Access Key pattern
"github_?token"
"private_?key"
)
for pattern in "${patterns[@]}"; do
if grep -r -i -E "$pattern" --include="*.go" . 2>/dev/null; then
# Common secret patterns (POSIX [[:space:]] and here-doc quoting)
cat <<'PATTERNS' | while IFS= read -r pattern; do
password[[:space:]]*[:=][[:space:]]*['"][^'"]{3,}['"]
secret[[:space:]]*[:=][[:space:]]*['"][^'"]{3,}['"]
key[[:space:]]*[:=][[:space:]]*['"][^'"]{8,}['"]
token[[:space:]]*[:=][[:space:]]*['"][^'"]{8,}['"]
api_?key[[:space:]]*[:=][[:space:]]*['"][^'"]{8,}['"]
aws_?access_?key
aws_?secret
AKIA[0-9A-Z]{16}
github_?token
private_?key
PATTERNS
if [ -n "$pattern" ]; then
if find . -type f -name "*.go" -exec grep -i -E -H -n -e "$pattern" {} + 2>/dev/null | grep -q .; then
print_warning "Potential secret pattern found: $pattern"
secrets_found=true
touch "$secrets_found_file"
fi
fi
done
if [ -f "$secrets_found_file" ]; then
secrets_found=true
else
secrets_found=false
fi
# Check git history for secrets (last 10 commits)
if git log --oneline -10 | grep -i -E "(password|secret|key|token)" >/dev/null 2>&1; then
if git log --oneline -10 2>/dev/null | grep -i -E "(password|secret|key|token)" >/dev/null 2>&1; then
print_warning "Potential secrets mentioned in recent commit messages"
secrets_found=true
fi
@@ -175,23 +331,23 @@ check_secrets() {
check_hardcoded_addresses() {
print_status "Checking for hardcoded network addresses..."
local addresses_found=false
addresses_found=false
# Look for IP addresses (excluding common safe ones)
if grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . |
if grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . 2>/dev/null |
grep -v -E "(127\.0\.0\.1|0\.0\.0\.0|255\.255\.255\.255|localhost)" >/dev/null 2>&1; then
print_warning "Hardcoded IP addresses found:"
grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . |
grep -r -E "([0-9]{1,3}\.){3}[0-9]{1,3}" --include="*.go" . 2>/dev/null |
grep -v -E "(127\.0\.0\.1|0\.0\.0\.0|255\.255\.255\.255|localhost)" || true
addresses_found=true
fi
# Look for URLs (excluding documentation examples)
if grep -r -E "https?://[^/\s]+" --include="*.go" . |
grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{)" >/dev/null 2>&1; then
# Look for URLs (excluding documentation examples and comments)
if grep -r -E "https?://[^/\s]+" --include="*.go" . 2>/dev/null |
grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{|//.*https?://)" >/dev/null 2>&1; then
print_warning "Hardcoded URLs found:"
grep -r -E "https?://[^/\s]+" --include="*.go" . |
grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{)" || true
grep -r -E "https?://[^/\s]+" --include="*.go" . 2>/dev/null |
grep -v -E "(example\.com|localhost|127\.0\.0\.1|\$\{|//.*https?://)" || true
addresses_found=true
fi
@@ -209,7 +365,7 @@ check_docker_security() {
print_status "Checking Docker security..."
# Basic Dockerfile security checks
local docker_issues=false
docker_issues=false
if grep -q "^USER root" Dockerfile; then
print_warning "Dockerfile runs as root user"
@@ -221,7 +377,7 @@ check_docker_security() {
docker_issues=true
fi
if grep -q "RUN.*wget\|RUN.*curl" Dockerfile && ! grep -q "rm.*wget\|rm.*curl" Dockerfile; then
if grep -Eq 'RUN.*(wget|curl)' Dockerfile && ! grep -Eq 'rm.*(wget|curl)' Dockerfile; then
print_warning "Dockerfile may leave curl/wget installed"
docker_issues=true
fi
@@ -241,19 +397,21 @@ check_docker_security() {
check_file_permissions() {
print_status "Checking file permissions..."
local perm_issues=false
perm_issues=false
# Check for overly permissive files
if find . -type f -perm /o+w -not -path "./.git/*" | grep -q .; then
# Check for overly permissive files (using octal for cross-platform compatibility)
# -perm -002 finds files writable by others (works on both BSD and GNU find)
if find . -type f -perm -002 -not -path "./.git/*" 2>/dev/null | grep -q .; then
print_warning "World-writable files found:"
find . -type f -perm /o+w -not -path "./.git/*" || true
find . -type f -perm -002 -not -path "./.git/*" 2>/dev/null || true
perm_issues=true
fi
# Check for executable files that shouldn't be
if find . -type f -name "*.go" -perm /a+x | grep -q .; then
# -perm -111 finds files executable by anyone (works on both BSD and GNU find)
if find . -type f -name "*.go" -perm -111 -not -path "./.git/*" 2>/dev/null | grep -q .; then
print_warning "Executable Go files found (should not be executable):"
find . -type f -name "*.go" -perm /a+x || true
find . -type f -name "*.go" -perm -111 -not -path "./.git/*" 2>/dev/null || true
perm_issues=true
fi
@@ -285,7 +443,7 @@ check_makefile() {
check_shell_scripts() {
print_status "Checking shell script formatting..."
if find . -name "*.sh" -type f | head -1 | grep -q .; then
if find . -name "*.sh" -type f 2>/dev/null | head -1 | grep -q .; then
if shfmt -d .; then
print_success "Shell script formatting check passed"
else
@@ -301,8 +459,8 @@ check_shell_scripts() {
check_yaml_files() {
print_status "Checking YAML files..."
if find . -name "*.yml" -o -name "*.yaml" -type f | head -1 | grep -q .; then
if yamllint -c .yamllint .; then
if find . \( -name "*.yml" -o -name "*.yaml" \) -type f 2>/dev/null | head -1 | grep -q .; then
if yamllint .; then
print_success "YAML files check passed"
else
print_error "YAML file issues detected!"
@@ -317,7 +475,7 @@ check_yaml_files() {
generate_report() {
print_status "Generating security scan report..."
local report_file="security-report.md"
report_file="security-report.md"
cat >"$report_file" <<EOF
# Security Scan Report
@@ -370,7 +528,7 @@ main() {
echo "=========================="
echo
local exit_code=0
exit_code=0
check_dependencies
echo
@@ -409,7 +567,7 @@ main() {
generate_report
echo
if [ $exit_code -eq 0 ]; then
if [ "$exit_code" -eq 0 ]; then
print_success "🎉 All security checks passed!"
else
print_error "❌ Security issues detected. Please review the reports and fix identified issues."
@@ -419,7 +577,7 @@ main() {
print_status "- security-report.md"
fi
exit $exit_code
exit "$exit_code"
}
# Run main function

View File

@@ -83,7 +83,7 @@ func TestCreateTestFile(t *testing.T) {
}
// Verify content
readContent, err := os.ReadFile(filePath)
readContent, err := os.ReadFile(filePath) // #nosec G304 - test file path is controlled
if err != nil {
t.Fatalf("Failed to read created file: %v", err)
}
@@ -272,7 +272,7 @@ func TestCreateTestFiles(t *testing.T) {
// Verify each file
for i, filePath := range createdFiles {
content, err := os.ReadFile(filePath)
content, err := os.ReadFile(filePath) // #nosec G304 - test file path is controlled
if err != nil {
t.Errorf("Failed to read file %s: %v", filePath, err)
continue

View File

@@ -31,10 +31,10 @@ func TestVerifyContentContains(t *testing.T) {
}
}()
// This would normally fail but we're just checking it doesn't panic
// This would normally fail, but we're just checking it doesn't panic
content := "test"
expected := []string{"not found"}
// Create a sub-test that we expect to fail
// Create a subtest that we expect to fail
t.Run("expected_failure", func(t *testing.T) {
t.Skip("Skipping actual failure test")
VerifyContentContains(t, content, expected)
@@ -59,7 +59,7 @@ func TestMustSucceed(t *testing.T) {
}
}()
// Create a sub-test that we expect to fail
// Create a subtest that we expect to fail
t.Run("expected_failure", func(t *testing.T) {
t.Skip("Skipping actual failure test")
MustSucceed(t, errors.New("test error"), "failed operation")

View File

@@ -1,167 +0,0 @@
// Package utils provides common utility functions.
package utils
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// GetAbsolutePath returns the absolute path for the given path.
// It wraps filepath.Abs with consistent error handling so callers
// always receive a wrapped, contextual error on failure.
func GetAbsolutePath(path string) (string, error) {
	resolved, absErr := filepath.Abs(path)
	if absErr != nil {
		return "", fmt.Errorf("failed to get absolute path for %s: %w", path, absErr)
	}
	return resolved, nil
}
// GetBaseName returns the base name for the given path, handling special cases.
// A base of "." or "" (e.g. from an empty or current-directory path) maps to
// the fallback name "output".
func GetBaseName(absPath string) string {
	switch base := filepath.Base(absPath); base {
	case ".", "":
		return "output"
	default:
		return base
	}
}
// ValidateSourcePath validates a source directory path for security.
// It ensures the path exists, is a directory, and doesn't contain path traversal attempts.
// For relative paths it additionally verifies the resolved path stays within the
// current working directory.
func ValidateSourcePath(path string) error {
	if path == "" {
		return NewStructuredError(ErrorTypeValidation, CodeValidationRequired, "source path is required", "", nil)
	}
	// Check for path traversal patterns before cleaning, so "a/../../b" is
	// rejected even though Clean would normalize it.
	if strings.Contains(path, "..") {
		return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "path traversal attempt detected in source path", path, map[string]interface{}{
			"original_path": path,
		})
	}
	// Clean and get absolute path
	cleaned := filepath.Clean(path)
	abs, err := filepath.Abs(cleaned)
	if err != nil {
		return NewStructuredError(ErrorTypeFileSystem, CodeFSPathResolution, "cannot resolve source path", path, map[string]interface{}{
			"error": err.Error(),
		})
	}
	// For relative paths, ensure the resolved location does not escape the
	// current working directory.
	if !filepath.IsAbs(path) {
		cwd, err := os.Getwd()
		if err != nil {
			return NewStructuredError(ErrorTypeFileSystem, CodeFSPathResolution, "cannot get current working directory", path, map[string]interface{}{
				"error": err.Error(),
			})
		}
		cwdAbs, err := filepath.Abs(cwd)
		if err != nil {
			return NewStructuredError(ErrorTypeFileSystem, CodeFSPathResolution, "cannot resolve current working directory", path, map[string]interface{}{
				"error": err.Error(),
			})
		}
		// Separator-aware containment check: a raw string prefix test would
		// wrongly accept sibling directories sharing a name prefix
		// (e.g. "/home/user" matching "/home/username").
		if abs != cwdAbs && !strings.HasPrefix(abs, cwdAbs+string(filepath.Separator)) {
			return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "source path attempts to access directories outside current working directory", path, map[string]interface{}{
				"resolved_path": abs,
				"working_dir":   cwdAbs,
			})
		}
	}
	// Check if path exists and is a directory
	info, err := os.Stat(cleaned)
	if err != nil {
		if os.IsNotExist(err) {
			return NewStructuredError(ErrorTypeFileSystem, CodeFSNotFound, "source directory does not exist", path, nil)
		}
		return NewStructuredError(ErrorTypeFileSystem, CodeFSAccess, "cannot access source directory", path, map[string]interface{}{
			"error": err.Error(),
		})
	}
	if !info.IsDir() {
		return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "source path must be a directory", path, map[string]interface{}{
			"is_file": true,
		})
	}
	return nil
}
// ValidateDestinationPath validates a destination file path for security.
// It ensures the path doesn't contain path traversal attempts and the parent directory exists.
func ValidateDestinationPath(path string) error {
	if path == "" {
		return NewStructuredError(ErrorTypeValidation, CodeValidationRequired, "destination path is required", "", nil)
	}
	// Reject traversal sequences before any normalization takes place.
	if strings.Contains(path, "..") {
		return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "path traversal attempt detected in destination path", path, map[string]interface{}{
			"original_path": path,
		})
	}
	// Normalize the path and resolve it to an absolute location.
	resolved, absErr := filepath.Abs(filepath.Clean(path))
	if absErr != nil {
		return NewStructuredError(ErrorTypeFileSystem, CodeFSPathResolution, "cannot resolve destination path", path, map[string]interface{}{
			"error": absErr.Error(),
		})
	}
	// The destination itself must not be an existing directory.
	if st, statErr := os.Stat(resolved); statErr == nil && st.IsDir() {
		return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "destination cannot be a directory", path, map[string]interface{}{
			"is_directory": true,
		})
	}
	// The parent directory must exist and actually be a directory.
	parent := filepath.Dir(resolved)
	parentInfo, parentErr := os.Stat(parent)
	if parentErr != nil {
		if os.IsNotExist(parentErr) {
			return NewStructuredError(ErrorTypeFileSystem, CodeFSNotFound, "destination parent directory does not exist", path, map[string]interface{}{
				"parent_dir": parent,
			})
		}
		return NewStructuredError(ErrorTypeFileSystem, CodeFSAccess, "cannot access destination parent directory", path, map[string]interface{}{
			"parent_dir": parent,
			"error":      parentErr.Error(),
		})
	}
	if !parentInfo.IsDir() {
		return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "destination parent is not a directory", path, map[string]interface{}{
			"parent_dir": parent,
		})
	}
	return nil
}
// ValidateConfigPath validates a configuration file path for security.
// It ensures the path doesn't contain path traversal attempts.
// An empty path is explicitly permitted (no config file configured).
func ValidateConfigPath(path string) error {
	// Empty path is allowed for config; any path free of ".." is accepted.
	if path == "" || !strings.Contains(path, "..") {
		return nil
	}
	return NewStructuredError(ErrorTypeValidation, CodeValidationPath, "path traversal attempt detected in config path", path, map[string]interface{}{
		"original_path": path,
	})
}