gibidify/fileproc/resource_monitor_validation.go
Ismo Vuorinen 95b7ef6dd3 chore: modernize workflows, security scanning, and linting configuration (#50)
* build: update Go 1.25, CI workflows, and build tooling

- Upgrade to Go 1.25
- Add benchmark targets to Makefile
- Implement parallel gosec execution
- Lock tool versions for reproducibility
- Add shellcheck directives to scripts
- Update CI workflows with improved caching

* refactor: migrate from golangci-lint to revive

- Replace golangci-lint with revive for linting
- Configure comprehensive revive rules
- Fix all EditorConfig violations
- Add yamllint and yamlfmt support
- Remove deprecated .golangci.yml

* refactor: rename utils to shared and deduplicate code

- Rename utils package to shared
- Add shared constants package
- Deduplicate constants across packages
- Address CodeRabbit review feedback

* fix: resolve SonarQube issues and add safety guards

- Fix all 73 SonarQube OPEN issues
- Add nil guards for resourceMonitor, backpressure, metricsCollector (nil-guard pattern sketched after this list)
- Implement io.Closer for headerFileReader
- Propagate errors from processing helpers
- Add metrics and templates packages
- Improve error handling across codebase
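
A minimal sketch of that nil-guard pattern, assuming a processor struct that holds an optional *ResourceMonitor; the field and function names here are illustrative, and the actual call sites in gibidify may differ:

package fileproc

// processorSketch is a hypothetical stand-in for the real processor type.
type processorSketch struct {
	resourceMonitor *ResourceMonitor
}

// checkResources shows the guard: a nil monitor means resource limits are
// not configured, so validation is skipped instead of dereferencing nil.
func (p *processorSketch) checkResources(path string, size int64) error {
	if p.resourceMonitor == nil {
		return nil
	}
	return p.resourceMonitor.ValidateFileProcessing(path, size)
}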

* test: improve test infrastructure and coverage

- Add benchmarks for cli, fileproc, metrics
- Improve test coverage for cli, fileproc, config
- Refactor tests with helper functions
- Add shared test constants
- Fix test function naming conventions
- Reduce cognitive complexity in benchmark tests

* docs: update documentation and configuration examples

- Update CLAUDE.md with current project state
- Refresh README with new features
- Add usage and configuration examples
- Add SonarQube project configuration
- Consolidate config.example.yaml

* fix: resolve shellcheck warnings in scripts

- Use ./*.go instead of *.go to prevent dash-prefixed filenames
  from being interpreted as options (SC2035)
- Remove unreachable return statement after exit (SC2317)
- Remove obsolete gibidiutils/ directory reference

* chore(deps): upgrade go dependencies

* chore(lint): megalinter fixes

* fix: improve test coverage and fix file descriptor leaks

- Add defer r.Close() to fix pipe file descriptor leaks in benchmark tests (capture helper sketched after this list)
- Refactor TestProcessorConfigureFileTypes with helper functions and assertions
- Refactor TestProcessorLogFinalStats with output capture and keyword verification
- Use shared constants instead of literal strings (TestFilePNG, FormatMarkdown, etc.)
- Reduce cognitive complexity by extracting helper functions
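
A hedged sketch of the capture helper that fix implies, assuming os.Pipe is used to capture stdout in the benchmarks; the real helpers in gibidify may differ:

package fileproc

import (
	"io"
	"os"
)

// captureStdout redirects os.Stdout into a pipe while fn runs and returns
// what was written. Both pipe ends are closed: w explicitly after fn, and r
// via defer, which is the file descriptor leak fix the commit describes.
func captureStdout(fn func()) (string, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	defer r.Close()

	orig := os.Stdout
	os.Stdout = w
	defer func() { os.Stdout = orig }()

	fn()

	if err := w.Close(); err != nil {
		return "", err
	}
	out, readErr := io.ReadAll(r)
	return string(out), readErr
}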

* fix: align test comments with function names

Remove underscores from test comments to match actual function names:
- benchmark/benchmark_test.go (2 fixes)
- fileproc/filetypes_config_test.go (4 fixes)
- fileproc/filetypes_registry_test.go (6 fixes)
- fileproc/processor_test.go (6 fixes)
- fileproc/resource_monitor_types_test.go (4 fixes)
- fileproc/writer_test.go (3 fixes)

* fix: various test improvements and bug fixes

- Remove duplicate maxCacheSize check in filetypes_registry_test.go
- Shorten long comment in processor_test.go to stay under 120 chars
- Remove flaky time.Sleep in collector_test.go, use >= 0 assertion
- Close pipe reader in benchmark_test.go to fix file descriptor leak
- Use ContinueOnError in flags_test.go to match ResetFlags behavior
- Add nil check for p.ui in processor_workers.go before UpdateProgress
- Fix resource_monitor_validation_test.go by setting hardMemoryLimitBytes directly

* chore(yaml): add missing document start markers

Add --- document start to YAML files to satisfy yamllint:
- .github/workflows/codeql.yml
- .github/workflows/build-test-publish.yml
- .github/workflows/security.yml
- .github/actions/setup/action.yml

* fix: guard nil resourceMonitor and fix test deadlock

- Guard resourceMonitor before CreateFileProcessingContext call
- Add ui.UpdateProgress on emergency stop and path error returns
- Fix potential deadlock in TestProcessFile using wg.Go with defer close
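
A minimal sketch of the wg.Go plus defer close pattern mentioned in the last point, assuming a producer/consumer channel like the one TestProcessFile exercises (sync.WaitGroup.Go is available since Go 1.25); the actual test wiring may differ:

package fileproc

import "sync"

// drainWithProducer starts the producer in a WaitGroup-managed goroutine and
// closes the results channel when the producer returns, so the range loop
// below always terminates and the test cannot deadlock.
func drainWithProducer(produce func(chan<- string)) []string {
	results := make(chan string)

	var wg sync.WaitGroup
	wg.Go(func() {
		defer close(results)
		produce(results)
	})

	var collected []string
	for r := range results {
		collected = append(collected, r)
	}
	wg.Wait()
	return collected
}
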
2025-12-10 19:07:11 +02:00

// Package fileproc handles file processing, collection, and output formatting.
package fileproc

import (
	"runtime"
	"sync/atomic"
	"time"

	"github.com/ivuorinen/gibidify/shared"
)

// ValidateFileProcessing checks if a file can be processed based on resource limits.
func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int64) error {
	if !rm.enabled {
		return nil
	}

	rm.mu.RLock()
	defer rm.mu.RUnlock()

	// Check if emergency stop is active
	if rm.emergencyStopRequested {
		return shared.NewStructuredError(
			shared.ErrorTypeValidation,
			shared.CodeResourceLimitMemory,
			"processing stopped due to emergency memory condition",
			filePath,
			map[string]any{
				"emergency_stop_active": true,
			},
		)
	}

	// Check file count limit
	currentFiles := atomic.LoadInt64(&rm.filesProcessed)
	if int(currentFiles) >= rm.maxFiles {
		return shared.NewStructuredError(
			shared.ErrorTypeValidation,
			shared.CodeResourceLimitFiles,
			"maximum file count limit exceeded",
			filePath,
			map[string]any{
				"current_files": currentFiles,
				"max_files":     rm.maxFiles,
			},
		)
	}

	// Check total size limit
	currentTotalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
	if currentTotalSize+fileSize > rm.maxTotalSize {
		return shared.NewStructuredError(
			shared.ErrorTypeValidation,
			shared.CodeResourceLimitTotalSize,
			"maximum total size limit would be exceeded",
			filePath,
			map[string]any{
				"current_total_size": currentTotalSize,
				"file_size":          fileSize,
				"max_total_size":     rm.maxTotalSize,
			},
		)
	}

	// Check overall timeout
	if time.Since(rm.startTime) > rm.overallTimeout {
		return shared.NewStructuredError(
			shared.ErrorTypeValidation,
			shared.CodeResourceLimitTimeout,
			"overall processing timeout exceeded",
			filePath,
			map[string]any{
				"processing_duration": time.Since(rm.startTime),
				"overall_timeout":     rm.overallTimeout,
			},
		)
	}

	return nil
}

// CheckHardMemoryLimit checks if hard memory limit is exceeded and takes action.
func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
	if !rm.enabled || rm.hardMemoryLimitMB <= 0 {
		return nil
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	currentMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)
	if currentMemory <= rm.hardMemoryLimitBytes {
		return nil
	}

	return rm.handleMemoryLimitExceeded(currentMemory)
}

// handleMemoryLimitExceeded handles the case when hard memory limit is exceeded.
func (rm *ResourceMonitor) handleMemoryLimitExceeded(currentMemory int64) error {
	rm.mu.Lock()
	defer rm.mu.Unlock()

	rm.logMemoryViolation(currentMemory)

	if !rm.enableGracefulDegr {
		return rm.createHardMemoryLimitError(currentMemory, false)
	}

	return rm.tryGracefulRecovery(currentMemory)
}

// logMemoryViolation logs memory limit violation if not already logged.
func (rm *ResourceMonitor) logMemoryViolation(currentMemory int64) {
	violationKey := "hard_memory_limit"

	// Ensure map is initialized
	if rm.violationLogged == nil {
		rm.violationLogged = make(map[string]bool)
	}
	if rm.violationLogged[violationKey] {
		return
	}

	logger := shared.GetLogger()
	logger.Errorf("Hard memory limit exceeded: %dMB > %dMB",
		currentMemory/int64(shared.BytesPerMB), rm.hardMemoryLimitMB)
	rm.violationLogged[violationKey] = true
}

// tryGracefulRecovery attempts graceful recovery by forcing GC.
func (rm *ResourceMonitor) tryGracefulRecovery(_ int64) error {
	// Force garbage collection
	runtime.GC()

	// Check again after GC
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	newMemory := shared.SafeUint64ToInt64WithDefault(m.Alloc, 0)

	if newMemory > rm.hardMemoryLimitBytes {
		// Still over limit, activate emergency stop
		rm.emergencyStopRequested = true
		return rm.createHardMemoryLimitError(newMemory, true)
	}

	// Memory freed by GC, continue with degradation
	rm.degradationActive = true
	logger := shared.GetLogger()
	logger.Info("Memory freed by garbage collection, continuing with degradation mode")

	return nil
}

// createHardMemoryLimitError creates a structured error for memory limit exceeded.
func (rm *ResourceMonitor) createHardMemoryLimitError(currentMemory int64, emergencyStop bool) error {
	message := "hard memory limit exceeded"
	if emergencyStop {
		message = "hard memory limit exceeded, emergency stop activated"
	}

	context := map[string]any{
		"current_memory_mb": currentMemory / int64(shared.BytesPerMB),
		"limit_mb":          rm.hardMemoryLimitMB,
	}
	if emergencyStop {
		context["emergency_stop"] = true
	}

	return shared.NewStructuredError(
		shared.ErrorTypeValidation,
		shared.CodeResourceLimitMemory,
		message,
		"",
		context,
	)
}