Mirror of https://github.com/ivuorinen/gibidify.git, synced 2026-01-26 03:24:05 +00:00
chore: tweaks, simplification, tests
fileproc/filetypes_concurrency_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
package fileproc

import (
	"fmt"
	"sync"
	"testing"
)

// TestFileTypeRegistry_ThreadSafety tests thread safety of the FileTypeRegistry.
func TestFileTypeRegistry_ThreadSafety(t *testing.T) {
	const numGoroutines = 100
	const numOperationsPerGoroutine = 100

	var wg sync.WaitGroup

	// Test concurrent read operations
	t.Run("ConcurrentReads", func(t *testing.T) {
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				registry := GetDefaultRegistry()

				for j := 0; j < numOperationsPerGoroutine; j++ {
					// Test various file detection operations
					_ = registry.IsImage("test.png")
					_ = registry.IsBinary("test.exe")
					_ = registry.GetLanguage("test.go")

					// Test global functions too
					_ = IsImage("image.jpg")
					_ = IsBinary("binary.dll")
					_ = GetLanguage("script.py")
				}
			}(i)
		}
		wg.Wait()
	})

	// Test concurrent registry access (singleton creation)
	t.Run("ConcurrentRegistryAccess", func(t *testing.T) {
		// Reset the registry to test concurrent initialization
		// Note: This is not safe in a real application, but needed for testing
		registryOnce = sync.Once{}
		registry = nil

		registries := make([]*FileTypeRegistry, numGoroutines)

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				registries[id] = GetDefaultRegistry()
			}(i)
		}
		wg.Wait()

		// Verify all goroutines got the same registry instance
		firstRegistry := registries[0]
		for i := 1; i < numGoroutines; i++ {
			if registries[i] != firstRegistry {
				t.Errorf("Registry %d is different from registry 0", i)
			}
		}
	})

	// Test concurrent modifications on separate registry instances
	t.Run("ConcurrentModifications", func(t *testing.T) {
		// Create separate registry instances for each goroutine to test modification thread safety
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()

				// Create a new registry instance for this goroutine
				registry := &FileTypeRegistry{
					imageExts:   make(map[string]bool),
					binaryExts:  make(map[string]bool),
					languageMap: make(map[string]string),
				}

				for j := 0; j < numOperationsPerGoroutine; j++ {
					// Add unique extensions for this goroutine
					extSuffix := fmt.Sprintf("_%d_%d", id, j)

					registry.AddImageExtension(".img" + extSuffix)
					registry.AddBinaryExtension(".bin" + extSuffix)
					registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)

					// Verify the additions worked
					if !registry.IsImage("test.img" + extSuffix) {
						t.Errorf("Failed to add image extension .img%s", extSuffix)
					}
					if !registry.IsBinary("test.bin" + extSuffix) {
						t.Errorf("Failed to add binary extension .bin%s", extSuffix)
					}
					if registry.GetLanguage("test.lang"+extSuffix) != "lang"+extSuffix {
						t.Errorf("Failed to add language mapping .lang%s", extSuffix)
					}
				}
			}(i)
		}
		wg.Wait()
	})
}
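For context: the ConcurrentRegistryAccess subtest resets registryOnce and registry directly, which only makes sense if the package wires its default registry through sync.Once. Below is a minimal sketch of that wiring, assuming the three-map struct used throughout these tests; the real initializer in gibidify also pre-populates the default extension tables, which is omitted here.

package fileproc

import "sync"

// FileTypeRegistry as constructed in the tests above; the real struct may
// carry additional fields.
type FileTypeRegistry struct {
	imageExts   map[string]bool
	binaryExts  map[string]bool
	languageMap map[string]string
}

var (
	registry     *FileTypeRegistry
	registryOnce sync.Once
)

// GetDefaultRegistry builds the shared instance exactly once; resetting
// registryOnce forces the next caller to re-initialize, which is what the
// test exploits to exercise concurrent singleton creation.
func GetDefaultRegistry() *FileTypeRegistry {
	registryOnce.Do(func() {
		registry = &FileTypeRegistry{
			imageExts:   make(map[string]bool),
			binaryExts:  make(map[string]bool),
			languageMap: make(map[string]string),
		}
		// Default image/binary/language tables would be populated here (omitted).
	})
	return registry
}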
fileproc/filetypes_config_test.go (new file, 258 lines)
@@ -0,0 +1,258 @@
package fileproc

import (
	"sync"
	"testing"
)

// TestFileTypeRegistry_Configuration tests the configuration functionality.
func TestFileTypeRegistry_Configuration(t *testing.T) {
	// Create a new registry instance for testing
	registry := &FileTypeRegistry{
		imageExts:   make(map[string]bool),
		binaryExts:  make(map[string]bool),
		languageMap: make(map[string]string),
	}

	// Test ApplyCustomExtensions
	t.Run("ApplyCustomExtensions", func(t *testing.T) {
		customImages := []string{".webp", ".avif", ".heic"}
		customBinary := []string{".custom", ".mybin"}
		customLanguages := map[string]string{
			".zig":  "zig",
			".odin": "odin",
			".v":    "vlang",
		}

		registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)

		// Test custom image extensions
		for _, ext := range customImages {
			if !registry.IsImage("test" + ext) {
				t.Errorf("Expected %s to be recognized as image", ext)
			}
		}

		// Test custom binary extensions
		for _, ext := range customBinary {
			if !registry.IsBinary("test" + ext) {
				t.Errorf("Expected %s to be recognized as binary", ext)
			}
		}

		// Test custom language mappings
		for ext, expectedLang := range customLanguages {
			if lang := registry.GetLanguage("test" + ext); lang != expectedLang {
				t.Errorf("Expected %s to map to %s, got %s", ext, expectedLang, lang)
			}
		}
	})

	// Test DisableExtensions
	t.Run("DisableExtensions", func(t *testing.T) {
		// Add some extensions first
		registry.AddImageExtension(".png")
		registry.AddImageExtension(".jpg")
		registry.AddBinaryExtension(".exe")
		registry.AddBinaryExtension(".dll")
		registry.AddLanguageMapping(".go", "go")
		registry.AddLanguageMapping(".py", "python")

		// Verify they work
		if !registry.IsImage("test.png") {
			t.Error("Expected .png to be image before disabling")
		}
		if !registry.IsBinary("test.exe") {
			t.Error("Expected .exe to be binary before disabling")
		}
		if registry.GetLanguage("test.go") != "go" {
			t.Error("Expected .go to map to go before disabling")
		}

		// Disable some extensions
		disabledImages := []string{".png"}
		disabledBinary := []string{".exe"}
		disabledLanguages := []string{".go"}

		registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)

		// Test that disabled extensions no longer work
		if registry.IsImage("test.png") {
			t.Error("Expected .png to not be image after disabling")
		}
		if registry.IsBinary("test.exe") {
			t.Error("Expected .exe to not be binary after disabling")
		}
		if registry.GetLanguage("test.go") != "" {
			t.Error("Expected .go to not map to language after disabling")
		}

		// Test that non-disabled extensions still work
		if !registry.IsImage("test.jpg") {
			t.Error("Expected .jpg to still be image after disabling .png")
		}
		if !registry.IsBinary("test.dll") {
			t.Error("Expected .dll to still be binary after disabling .exe")
		}
		if registry.GetLanguage("test.py") != "python" {
			t.Error("Expected .py to still map to python after disabling .go")
		}
	})

	// Test empty values handling
	t.Run("EmptyValuesHandling", func(t *testing.T) {
		registry := &FileTypeRegistry{
			imageExts:   make(map[string]bool),
			binaryExts:  make(map[string]bool),
			languageMap: make(map[string]string),
		}

		// Test with empty values
		customImages := []string{"", ".valid", ""}
		customBinary := []string{"", ".valid"}
		customLanguages := map[string]string{
			"":       "invalid",
			".valid": "",
			".good":  "good",
		}

		registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)

		// Only valid entries should be added
		if registry.IsImage("test.") {
			t.Error("Expected empty extension to not be added as image")
		}
		if !registry.IsImage("test.valid") {
			t.Error("Expected .valid to be added as image")
		}
		if registry.IsBinary("test.") {
			t.Error("Expected empty extension to not be added as binary")
		}
		if !registry.IsBinary("test.valid") {
			t.Error("Expected .valid to be added as binary")
		}
		if registry.GetLanguage("test.") != "" {
			t.Error("Expected empty extension to not be added as language")
		}
		if registry.GetLanguage("test.valid") != "" {
			t.Error("Expected .valid with empty language to not be added")
		}
		if registry.GetLanguage("test.good") != "good" {
			t.Error("Expected .good to map to good")
		}
	})

	// Test case insensitive handling
	t.Run("CaseInsensitiveHandling", func(t *testing.T) {
		registry := &FileTypeRegistry{
			imageExts:   make(map[string]bool),
			binaryExts:  make(map[string]bool),
			languageMap: make(map[string]string),
		}

		customImages := []string{".WEBP", ".Avif"}
		customBinary := []string{".CUSTOM", ".MyBin"}
		customLanguages := map[string]string{
			".ZIG":  "zig",
			".Odin": "odin",
		}

		registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)

		// Test that both upper and lower case work
		if !registry.IsImage("test.webp") {
			t.Error("Expected .webp (lowercase) to work after adding .WEBP")
		}
		if !registry.IsImage("test.WEBP") {
			t.Error("Expected .WEBP (uppercase) to work")
		}
		if !registry.IsBinary("test.custom") {
			t.Error("Expected .custom (lowercase) to work after adding .CUSTOM")
		}
		if !registry.IsBinary("test.CUSTOM") {
			t.Error("Expected .CUSTOM (uppercase) to work")
		}
		if registry.GetLanguage("test.zig") != "zig" {
			t.Error("Expected .zig (lowercase) to work after adding .ZIG")
		}
		if registry.GetLanguage("test.ZIG") != "zig" {
			t.Error("Expected .ZIG (uppercase) to work")
		}
	})
}

// TestConfigureFromSettings tests the global configuration function.
func TestConfigureFromSettings(t *testing.T) {
	// Reset registry to ensure clean state
	registryOnce = sync.Once{}
	registry = nil

	// Test configuration application
	customImages := []string{".webp", ".avif"}
	customBinary := []string{".custom"}
	customLanguages := map[string]string{".zig": "zig"}
	disabledImages := []string{".gif"}   // Disable default extension
	disabledBinary := []string{".exe"}   // Disable default extension
	disabledLanguages := []string{".rb"} // Disable default extension

	ConfigureFromSettings(
		customImages,
		customBinary,
		customLanguages,
		disabledImages,
		disabledBinary,
		disabledLanguages,
	)

	// Test that custom extensions work
	if !IsImage("test.webp") {
		t.Error("Expected custom image extension .webp to work")
	}
	if !IsBinary("test.custom") {
		t.Error("Expected custom binary extension .custom to work")
	}
	if GetLanguage("test.zig") != "zig" {
		t.Error("Expected custom language .zig to work")
	}

	// Test that disabled extensions don't work
	if IsImage("test.gif") {
		t.Error("Expected disabled image extension .gif to not work")
	}
	if IsBinary("test.exe") {
		t.Error("Expected disabled binary extension .exe to not work")
	}
	if GetLanguage("test.rb") != "" {
		t.Error("Expected disabled language extension .rb to not work")
	}

	// Test that non-disabled defaults still work
	if !IsImage("test.png") {
		t.Error("Expected non-disabled image extension .png to still work")
	}
	if !IsBinary("test.dll") {
		t.Error("Expected non-disabled binary extension .dll to still work")
	}
	if GetLanguage("test.go") != "go" {
		t.Error("Expected non-disabled language extension .go to still work")
	}

	// Test multiple calls don't override previous configuration
	ConfigureFromSettings(
		[]string{".extra"},
		[]string{},
		map[string]string{},
		[]string{},
		[]string{},
		[]string{},
	)

	// Previous configuration should still work
	if !IsImage("test.webp") {
		t.Error("Expected previous configuration to persist")
	}
	// New configuration should also work
	if !IsImage("test.extra") {
		t.Error("Expected new configuration to be applied")
	}
}
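The configuration subtests pin down three behaviors for ApplyCustomExtensions: blank extension keys are skipped, language entries with an empty value are skipped, and matching is case-insensitive. A sketch of method bodies consistent with those assertions follows (it assumes the struct from the earlier sketch and a strings import; the actual gibidify implementation may normalize differently).

func (r *FileTypeRegistry) ApplyCustomExtensions(images, binaries []string, languages map[string]string) {
	for _, ext := range images {
		if ext != "" {
			r.imageExts[strings.ToLower(ext)] = true // EmptyValuesHandling: blank entries are ignored
		}
	}
	for _, ext := range binaries {
		if ext != "" {
			r.binaryExts[strings.ToLower(ext)] = true
		}
	}
	for ext, lang := range languages {
		if ext != "" && lang != "" { // both the extension and the language must be non-empty
			r.languageMap[strings.ToLower(ext)] = lang
		}
	}
}

func (r *FileTypeRegistry) DisableExtensions(images, binaries, languages []string) {
	for _, ext := range images {
		delete(r.imageExts, strings.ToLower(ext))
	}
	for _, ext := range binaries {
		delete(r.binaryExts, strings.ToLower(ext))
	}
	for _, ext := range languages {
		delete(r.languageMap, strings.ToLower(ext))
	}
}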
fileproc/filetypes_detection_test.go (new file, 226 lines)
@@ -0,0 +1,226 @@
package fileproc

import (
	"testing"
)

// TestFileTypeRegistry_LanguageDetection tests the language detection functionality.
func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected string
	}{
		// Programming languages
		{"main.go", "go"},
		{"script.py", "python"},
		{"app.js", "javascript"},
		{"component.tsx", "typescript"},
		{"service.ts", "typescript"},
		{"App.java", "java"},
		{"program.c", "c"},
		{"program.cpp", "cpp"},
		{"header.h", "c"},
		{"header.hpp", "cpp"},
		{"main.rs", "rust"},
		{"script.rb", "ruby"},
		{"index.php", "php"},
		{"app.swift", "swift"},
		{"MainActivity.kt", "kotlin"},
		{"Main.scala", "scala"},
		{"analysis.r", "r"},
		{"ViewController.m", "objc"},
		{"ViewController.mm", "objcpp"},
		{"Program.cs", "csharp"},
		{"Module.vb", "vbnet"},
		{"program.fs", "fsharp"},
		{"script.lua", "lua"},
		{"script.pl", "perl"},

		// Shell scripts
		{"script.sh", "bash"},
		{"script.bash", "bash"},
		{"script.zsh", "zsh"},
		{"script.fish", "fish"},
		{"script.ps1", "powershell"},
		{"script.bat", "batch"},
		{"script.cmd", "batch"},

		// Data and markup
		{"query.sql", "sql"},
		{"index.html", "html"},
		{"page.htm", "html"},
		{"data.xml", "xml"},
		{"style.css", "css"},
		{"style.scss", "scss"},
		{"style.sass", "sass"},
		{"style.less", "less"},
		{"config.json", "json"},
		{"config.yaml", "yaml"},
		{"config.yml", "yaml"},
		{"data.toml", "toml"},
		{"page.md", "markdown"},
		{"readme.markdown", ""},
		{"doc.rst", "rst"},
		{"book.tex", "latex"},

		// Configuration files
		{"Dockerfile", ""},
		{"Makefile", ""},
		{"GNUmakefile", ""},

		// Case sensitivity tests
		{"MAIN.GO", "go"},
		{"SCRIPT.PY", "python"},
		{"APP.JS", "javascript"},

		// Unknown extensions
		{"unknown.xyz", ""},
		{"file.unknown", ""},
		{"noextension", ""},
		{"", ""},
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.GetLanguage(tt.filename)
			if result != tt.expected {
				t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
			}
		})
	}
}

// TestFileTypeRegistry_ImageDetection tests the image detection functionality.
func TestFileTypeRegistry_ImageDetection(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected bool
	}{
		// Common image formats
		{"photo.png", true},
		{"image.jpg", true},
		{"picture.jpeg", true},
		{"animation.gif", true},
		{"bitmap.bmp", true},
		{"image.tiff", true},
		{"scan.tif", true},
		{"vector.svg", true},
		{"modern.webp", true},
		{"favicon.ico", true},

		// Case sensitivity tests
		{"PHOTO.PNG", true},
		{"IMAGE.JPG", true},
		{"PICTURE.JPEG", true},

		// Non-image files
		{"document.txt", false},
		{"script.js", false},
		{"data.json", false},
		{"archive.zip", false},
		{"executable.exe", false},

		// Edge cases
		{"", false},              // Empty filename
		{"image", false},         // No extension
		{".png", true},           // Just extension
		{"file.png.bak", false},  // Multiple extensions
		{"image.unknown", false}, // Unknown extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.IsImage(tt.filename)
			if result != tt.expected {
				t.Errorf("IsImage(%q) = %t, expected %t", tt.filename, result, tt.expected)
			}
		})
	}
}

// TestFileTypeRegistry_BinaryDetection tests the binary detection functionality.
func TestFileTypeRegistry_BinaryDetection(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected bool
	}{
		// Executable files
		{"program.exe", true},
		{"library.dll", true},
		{"libfoo.so", true},
		{"framework.dylib", true},
		{"data.bin", true},

		// Object and library files
		{"object.o", true},
		{"archive.a", true},
		{"library.lib", true},
		{"application.jar", true},
		{"bytecode.class", true},
		{"compiled.pyc", true},
		{"optimized.pyo", true},

		// System files
		{".DS_Store", true},

		// Document files (treated as binary)
		{"document.pdf", true},

		// Archive files
		{"archive.zip", true},
		{"backup.tar", true},
		{"compressed.gz", true},
		{"data.bz2", true},
		{"package.xz", true},
		{"archive.7z", true},
		{"backup.rar", true},

		// Font files
		{"font.ttf", true},
		{"font.otf", true},
		{"font.woff", true},
		{"font.woff2", true},

		// Media files (video/audio)
		{"video.mp4", true},
		{"movie.avi", true},
		{"clip.mov", true},
		{"song.mp3", true},
		{"audio.wav", true},
		{"music.flac", true},

		// Case sensitivity tests
		{"PROGRAM.EXE", true},
		{"LIBRARY.DLL", true},
		{"ARCHIVE.ZIP", true},

		// Non-binary files
		{"document.txt", false},
		{"script.py", false},
		{"config.json", false},
		{"style.css", false},
		{"page.html", false},

		// Edge cases
		{"", false},             // Empty filename
		{"binary", false},       // No extension
		{".exe", true},          // Just extension
		{"file.exe.txt", false}, // Multiple extensions
		{"file.unknown", false}, // Unknown extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.IsBinary(tt.filename)
			if result != tt.expected {
				t.Errorf("IsBinary(%q) = %t, expected %t", tt.filename, result, tt.expected)
			}
		})
	}
}
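All three detection tables are consistent with a plain lookup of the lowercased final extension, which is why MAIN.GO resolves while file.png.bak and readme.markdown do not (only the last extension is considered, and .markdown has no default mapping). A lookup sketch under that assumption follows, using path/filepath for extension parsing; gibidify's real accessors may add guards such as the minimum-length check tested in the edge-case file.

func (r *FileTypeRegistry) IsImage(filename string) bool {
	return r.imageExts[strings.ToLower(filepath.Ext(filename))]
}

func (r *FileTypeRegistry) IsBinary(filename string) bool {
	return r.binaryExts[strings.ToLower(filepath.Ext(filename))]
}

func (r *FileTypeRegistry) GetLanguage(filename string) string {
	// filepath.Ext returns the suffix starting at the final dot, so
	// "file.png.bak" yields ".bak" and an unknown extension falls through to "".
	return r.languageMap[strings.ToLower(filepath.Ext(filename))]
}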
fileproc/filetypes_edge_cases_test.go (new file, 128 lines)
@@ -0,0 +1,128 @@
package fileproc

import (
	"testing"
)

// TestFileTypeRegistry_EdgeCases tests edge cases and boundary conditions.
func TestFileTypeRegistry_EdgeCases(t *testing.T) {
	registry := GetDefaultRegistry()

	// Test various edge cases for filename handling
	edgeCases := []struct {
		name     string
		filename string
		desc     string
	}{
		{"empty", "", "empty filename"},
		{"single_char", "a", "single character filename"},
		{"just_dot", ".", "just a dot"},
		{"double_dot", "..", "double dot"},
		{"hidden_file", ".hidden", "hidden file"},
		{"hidden_with_ext", ".hidden.txt", "hidden file with extension"},
		{"multiple_dots", "file.tar.gz", "multiple extensions"},
		{"trailing_dot", "file.", "trailing dot"},
		{"unicode", "файл.txt", "unicode filename"},
		{"spaces", "my file.txt", "filename with spaces"},
		{"special_chars", "file@#$.txt", "filename with special characters"},
		{"very_long", "very_long_filename_with_many_characters_in_it.extension", "very long filename"},
		{"no_basename", ".gitignore", "dotfile with no basename"},
		{"case_mixed", "FiLe.ExT", "mixed case"},
	}

	for _, tc := range edgeCases {
		t.Run(tc.name, func(t *testing.T) {
			// These should not panic
			_ = registry.IsImage(tc.filename)
			_ = registry.IsBinary(tc.filename)
			_ = registry.GetLanguage(tc.filename)

			// Global functions should also not panic
			_ = IsImage(tc.filename)
			_ = IsBinary(tc.filename)
			_ = GetLanguage(tc.filename)
		})
	}
}

// TestFileTypeRegistry_MinimumExtensionLength tests the minimum extension length requirement.
func TestFileTypeRegistry_MinimumExtensionLength(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected string
	}{
		{"", ""},            // Empty filename
		{"a", ""},           // Single character (less than minExtensionLength)
		{"ab", ""},          // Two characters, no extension
		{"a.b", ""},         // Extension too short, but filename too short anyway
		{"ab.c", "c"},       // Valid: filename >= minExtensionLength and .c is valid extension
		{"a.go", "go"},      // Valid extension
		{"ab.py", "python"}, // Valid extension
		{"a.unknown", ""},   // Valid length but unknown extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.GetLanguage(tt.filename)
			if result != tt.expected {
				t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
			}
		})
	}
}

// Benchmark tests for performance validation
func BenchmarkFileTypeRegistry_IsImage(b *testing.B) {
	registry := GetDefaultRegistry()
	filename := "test.png"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = registry.IsImage(filename)
	}
}

func BenchmarkFileTypeRegistry_IsBinary(b *testing.B) {
	registry := GetDefaultRegistry()
	filename := "test.exe"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = registry.IsBinary(filename)
	}
}

func BenchmarkFileTypeRegistry_GetLanguage(b *testing.B) {
	registry := GetDefaultRegistry()
	filename := "test.go"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = registry.GetLanguage(filename)
	}
}

func BenchmarkFileTypeRegistry_GlobalFunctions(b *testing.B) {
	filename := "test.go"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = IsImage(filename)
		_ = IsBinary(filename)
		_ = GetLanguage(filename)
	}
}

func BenchmarkFileTypeRegistry_ConcurrentAccess(b *testing.B) {
	filename := "test.go"

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_ = IsImage(filename)
			_ = IsBinary(filename)
			_ = GetLanguage(filename)
		}
	})
}
fileproc/filetypes_registry_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package fileproc

import (
	"testing"
)

// TestFileTypeRegistry_ModificationMethods tests the modification methods of FileTypeRegistry.
func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
	// Create a new registry instance for testing
	registry := &FileTypeRegistry{
		imageExts:   make(map[string]bool),
		binaryExts:  make(map[string]bool),
		languageMap: make(map[string]string),
	}

	// Test AddImageExtension
	t.Run("AddImageExtension", func(t *testing.T) {
		// Add a new image extension
		registry.AddImageExtension(".webp")
		if !registry.IsImage("test.webp") {
			t.Errorf("Expected .webp to be recognized as image after adding")
		}

		// Test case insensitive addition
		registry.AddImageExtension(".AVIF")
		if !registry.IsImage("test.avif") {
			t.Errorf("Expected .avif to be recognized as image after adding .AVIF")
		}
		if !registry.IsImage("test.AVIF") {
			t.Errorf("Expected .AVIF to be recognized as image")
		}

		// Test without dot prefix
		registry.AddImageExtension("heic")
		if registry.IsImage("test.heic") {
			t.Errorf("Expected extension without dot to not work")
		}

		// Test with proper dot prefix
		registry.AddImageExtension(".heic")
		if !registry.IsImage("test.heic") {
			t.Errorf("Expected .heic to be recognized as image")
		}
	})

	// Test AddBinaryExtension
	t.Run("AddBinaryExtension", func(t *testing.T) {
		// Add a new binary extension
		registry.AddBinaryExtension(".custom")
		if !registry.IsBinary("file.custom") {
			t.Errorf("Expected .custom to be recognized as binary after adding")
		}

		// Test case insensitive addition
		registry.AddBinaryExtension(".SPECIAL")
		if !registry.IsBinary("file.special") {
			t.Errorf("Expected .special to be recognized as binary after adding .SPECIAL")
		}
		if !registry.IsBinary("file.SPECIAL") {
			t.Errorf("Expected .SPECIAL to be recognized as binary")
		}

		// Test without dot prefix
		registry.AddBinaryExtension("bin")
		if registry.IsBinary("file.bin") {
			t.Errorf("Expected extension without dot to not work")
		}

		// Test with proper dot prefix
		registry.AddBinaryExtension(".bin")
		if !registry.IsBinary("file.bin") {
			t.Errorf("Expected .bin to be recognized as binary")
		}
	})

	// Test AddLanguageMapping
	t.Run("AddLanguageMapping", func(t *testing.T) {
		// Add a new language mapping
		registry.AddLanguageMapping(".xyz", "CustomLang")
		if lang := registry.GetLanguage("file.xyz"); lang != "CustomLang" {
			t.Errorf("Expected CustomLang, got %s", lang)
		}

		// Test case insensitive addition
		registry.AddLanguageMapping(".ABC", "UpperLang")
		if lang := registry.GetLanguage("file.abc"); lang != "UpperLang" {
			t.Errorf("Expected UpperLang, got %s", lang)
		}
		if lang := registry.GetLanguage("file.ABC"); lang != "UpperLang" {
			t.Errorf("Expected UpperLang for uppercase, got %s", lang)
		}

		// Test without dot prefix
		registry.AddLanguageMapping("nolang", "NoLang")
		if lang := registry.GetLanguage("file.nolang"); lang == "NoLang" {
			t.Errorf("Expected extension without dot to not work")
		}

		// Test with proper dot prefix
		registry.AddLanguageMapping(".nolang", "NoLang")
		if lang := registry.GetLanguage("file.nolang"); lang != "NoLang" {
			t.Errorf("Expected NoLang, got %s", lang)
		}

		// Test overriding existing mapping
		registry.AddLanguageMapping(".xyz", "NewCustomLang")
		if lang := registry.GetLanguage("file.xyz"); lang != "NewCustomLang" {
			t.Errorf("Expected NewCustomLang after override, got %s", lang)
		}
	})
}

// TestFileTypeRegistry_DefaultRegistryConsistency tests default registry behavior.
func TestFileTypeRegistry_DefaultRegistryConsistency(t *testing.T) {
	registry := GetDefaultRegistry()

	// Test that registry methods work consistently
	if !registry.IsImage("test.png") {
		t.Error("Expected .png to be recognized as image")
	}
	if !registry.IsBinary("test.exe") {
		t.Error("Expected .exe to be recognized as binary")
	}
	if lang := registry.GetLanguage("test.go"); lang != "go" {
		t.Errorf("Expected go, got %s", lang)
	}

	// Test that multiple calls return consistent results
	for i := 0; i < 5; i++ {
		if !registry.IsImage("test.jpg") {
			t.Errorf("Iteration %d: Expected .jpg to be recognized as image", i)
		}
		if registry.IsBinary("test.txt") {
			t.Errorf("Iteration %d: Expected .txt to not be recognized as binary", i)
		}
	}
}
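The modification subtests also fix the add-side contract: extensions are stored lowercased, and a value passed without a leading dot is stored as-is rather than being normalized, so it never matches lookups keyed on the dotted extension (hence the "extension without dot to not work" assertions). A sketch of add methods matching that contract; the real implementation may differ in detail.

func (r *FileTypeRegistry) AddImageExtension(ext string) {
	// Stored lowercased and otherwise as given; "heic" (no dot) will never
	// match a lookup keyed on filepath.Ext, which always includes the dot.
	r.imageExts[strings.ToLower(ext)] = true
}

func (r *FileTypeRegistry) AddBinaryExtension(ext string) {
	r.binaryExts[strings.ToLower(ext)] = true
}

func (r *FileTypeRegistry) AddLanguageMapping(ext, language string) {
	// Later calls overwrite earlier ones, which the override subtest relies on.
	r.languageMap[strings.ToLower(ext)] = language
}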
@@ -1,827 +0,0 @@
package fileproc

import (
	"fmt"
	"sync"
	"testing"
)

// TestFileTypeRegistry_ModificationMethods tests the modification methods of FileTypeRegistry.
func TestFileTypeRegistry_ModificationMethods(t *testing.T) {
	// Create a new registry instance for testing
	registry := &FileTypeRegistry{
		imageExts:   make(map[string]bool),
		binaryExts:  make(map[string]bool),
		languageMap: make(map[string]string),
	}

	// Test AddImageExtension
	t.Run("AddImageExtension", func(t *testing.T) {
		// Add a new image extension
		registry.AddImageExtension(".webp")
		if !registry.IsImage("test.webp") {
			t.Errorf("Expected .webp to be recognized as image after adding")
		}

		// Test case insensitive addition
		registry.AddImageExtension(".AVIF")
		if !registry.IsImage("test.avif") {
			t.Errorf("Expected .avif to be recognized as image after adding .AVIF")
		}
		if !registry.IsImage("test.AVIF") {
			t.Errorf("Expected .AVIF to be recognized as image")
		}

		// Test with dot prefix
		registry.AddImageExtension("heic")
		if registry.IsImage("test.heic") {
			t.Errorf("Expected extension without dot to not work")
		}

		// Test with proper dot prefix
		registry.AddImageExtension(".heic")
		if !registry.IsImage("test.heic") {
			t.Errorf("Expected .heic to be recognized as image")
		}
	})

	// Test AddBinaryExtension
	t.Run("AddBinaryExtension", func(t *testing.T) {
		// Add a new binary extension
		registry.AddBinaryExtension(".custom")
		if !registry.IsBinary("test.custom") {
			t.Errorf("Expected .custom to be recognized as binary after adding")
		}

		// Test case insensitive addition
		registry.AddBinaryExtension(".NEWBIN")
		if !registry.IsBinary("test.newbin") {
			t.Errorf("Expected .newbin to be recognized as binary after adding .NEWBIN")
		}
		if !registry.IsBinary("test.NEWBIN") {
			t.Errorf("Expected .NEWBIN to be recognized as binary")
		}

		// Test overwriting existing extension
		registry.AddBinaryExtension(".custom")
		if !registry.IsBinary("test.custom") {
			t.Errorf("Expected .custom to still be recognized as binary after re-adding")
		}
	})

	// Test AddLanguageMapping
	t.Run("AddLanguageMapping", func(t *testing.T) {
		// Add a new language mapping
		registry.AddLanguageMapping(".zig", "zig")
		if registry.GetLanguage("test.zig") != "zig" {
			t.Errorf("Expected .zig to map to 'zig', got '%s'", registry.GetLanguage("test.zig"))
		}

		// Test case insensitive addition
		registry.AddLanguageMapping(".V", "vlang")
		if registry.GetLanguage("test.v") != "vlang" {
			t.Errorf("Expected .v to map to 'vlang' after adding .V, got '%s'", registry.GetLanguage("test.v"))
		}
		if registry.GetLanguage("test.V") != "vlang" {
			t.Errorf("Expected .V to map to 'vlang', got '%s'", registry.GetLanguage("test.V"))
		}

		// Test overwriting existing mapping
		registry.AddLanguageMapping(".zig", "ziglang")
		if registry.GetLanguage("test.zig") != "ziglang" {
			t.Errorf("Expected .zig to map to 'ziglang' after update, got '%s'", registry.GetLanguage("test.zig"))
		}

		// Test empty language
		registry.AddLanguageMapping(".empty", "")
		if registry.GetLanguage("test.empty") != "" {
			t.Errorf("Expected .empty to map to empty string, got '%s'", registry.GetLanguage("test.empty"))
		}
	})
}

// TestFileTypeRegistry_LanguageDetection tests the language detection functionality.
func TestFileTypeRegistry_LanguageDetection(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected string
	}{
		// Programming languages
		{"main.go", "go"},
		{"script.py", "python"},
		{"app.js", "javascript"},
		{"component.tsx", "typescript"},
		{"service.ts", "typescript"},
		{"App.java", "java"},
		{"program.c", "c"},
		{"program.cpp", "cpp"},
		{"header.h", "c"},
		{"header.hpp", "cpp"},
		{"main.rs", "rust"},
		{"script.rb", "ruby"},
		{"index.php", "php"},
		{"app.swift", "swift"},
		{"MainActivity.kt", "kotlin"},
		{"Main.scala", "scala"},
		{"analysis.r", "r"},
		{"ViewController.m", "objc"},
		{"ViewController.mm", "objcpp"},
		{"Program.cs", "csharp"},
		{"Module.vb", "vbnet"},
		{"program.fs", "fsharp"},
		{"script.lua", "lua"},
		{"script.pl", "perl"},

		// Shell scripts
		{"script.sh", "bash"},
		{"script.bash", "bash"},
		{"script.zsh", "zsh"},
		{"script.fish", "fish"},
		{"script.ps1", "powershell"},
		{"script.bat", "batch"},
		{"script.cmd", "batch"},

		// Data and markup
		{"query.sql", "sql"},
		{"index.html", "html"},
		{"page.htm", "html"},
		{"data.xml", "xml"},
		{"style.css", "css"},
		{"style.scss", "scss"},
		{"style.sass", "sass"},
		{"style.less", "less"},
		{"data.json", "json"},
		{"config.yaml", "yaml"},
		{"config.yml", "yaml"},
		{"config.toml", "toml"},
		{"README.md", "markdown"},
		{"doc.rst", "rst"},
		{"paper.tex", "latex"},

		// Modern languages
		{"main.dart", "dart"},
		{"Main.elm", "elm"},
		{"core.clj", "clojure"},
		{"server.ex", "elixir"},
		{"test.exs", "elixir"},
		{"server.erl", "erlang"},
		{"header.hrl", "erlang"},
		{"main.hs", "haskell"},
		{"module.ml", "ocaml"},
		{"interface.mli", "ocaml"},
		{"main.nim", "nim"},
		{"config.nims", "nim"},

		// Web frameworks
		{"Component.vue", "vue"},
		{"Component.jsx", "javascript"},

		// Case sensitivity tests
		{"MAIN.GO", "go"},
		{"Script.PY", "python"},
		{"APP.JS", "javascript"},

		// Edge cases
		{"", ""},             // Empty filename
		{"a", ""},            // Too short (less than minExtensionLength)
		{"noext", ""},        // No extension
		{".hidden", ""},      // Hidden file with no name
		{"file.", ""},        // Extension is just a dot
		{"file.unknown", ""}, // Unknown extension
		{"file.123", ""},     // Numeric extension
		{"a.b", ""},          // Very short filename and extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.GetLanguage(tt.filename)
			if result != tt.expected {
				t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
			}
		})
	}
}

// TestFileTypeRegistry_ImageDetection tests the image detection functionality.
func TestFileTypeRegistry_ImageDetection(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected bool
	}{
		// Common image formats
		{"photo.png", true},
		{"image.jpg", true},
		{"picture.jpeg", true},
		{"animation.gif", true},
		{"bitmap.bmp", true},
		{"image.tiff", true},
		{"scan.tif", true},
		{"vector.svg", true},
		{"modern.webp", true},
		{"favicon.ico", true},

		// Case sensitivity tests
		{"PHOTO.PNG", true},
		{"IMAGE.JPG", true},
		{"PICTURE.JPEG", true},

		// Non-image files
		{"document.txt", false},
		{"script.js", false},
		{"data.json", false},
		{"archive.zip", false},
		{"executable.exe", false},

		// Edge cases
		{"", false},              // Empty filename
		{"image", false},         // No extension
		{".png", true},           // Just extension
		{"file.png.bak", false},  // Multiple extensions
		{"image.unknown", false}, // Unknown extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.IsImage(tt.filename)
			if result != tt.expected {
				t.Errorf("IsImage(%q) = %t, expected %t", tt.filename, result, tt.expected)
			}
		})
	}
}

// TestFileTypeRegistry_BinaryDetection tests the binary detection functionality.
func TestFileTypeRegistry_BinaryDetection(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected bool
	}{
		// Executable files
		{"program.exe", true},
		{"library.dll", true},
		{"libfoo.so", true},
		{"framework.dylib", true},
		{"data.bin", true},

		// Object and library files
		{"object.o", true},
		{"archive.a", true},
		{"library.lib", true},
		{"application.jar", true},
		{"bytecode.class", true},
		{"compiled.pyc", true},
		{"optimized.pyo", true},

		// System files
		{".DS_Store", true},

		// Document files (treated as binary)
		{"document.pdf", true},

		// Archive files
		{"archive.zip", true},
		{"backup.tar", true},
		{"compressed.gz", true},
		{"data.bz2", true},
		{"package.xz", true},
		{"archive.7z", true},
		{"backup.rar", true},

		// Font files
		{"font.ttf", true},
		{"font.otf", true},
		{"font.woff", true},
		{"font.woff2", true},

		// Media files
		{"song.mp3", true},
		{"video.mp4", true},
		{"movie.avi", true},
		{"clip.mov", true},
		{"video.wmv", true},
		{"animation.flv", true},
		{"modern.webm", true},
		{"audio.ogg", true},
		{"sound.wav", true},
		{"music.flac", true},

		// Database files
		{"data.dat", true},
		{"database.db", true},
		{"app.sqlite", true},

		// Case sensitivity tests
		{"PROGRAM.EXE", true},
		{"LIBRARY.DLL", true},

		// Non-binary files
		{"document.txt", false},
		{"script.js", false},
		{"data.json", false},
		{"style.css", false},
		{"page.html", false},

		// Edge cases
		{"", false},             // Empty filename
		{"binary", false},       // No extension
		{".exe", true},          // Just extension
		{"file.exe.bak", false}, // Multiple extensions
		{"file.unknown", false}, // Unknown extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.IsBinary(tt.filename)
			if result != tt.expected {
				t.Errorf("IsBinary(%q) = %t, expected %t", tt.filename, result, tt.expected)
			}
		})
	}
}

// TestFileTypeRegistry_DefaultRegistryConsistency tests that the default registry is consistent.
func TestFileTypeRegistry_DefaultRegistryConsistency(t *testing.T) {
	// Get registry multiple times and ensure it's the same instance
	registry1 := GetDefaultRegistry()
	registry2 := GetDefaultRegistry()
	registry3 := getRegistry()

	if registry1 != registry2 {
		t.Error("GetDefaultRegistry() should return the same instance")
	}
	if registry1 != registry3 {
		t.Error("getRegistry() should return the same instance as GetDefaultRegistry()")
	}

	// Test that global functions use the same registry
	filename := "test.go"
	if IsImage(filename) != registry1.IsImage(filename) {
		t.Error("IsImage() global function should match registry method")
	}
	if IsBinary(filename) != registry1.IsBinary(filename) {
		t.Error("IsBinary() global function should match registry method")
	}
	if GetLanguage(filename) != registry1.GetLanguage(filename) {
		t.Error("GetLanguage() global function should match registry method")
	}
}

// TestFileTypeRegistry_ThreadSafety tests the thread safety of the FileTypeRegistry.
func TestFileTypeRegistry_ThreadSafety(t *testing.T) {
	const numGoroutines = 100
	const numOperationsPerGoroutine = 100

	var wg sync.WaitGroup

	// Test concurrent read operations
	t.Run("ConcurrentReads", func(t *testing.T) {
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				registry := GetDefaultRegistry()

				for j := 0; j < numOperationsPerGoroutine; j++ {
					// Test various file detection operations
					_ = registry.IsImage("test.png")
					_ = registry.IsBinary("test.exe")
					_ = registry.GetLanguage("test.go")

					// Test global functions too
					_ = IsImage("image.jpg")
					_ = IsBinary("binary.dll")
					_ = GetLanguage("script.py")
				}
			}(i)
		}
		wg.Wait()
	})

	// Test concurrent registry access (singleton creation)
	t.Run("ConcurrentRegistryAccess", func(t *testing.T) {
		// Reset the registry to test concurrent initialization
		// Note: This is not safe in a real application, but needed for testing
		registryOnce = sync.Once{}
		registry = nil

		registries := make([]*FileTypeRegistry, numGoroutines)

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				registries[id] = GetDefaultRegistry()
			}(i)
		}
		wg.Wait()

		// Verify all goroutines got the same registry instance
		firstRegistry := registries[0]
		for i := 1; i < numGoroutines; i++ {
			if registries[i] != firstRegistry {
				t.Errorf("Registry %d is different from registry 0", i)
			}
		}
	})

	// Test concurrent modifications on separate registry instances
	t.Run("ConcurrentModifications", func(t *testing.T) {
		// Create separate registry instances for each goroutine to test modification thread safety
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()

				// Create a new registry instance for this goroutine
				registry := &FileTypeRegistry{
					imageExts:   make(map[string]bool),
					binaryExts:  make(map[string]bool),
					languageMap: make(map[string]string),
				}

				for j := 0; j < numOperationsPerGoroutine; j++ {
					// Add unique extensions for this goroutine
					extSuffix := fmt.Sprintf("_%d_%d", id, j)

					registry.AddImageExtension(".img" + extSuffix)
					registry.AddBinaryExtension(".bin" + extSuffix)
					registry.AddLanguageMapping(".lang"+extSuffix, "lang"+extSuffix)

					// Verify the additions worked
					if !registry.IsImage("test.img" + extSuffix) {
						t.Errorf("Failed to add image extension .img%s", extSuffix)
					}
					if !registry.IsBinary("test.bin" + extSuffix) {
						t.Errorf("Failed to add binary extension .bin%s", extSuffix)
					}
					if registry.GetLanguage("test.lang"+extSuffix) != "lang"+extSuffix {
						t.Errorf("Failed to add language mapping .lang%s", extSuffix)
					}
				}
			}(i)
		}
		wg.Wait()
	})
}

// TestFileTypeRegistry_EdgeCases tests edge cases and boundary conditions.
func TestFileTypeRegistry_EdgeCases(t *testing.T) {
	registry := GetDefaultRegistry()

	// Test various edge cases for filename handling
	edgeCases := []struct {
		name     string
		filename string
		desc     string
	}{
		{"empty", "", "empty filename"},
		{"single_char", "a", "single character filename"},
		{"just_dot", ".", "just a dot"},
		{"double_dot", "..", "double dot"},
		{"hidden_file", ".hidden", "hidden file"},
		{"hidden_with_ext", ".hidden.txt", "hidden file with extension"},
		{"multiple_dots", "file.tar.gz", "multiple extensions"},
		{"trailing_dot", "file.", "trailing dot"},
		{"unicode", "файл.txt", "unicode filename"},
		{"spaces", "my file.txt", "filename with spaces"},
		{"special_chars", "file@#$.txt", "filename with special characters"},
		{"very_long", "very_long_filename_with_many_characters_in_it.extension", "very long filename"},
		{"no_basename", ".gitignore", "dotfile with no basename"},
		{"case_mixed", "FiLe.ExT", "mixed case"},
	}

	for _, tc := range edgeCases {
		t.Run(tc.name, func(t *testing.T) {
			// These should not panic
			_ = registry.IsImage(tc.filename)
			_ = registry.IsBinary(tc.filename)
			_ = registry.GetLanguage(tc.filename)

			// Global functions should also not panic
			_ = IsImage(tc.filename)
			_ = IsBinary(tc.filename)
			_ = GetLanguage(tc.filename)
		})
	}
}

// TestFileTypeRegistry_MinimumExtensionLength tests the minimum extension length requirement.
func TestFileTypeRegistry_MinimumExtensionLength(t *testing.T) {
	registry := GetDefaultRegistry()

	tests := []struct {
		filename string
		expected string
	}{
		{"", ""},            // Empty filename
		{"a", ""},           // Single character (less than minExtensionLength)
		{"ab", ""},          // Two characters, no extension
		{"a.b", ""},         // Extension too short, but filename too short anyway
		{"ab.c", "c"},       // Valid: filename >= minExtensionLength and .c is valid extension
		{"a.go", "go"},      // Valid extension
		{"ab.py", "python"}, // Valid extension
		{"a.unknown", ""},   // Valid length but unknown extension
	}

	for _, tt := range tests {
		t.Run(tt.filename, func(t *testing.T) {
			result := registry.GetLanguage(tt.filename)
			if result != tt.expected {
				t.Errorf("GetLanguage(%q) = %q, expected %q", tt.filename, result, tt.expected)
			}
		})
	}
}

// BenchmarkFileTypeRegistry tests performance of the registry operations.
func BenchmarkFileTypeRegistry_IsImage(b *testing.B) {
	registry := GetDefaultRegistry()
	filename := "test.png"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = registry.IsImage(filename)
	}
}

func BenchmarkFileTypeRegistry_IsBinary(b *testing.B) {
	registry := GetDefaultRegistry()
	filename := "test.exe"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = registry.IsBinary(filename)
	}
}

func BenchmarkFileTypeRegistry_GetLanguage(b *testing.B) {
	registry := GetDefaultRegistry()
	filename := "test.go"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = registry.GetLanguage(filename)
	}
}

func BenchmarkFileTypeRegistry_GlobalFunctions(b *testing.B) {
	filename := "test.go"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = IsImage(filename)
		_ = IsBinary(filename)
		_ = GetLanguage(filename)
	}
}

func BenchmarkFileTypeRegistry_ConcurrentAccess(b *testing.B) {
	filename := "test.go"

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_ = IsImage(filename)
			_ = IsBinary(filename)
			_ = GetLanguage(filename)
		}
	})
}

// TestFileTypeRegistry_Configuration tests the configuration functionality.
func TestFileTypeRegistry_Configuration(t *testing.T) {
	// Create a new registry instance for testing
	registry := &FileTypeRegistry{
		imageExts:   make(map[string]bool),
		binaryExts:  make(map[string]bool),
		languageMap: make(map[string]string),
	}

	// Test ApplyCustomExtensions
	t.Run("ApplyCustomExtensions", func(t *testing.T) {
		customImages := []string{".webp", ".avif", ".heic"}
		customBinary := []string{".custom", ".mybin"}
		customLanguages := map[string]string{
			".zig":  "zig",
			".odin": "odin",
			".v":    "vlang",
		}

		registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)

		// Test custom image extensions
		for _, ext := range customImages {
			if !registry.IsImage("test" + ext) {
				t.Errorf("Expected %s to be recognized as image", ext)
			}
		}

		// Test custom binary extensions
		for _, ext := range customBinary {
			if !registry.IsBinary("test" + ext) {
				t.Errorf("Expected %s to be recognized as binary", ext)
			}
		}

		// Test custom language mappings
		for ext, expectedLang := range customLanguages {
			if lang := registry.GetLanguage("test" + ext); lang != expectedLang {
				t.Errorf("Expected %s to map to %s, got %s", ext, expectedLang, lang)
			}
		}
	})

	// Test DisableExtensions
	t.Run("DisableExtensions", func(t *testing.T) {
		// Add some extensions first
		registry.AddImageExtension(".png")
		registry.AddImageExtension(".jpg")
		registry.AddBinaryExtension(".exe")
		registry.AddBinaryExtension(".dll")
		registry.AddLanguageMapping(".go", "go")
		registry.AddLanguageMapping(".py", "python")

		// Verify they work
		if !registry.IsImage("test.png") {
			t.Error("Expected .png to be image before disabling")
		}
		if !registry.IsBinary("test.exe") {
			t.Error("Expected .exe to be binary before disabling")
		}
		if registry.GetLanguage("test.go") != "go" {
			t.Error("Expected .go to map to go before disabling")
		}

		// Disable some extensions
		disabledImages := []string{".png"}
		disabledBinary := []string{".exe"}
		disabledLanguages := []string{".go"}

		registry.DisableExtensions(disabledImages, disabledBinary, disabledLanguages)

		// Test that disabled extensions no longer work
		if registry.IsImage("test.png") {
			t.Error("Expected .png to not be image after disabling")
		}
		if registry.IsBinary("test.exe") {
			t.Error("Expected .exe to not be binary after disabling")
		}
		if registry.GetLanguage("test.go") != "" {
			t.Error("Expected .go to not map to language after disabling")
		}

		// Test that non-disabled extensions still work
		if !registry.IsImage("test.jpg") {
			t.Error("Expected .jpg to still be image after disabling .png")
		}
		if !registry.IsBinary("test.dll") {
			t.Error("Expected .dll to still be binary after disabling .exe")
		}
		if registry.GetLanguage("test.py") != "python" {
			t.Error("Expected .py to still map to python after disabling .go")
		}
	})

	// Test empty values handling
	t.Run("EmptyValuesHandling", func(t *testing.T) {
		registry := &FileTypeRegistry{
			imageExts:   make(map[string]bool),
			binaryExts:  make(map[string]bool),
			languageMap: make(map[string]string),
		}

		// Test with empty values
		customImages := []string{"", ".valid", ""}
		customBinary := []string{"", ".valid"}
		customLanguages := map[string]string{
			"":       "invalid",
			".valid": "",
			".good":  "good",
		}

		registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)

		// Only valid entries should be added
		if registry.IsImage("test.") {
			t.Error("Expected empty extension to not be added as image")
		}
		if !registry.IsImage("test.valid") {
			t.Error("Expected .valid to be added as image")
		}
		if registry.IsBinary("test.") {
			t.Error("Expected empty extension to not be added as binary")
		}
		if !registry.IsBinary("test.valid") {
			t.Error("Expected .valid to be added as binary")
		}
		if registry.GetLanguage("test.") != "" {
			t.Error("Expected empty extension to not be added as language")
		}
		if registry.GetLanguage("test.valid") != "" {
			t.Error("Expected .valid with empty language to not be added")
		}
		if registry.GetLanguage("test.good") != "good" {
			t.Error("Expected .good to map to good")
		}
	})

	// Test case insensitive handling
	t.Run("CaseInsensitiveHandling", func(t *testing.T) {
		registry := &FileTypeRegistry{
			imageExts:   make(map[string]bool),
			binaryExts:  make(map[string]bool),
			languageMap: make(map[string]string),
		}

		customImages := []string{".WEBP", ".Avif"}
		customBinary := []string{".CUSTOM", ".MyBin"}
		customLanguages := map[string]string{
			".ZIG":  "zig",
			".Odin": "odin",
		}

		registry.ApplyCustomExtensions(customImages, customBinary, customLanguages)

		// Test that both upper and lower case work
		if !registry.IsImage("test.webp") {
			t.Error("Expected .webp (lowercase) to work after adding .WEBP")
		}
		if !registry.IsImage("test.WEBP") {
			t.Error("Expected .WEBP (uppercase) to work")
		}
		if !registry.IsBinary("test.custom") {
			t.Error("Expected .custom (lowercase) to work after adding .CUSTOM")
		}
		if !registry.IsBinary("test.CUSTOM") {
			t.Error("Expected .CUSTOM (uppercase) to work")
		}
		if registry.GetLanguage("test.zig") != "zig" {
			t.Error("Expected .zig (lowercase) to work after adding .ZIG")
		}
		if registry.GetLanguage("test.ZIG") != "zig" {
			t.Error("Expected .ZIG (uppercase) to work")
		}
	})
}

// TestConfigureFromSettings tests the global configuration function.
func TestConfigureFromSettings(t *testing.T) {
	// Reset registry to ensure clean state
	registryOnce = sync.Once{}
	registry = nil

	// Test configuration application
	customImages := []string{".webp", ".avif"}
	customBinary := []string{".custom"}
	customLanguages := map[string]string{".zig": "zig"}
	disabledImages := []string{".gif"}   // Disable default extension
	disabledBinary := []string{".exe"}   // Disable default extension
	disabledLanguages := []string{".rb"} // Disable default extension

	ConfigureFromSettings(
		customImages,
		customBinary,
		customLanguages,
		disabledImages,
		disabledBinary,
		disabledLanguages,
	)

	// Test that custom extensions work
	if !IsImage("test.webp") {
		t.Error("Expected custom image extension .webp to work")
	}
	if !IsBinary("test.custom") {
		t.Error("Expected custom binary extension .custom to work")
	}
	if GetLanguage("test.zig") != "zig" {
		t.Error("Expected custom language .zig to work")
	}

	// Test that disabled extensions don't work
	if IsImage("test.gif") {
		t.Error("Expected disabled image extension .gif to not work")
	}
	if IsBinary("test.exe") {
		t.Error("Expected disabled binary extension .exe to not work")
	}
	if GetLanguage("test.rb") != "" {
		t.Error("Expected disabled language extension .rb to not work")
	}

	// Test that non-disabled defaults still work
	if !IsImage("test.png") {
		t.Error("Expected non-disabled image extension .png to still work")
	}
	if !IsBinary("test.dll") {
		t.Error("Expected non-disabled binary extension .dll to still work")
	}
	if GetLanguage("test.go") != "go" {
		t.Error("Expected non-disabled language extension .go to still work")
	}
}
@@ -31,9 +31,9 @@ func (w *JSONWriter) Start(prefix, suffix string) error {
}

// Write escaped prefix
escapedPrefix := escapeJSONString(prefix)
if _, err := w.outFile.WriteString(escapedPrefix); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON prefix")
escapedPrefix := utils.EscapeForJSON(prefix)
if err := utils.WriteWithErrorWrap(w.outFile, escapedPrefix, "failed to write JSON prefix", ""); err != nil {
return err
}

if _, err := w.outFile.WriteString(`","suffix":"`); err != nil {
@@ -41,9 +41,9 @@ func (w *JSONWriter) Start(prefix, suffix string) error {
}

// Write escaped suffix
escapedSuffix := escapeJSONString(suffix)
if _, err := w.outFile.WriteString(escapedSuffix); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON suffix")
escapedSuffix := utils.EscapeForJSON(suffix)
if err := utils.WriteWithErrorWrap(w.outFile, escapedSuffix, "failed to write JSON suffix", ""); err != nil {
return err
}

if _, err := w.outFile.WriteString(`","files":[`); err != nil {
@@ -79,12 +79,12 @@ func (w *JSONWriter) Close() error {

// writeStreaming writes a large file as JSON in streaming chunks.
func (w *JSONWriter) writeStreaming(req WriteRequest) error {
defer w.closeReader(req.Reader, req.Path)
defer utils.SafeCloseReader(req.Reader, req.Path)

language := detectLanguage(req.Path)

// Write file start
escapedPath := escapeJSONString(req.Path)
escapedPath := utils.EscapeForJSON(req.Path)
if _, err := fmt.Fprintf(w.outFile, `{"path":"%s","language":"%s","content":"`, escapedPath, language); err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON file start").WithFilePath(req.Path)
}
@@ -124,43 +124,13 @@ func (w *JSONWriter) writeInline(req WriteRequest) error {

// streamJSONContent streams content with JSON escaping.
func (w *JSONWriter) streamJSONContent(reader io.Reader, path string) error {
buf := make([]byte, StreamChunkSize)
for {
n, err := reader.Read(buf)
if n > 0 {
escaped := escapeJSONString(string(buf[:n]))
if _, writeErr := w.outFile.WriteString(escaped); writeErr != nil {
return utils.WrapError(writeErr, utils.ErrorTypeIO, utils.CodeIOWrite, "failed to write JSON chunk").WithFilePath(path)
}
}
if err == io.EOF {
break
}
if err != nil {
return utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIORead, "failed to read JSON chunk").WithFilePath(path)
}
}
return nil
return utils.StreamContent(reader, w.outFile, StreamChunkSize, path, func(chunk []byte) []byte {
escaped := utils.EscapeForJSON(string(chunk))
return []byte(escaped)
})
}

// closeReader safely closes a reader if it implements io.Closer.
func (w *JSONWriter) closeReader(reader io.Reader, path string) {
if closer, ok := reader.(io.Closer); ok {
if err := closer.Close(); err != nil {
utils.LogError(
"Failed to close file reader",
utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOClose, "failed to close file reader").WithFilePath(path),
)
}
}
}

// escapeJSONString escapes a string for JSON output.
func escapeJSONString(s string) string {
// Use json.Marshal to properly escape the string, then remove the quotes
escaped, _ := json.Marshal(s)
return string(escaped[1 : len(escaped)-1]) // Remove surrounding quotes
}

// startJSONWriter handles JSON format output with streaming support.
func startJSONWriter(outFile *os.File, writeCh <-chan WriteRequest, done chan<- struct{}, prefix, suffix string) {

@@ -1,423 +0,0 @@
|
||||
// Package fileproc provides resource monitoring and limit enforcement for security.
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/ivuorinen/gibidify/config"
|
||||
"github.com/ivuorinen/gibidify/utils"
|
||||
)
|
||||
|
||||
// ResourceMonitor monitors resource usage and enforces limits to prevent DoS attacks.
|
||||
type ResourceMonitor struct {
|
||||
enabled bool
|
||||
maxFiles int
|
||||
maxTotalSize int64
|
||||
fileProcessingTimeout time.Duration
|
||||
overallTimeout time.Duration
|
||||
maxConcurrentReads int
|
||||
rateLimitFilesPerSec int
|
||||
hardMemoryLimitMB int
|
||||
enableGracefulDegr bool
|
||||
enableResourceMon bool
|
||||
|
||||
// Current state tracking
|
||||
filesProcessed int64
|
||||
totalSizeProcessed int64
|
||||
concurrentReads int64
|
||||
startTime time.Time
|
||||
lastRateLimitCheck time.Time
|
||||
hardMemoryLimitBytes int64
|
||||
|
||||
// Rate limiting
|
||||
rateLimiter *time.Ticker
|
||||
rateLimitChan chan struct{}
|
||||
|
||||
// Synchronization
|
||||
mu sync.RWMutex
|
||||
violationLogged map[string]bool
|
||||
degradationActive bool
|
||||
emergencyStopRequested bool
|
||||
}
|
||||
|
||||
// ResourceMetrics holds comprehensive resource usage metrics.
|
||||
type ResourceMetrics struct {
|
||||
FilesProcessed int64 `json:"files_processed"`
|
||||
TotalSizeProcessed int64 `json:"total_size_processed"`
|
||||
ConcurrentReads int64 `json:"concurrent_reads"`
|
||||
ProcessingDuration time.Duration `json:"processing_duration"`
|
||||
AverageFileSize float64 `json:"average_file_size"`
|
||||
ProcessingRate float64 `json:"processing_rate_files_per_sec"`
|
||||
MemoryUsageMB int64 `json:"memory_usage_mb"`
|
||||
MaxMemoryUsageMB int64 `json:"max_memory_usage_mb"`
|
||||
ViolationsDetected []string `json:"violations_detected"`
|
||||
DegradationActive bool `json:"degradation_active"`
|
||||
EmergencyStopActive bool `json:"emergency_stop_active"`
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
}
|
||||
|
||||
// ResourceViolation represents a detected resource limit violation.
|
||||
type ResourceViolation struct {
|
||||
Type string `json:"type"`
|
||||
Message string `json:"message"`
|
||||
Current interface{} `json:"current"`
|
||||
Limit interface{} `json:"limit"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Context map[string]interface{} `json:"context"`
|
||||
}
|
||||
|
||||
// NewResourceMonitor creates a new resource monitor with configuration.
|
||||
func NewResourceMonitor() *ResourceMonitor {
|
||||
rm := &ResourceMonitor{
|
||||
enabled: config.GetResourceLimitsEnabled(),
|
||||
maxFiles: config.GetMaxFiles(),
|
||||
maxTotalSize: config.GetMaxTotalSize(),
|
||||
fileProcessingTimeout: time.Duration(config.GetFileProcessingTimeoutSec()) * time.Second,
|
||||
overallTimeout: time.Duration(config.GetOverallTimeoutSec()) * time.Second,
|
||||
maxConcurrentReads: config.GetMaxConcurrentReads(),
|
||||
rateLimitFilesPerSec: config.GetRateLimitFilesPerSec(),
|
||||
hardMemoryLimitMB: config.GetHardMemoryLimitMB(),
|
||||
enableGracefulDegr: config.GetEnableGracefulDegradation(),
|
||||
enableResourceMon: config.GetEnableResourceMonitoring(),
|
||||
startTime: time.Now(),
|
||||
lastRateLimitCheck: time.Now(),
|
||||
violationLogged: make(map[string]bool),
|
||||
hardMemoryLimitBytes: int64(config.GetHardMemoryLimitMB()) * 1024 * 1024,
|
||||
}
|
||||
|
||||
// Initialize rate limiter if rate limiting is enabled
|
||||
if rm.enabled && rm.rateLimitFilesPerSec > 0 {
|
||||
interval := time.Second / time.Duration(rm.rateLimitFilesPerSec)
|
||||
rm.rateLimiter = time.NewTicker(interval)
|
||||
rm.rateLimitChan = make(chan struct{}, rm.rateLimitFilesPerSec)
|
||||
|
||||
// Pre-fill the rate limit channel
|
||||
for i := 0; i < rm.rateLimitFilesPerSec; i++ {
|
||||
select {
|
||||
case rm.rateLimitChan <- struct{}{}:
|
||||
default:
|
||||
goto rateLimitFull
|
||||
}
|
||||
}
|
||||
rateLimitFull:
|
||||
|
||||
// Start rate limiter refill goroutine
|
||||
go rm.rateLimiterRefill()
|
||||
}
|
||||
|
||||
return rm
|
||||
}
|
||||
|
||||
// ValidateFileProcessing checks if a file can be processed based on resource limits.
|
||||
func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int64) error {
|
||||
if !rm.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
rm.mu.RLock()
|
||||
defer rm.mu.RUnlock()
|
||||
|
||||
// Check if emergency stop is active
|
||||
if rm.emergencyStopRequested {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitMemory,
|
||||
"processing stopped due to emergency memory condition",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"emergency_stop_active": true,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check file count limit
|
||||
currentFiles := atomic.LoadInt64(&rm.filesProcessed)
|
||||
if int(currentFiles) >= rm.maxFiles {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitFiles,
|
||||
"maximum file count limit exceeded",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"current_files": currentFiles,
|
||||
"max_files": rm.maxFiles,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check total size limit
|
||||
currentTotalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
|
||||
if currentTotalSize+fileSize > rm.maxTotalSize {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitTotalSize,
|
||||
"maximum total size limit would be exceeded",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"current_total_size": currentTotalSize,
|
||||
"file_size": fileSize,
|
||||
"max_total_size": rm.maxTotalSize,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check overall timeout
|
||||
if time.Since(rm.startTime) > rm.overallTimeout {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitTimeout,
|
||||
"overall processing timeout exceeded",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"processing_duration": time.Since(rm.startTime),
|
||||
"overall_timeout": rm.overallTimeout,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AcquireReadSlot attempts to acquire a slot for concurrent file reading.
|
||||
func (rm *ResourceMonitor) AcquireReadSlot(ctx context.Context) error {
|
||||
if !rm.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait for available read slot
|
||||
for {
|
||||
currentReads := atomic.LoadInt64(&rm.concurrentReads)
|
||||
if currentReads < int64(rm.maxConcurrentReads) {
|
||||
if atomic.CompareAndSwapInt64(&rm.concurrentReads, currentReads, currentReads+1) {
|
||||
break
|
||||
}
|
||||
// CAS failed, retry
|
||||
continue
|
||||
}
|
||||
|
||||
// Wait and retry
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(time.Millisecond):
|
||||
// Continue loop
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReleaseReadSlot releases a concurrent reading slot.
|
||||
func (rm *ResourceMonitor) ReleaseReadSlot() {
|
||||
if rm.enabled {
|
||||
atomic.AddInt64(&rm.concurrentReads, -1)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForRateLimit waits for rate limiting if enabled.
|
||||
func (rm *ResourceMonitor) WaitForRateLimit(ctx context.Context) error {
|
||||
if !rm.enabled || rm.rateLimitFilesPerSec <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-rm.rateLimitChan:
|
||||
return nil
|
||||
case <-time.After(time.Second): // Fallback timeout
|
||||
logrus.Warn("Rate limiting timeout exceeded, continuing without rate limit")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// CheckHardMemoryLimit checks if hard memory limit is exceeded and takes action.
|
||||
func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
|
||||
if !rm.enabled || rm.hardMemoryLimitMB <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
currentMemory := int64(m.Alloc)
|
||||
|
||||
if currentMemory > rm.hardMemoryLimitBytes {
|
||||
rm.mu.Lock()
|
||||
defer rm.mu.Unlock()
|
||||
|
||||
// Log violation if not already logged
|
||||
violationKey := "hard_memory_limit"
|
||||
if !rm.violationLogged[violationKey] {
|
||||
logrus.Errorf("Hard memory limit exceeded: %dMB > %dMB",
|
||||
currentMemory/1024/1024, rm.hardMemoryLimitMB)
|
||||
rm.violationLogged[violationKey] = true
|
||||
}
|
||||
|
||||
if rm.enableGracefulDegr {
|
||||
// Force garbage collection
|
||||
runtime.GC()
|
||||
|
||||
// Check again after GC
|
||||
runtime.ReadMemStats(&m)
|
||||
currentMemory = int64(m.Alloc)
|
||||
|
||||
if currentMemory > rm.hardMemoryLimitBytes {
|
||||
// Still over limit, activate emergency stop
|
||||
rm.emergencyStopRequested = true
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitMemory,
|
||||
"hard memory limit exceeded, emergency stop activated",
|
||||
"",
|
||||
map[string]interface{}{
|
||||
"current_memory_mb": currentMemory / 1024 / 1024,
|
||||
"limit_mb": rm.hardMemoryLimitMB,
|
||||
"emergency_stop": true,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
// Memory freed by GC, continue with degradation
|
||||
rm.degradationActive = true
|
||||
logrus.Info("Memory freed by garbage collection, continuing with degradation mode")
|
||||
}
|
||||
} else {
|
||||
// No graceful degradation, hard stop
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitMemory,
|
||||
"hard memory limit exceeded",
|
||||
"",
|
||||
map[string]interface{}{
|
||||
"current_memory_mb": currentMemory / 1024 / 1024,
|
||||
"limit_mb": rm.hardMemoryLimitMB,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RecordFileProcessed records that a file has been successfully processed.
|
||||
func (rm *ResourceMonitor) RecordFileProcessed(fileSize int64) {
|
||||
if rm.enabled {
|
||||
atomic.AddInt64(&rm.filesProcessed, 1)
|
||||
atomic.AddInt64(&rm.totalSizeProcessed, fileSize)
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetrics returns current resource usage metrics.
|
||||
func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
|
||||
if !rm.enableResourceMon {
|
||||
return ResourceMetrics{}
|
||||
}
|
||||
|
||||
rm.mu.RLock()
|
||||
defer rm.mu.RUnlock()
|
||||
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
filesProcessed := atomic.LoadInt64(&rm.filesProcessed)
|
||||
totalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
|
||||
duration := time.Since(rm.startTime)
|
||||
|
||||
avgFileSize := float64(0)
|
||||
if filesProcessed > 0 {
|
||||
avgFileSize = float64(totalSize) / float64(filesProcessed)
|
||||
}
|
||||
|
||||
processingRate := float64(0)
|
||||
if duration.Seconds() > 0 {
|
||||
processingRate = float64(filesProcessed) / duration.Seconds()
|
||||
}
|
||||
|
||||
// Collect violations
|
||||
violations := make([]string, 0, len(rm.violationLogged))
|
||||
for violation := range rm.violationLogged {
|
||||
violations = append(violations, violation)
|
||||
}
|
||||
|
||||
return ResourceMetrics{
|
||||
FilesProcessed: filesProcessed,
|
||||
TotalSizeProcessed: totalSize,
|
||||
ConcurrentReads: atomic.LoadInt64(&rm.concurrentReads),
|
||||
ProcessingDuration: duration,
|
||||
AverageFileSize: avgFileSize,
|
||||
ProcessingRate: processingRate,
|
||||
MemoryUsageMB: int64(m.Alloc) / 1024 / 1024,
|
||||
MaxMemoryUsageMB: int64(rm.hardMemoryLimitMB),
|
||||
ViolationsDetected: violations,
|
||||
DegradationActive: rm.degradationActive,
|
||||
EmergencyStopActive: rm.emergencyStopRequested,
|
||||
LastUpdated: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmergencyStopActive returns whether emergency stop is active.
|
||||
func (rm *ResourceMonitor) IsEmergencyStopActive() bool {
|
||||
rm.mu.RLock()
|
||||
defer rm.mu.RUnlock()
|
||||
return rm.emergencyStopRequested
|
||||
}
|
||||
|
||||
// IsDegradationActive returns whether degradation mode is active.
|
||||
func (rm *ResourceMonitor) IsDegradationActive() bool {
|
||||
rm.mu.RLock()
|
||||
defer rm.mu.RUnlock()
|
||||
return rm.degradationActive
|
||||
}
|
||||
|
||||
// LogResourceInfo logs current resource limit configuration.
|
||||
func (rm *ResourceMonitor) LogResourceInfo() {
|
||||
if rm.enabled {
|
||||
logrus.Infof("Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
|
||||
rm.maxFiles, rm.maxTotalSize/1024/1024, int(rm.fileProcessingTimeout.Seconds()), int(rm.overallTimeout.Seconds()))
|
||||
logrus.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
|
||||
rm.maxConcurrentReads, rm.rateLimitFilesPerSec, rm.hardMemoryLimitMB)
|
||||
logrus.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",
|
||||
rm.enableGracefulDegr, rm.enableResourceMon)
|
||||
} else {
|
||||
logrus.Info("Resource limits disabled")
|
||||
}
|
||||
}
|
||||
|
||||
// Close cleans up the resource monitor.
|
||||
func (rm *ResourceMonitor) Close() {
|
||||
if rm.rateLimiter != nil {
|
||||
rm.rateLimiter.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// rateLimiterRefill refills the rate limiting channel periodically.
|
||||
func (rm *ResourceMonitor) rateLimiterRefill() {
|
||||
for range rm.rateLimiter.C {
|
||||
select {
|
||||
case rm.rateLimitChan <- struct{}{}:
|
||||
default:
|
||||
// Channel is full, skip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CreateFileProcessingContext creates a context with file processing timeout.
|
||||
func (rm *ResourceMonitor) CreateFileProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
|
||||
if !rm.enabled || rm.fileProcessingTimeout <= 0 {
|
||||
return parent, func() {}
|
||||
}
|
||||
return context.WithTimeout(parent, rm.fileProcessingTimeout)
|
||||
}
|
||||
|
||||
// CreateOverallProcessingContext creates a context with overall processing timeout.
|
||||
func (rm *ResourceMonitor) CreateOverallProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
|
||||
if !rm.enabled || rm.overallTimeout <= 0 {
|
||||
return parent, func() {}
|
||||
}
|
||||
return context.WithTimeout(parent, rm.overallTimeout)
|
||||
}
|
||||
59
fileproc/resource_monitor_concurrency.go
Normal file
@@ -0,0 +1,59 @@
package fileproc

import (
"context"
"sync/atomic"
"time"
)

// AcquireReadSlot attempts to acquire a slot for concurrent file reading.
func (rm *ResourceMonitor) AcquireReadSlot(ctx context.Context) error {
if !rm.enabled {
return nil
}

// Wait for available read slot
for {
currentReads := atomic.LoadInt64(&rm.concurrentReads)
if currentReads < int64(rm.maxConcurrentReads) {
if atomic.CompareAndSwapInt64(&rm.concurrentReads, currentReads, currentReads+1) {
break
}
// CAS failed, retry
continue
}

// Wait and retry
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Millisecond):
// Continue loop
}
}

return nil
}

// ReleaseReadSlot releases a concurrent reading slot.
func (rm *ResourceMonitor) ReleaseReadSlot() {
if rm.enabled {
atomic.AddInt64(&rm.concurrentReads, -1)
}
}

// CreateFileProcessingContext creates a context with file processing timeout.
func (rm *ResourceMonitor) CreateFileProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
if !rm.enabled || rm.fileProcessingTimeout <= 0 {
return parent, func() {}
}
return context.WithTimeout(parent, rm.fileProcessingTimeout)
}

// CreateOverallProcessingContext creates a context with overall processing timeout.
func (rm *ResourceMonitor) CreateOverallProcessingContext(parent context.Context) (context.Context, context.CancelFunc) {
if !rm.enabled || rm.overallTimeout <= 0 {
return parent, func() {}
}
return context.WithTimeout(parent, rm.overallTimeout)
}
95
fileproc/resource_monitor_concurrency_test.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/ivuorinen/gibidify/testutil"
|
||||
)
|
||||
|
||||
func TestResourceMonitor_ConcurrentReadsLimit(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set a low concurrent reads limit for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.maxConcurrentReads", 2)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
// First read slot should succeed
|
||||
err := rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for first read slot, got %v", err)
|
||||
}
|
||||
|
||||
// Second read slot should succeed
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for second read slot, got %v", err)
|
||||
}
|
||||
|
||||
// Third read slot should timeout (context deadline exceeded)
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err == nil {
|
||||
t.Error("Expected timeout error for third read slot, got nil")
|
||||
}
|
||||
|
||||
// Release one slot and try again
|
||||
rm.ReleaseReadSlot()
|
||||
|
||||
// Create new context for the next attempt
|
||||
ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel2()
|
||||
|
||||
err = rm.AcquireReadSlot(ctx2)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error after releasing a slot, got %v", err)
|
||||
}
|
||||
|
||||
// Clean up remaining slots
|
||||
rm.ReleaseReadSlot()
|
||||
rm.ReleaseReadSlot()
|
||||
}
|
||||
|
||||
func TestResourceMonitor_TimeoutContexts(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set short timeouts for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.fileProcessingTimeoutSec", 1) // 1 second
|
||||
viper.Set("resourceLimits.overallTimeoutSec", 2) // 2 seconds
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
parentCtx := context.Background()
|
||||
|
||||
// Test file processing context
|
||||
fileCtx, fileCancel := rm.CreateFileProcessingContext(parentCtx)
|
||||
defer fileCancel()
|
||||
|
||||
deadline, ok := fileCtx.Deadline()
|
||||
if !ok {
|
||||
t.Error("Expected file processing context to have a deadline")
|
||||
} else if time.Until(deadline) > time.Second+100*time.Millisecond {
|
||||
t.Error("File processing timeout appears to be too long")
|
||||
}
|
||||
|
||||
// Test overall processing context
|
||||
overallCtx, overallCancel := rm.CreateOverallProcessingContext(parentCtx)
|
||||
defer overallCancel()
|
||||
|
||||
deadline, ok = overallCtx.Deadline()
|
||||
if !ok {
|
||||
t.Error("Expected overall processing context to have a deadline")
|
||||
} else if time.Until(deadline) > 2*time.Second+100*time.Millisecond {
|
||||
t.Error("Overall processing timeout appears to be too long")
|
||||
}
|
||||
}
|
||||
81
fileproc/resource_monitor_integration_test.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/ivuorinen/gibidify/testutil"
|
||||
)
|
||||
|
||||
func TestResourceMonitor_Integration(t *testing.T) {
|
||||
// Create temporary test directory
|
||||
tempDir := t.TempDir()
|
||||
|
||||
// Create test files
|
||||
testFiles := []string{"test1.txt", "test2.txt", "test3.txt"}
|
||||
for _, filename := range testFiles {
|
||||
testutil.CreateTestFile(t, tempDir, filename, []byte("test content"))
|
||||
}
|
||||
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Configure resource limits
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.maxFiles", 5)
|
||||
viper.Set("resourceLimits.maxTotalSize", 1024*1024) // 1MB
|
||||
viper.Set("resourceLimits.fileProcessingTimeoutSec", 10)
|
||||
viper.Set("resourceLimits.maxConcurrentReads", 3)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Test file processing workflow
|
||||
for _, filename := range testFiles {
|
||||
filePath := filepath.Join(tempDir, filename)
|
||||
fileInfo, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to stat test file %s: %v", filePath, err)
|
||||
}
|
||||
|
||||
// Validate file can be processed
|
||||
err = rm.ValidateFileProcessing(filePath, fileInfo.Size())
|
||||
if err != nil {
|
||||
t.Errorf("Failed to validate file %s: %v", filePath, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Acquire read slot
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to acquire read slot for %s: %v", filePath, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check memory limits
|
||||
err = rm.CheckHardMemoryLimit()
|
||||
if err != nil {
|
||||
t.Errorf("Memory limit check failed for %s: %v", filePath, err)
|
||||
}
|
||||
|
||||
// Record processing
|
||||
rm.RecordFileProcessed(fileInfo.Size())
|
||||
|
||||
// Release read slot
|
||||
rm.ReleaseReadSlot()
|
||||
}
|
||||
|
||||
// Verify final metrics
|
||||
metrics := rm.GetMetrics()
|
||||
if metrics.FilesProcessed != int64(len(testFiles)) {
|
||||
t.Errorf("Expected %d files processed, got %d", len(testFiles), metrics.FilesProcessed)
|
||||
}
|
||||
|
||||
// Test resource limit logging
|
||||
rm.LogResourceInfo()
|
||||
}
|
||||
79
fileproc/resource_monitor_metrics.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RecordFileProcessed records that a file has been successfully processed.
|
||||
func (rm *ResourceMonitor) RecordFileProcessed(fileSize int64) {
|
||||
if rm.enabled {
|
||||
atomic.AddInt64(&rm.filesProcessed, 1)
|
||||
atomic.AddInt64(&rm.totalSizeProcessed, fileSize)
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetrics returns current resource usage metrics.
|
||||
func (rm *ResourceMonitor) GetMetrics() ResourceMetrics {
|
||||
if !rm.enableResourceMon {
|
||||
return ResourceMetrics{}
|
||||
}
|
||||
|
||||
rm.mu.RLock()
|
||||
defer rm.mu.RUnlock()
|
||||
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
filesProcessed := atomic.LoadInt64(&rm.filesProcessed)
|
||||
totalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
|
||||
duration := time.Since(rm.startTime)
|
||||
|
||||
avgFileSize := float64(0)
|
||||
if filesProcessed > 0 {
|
||||
avgFileSize = float64(totalSize) / float64(filesProcessed)
|
||||
}
|
||||
|
||||
processingRate := float64(0)
|
||||
if duration.Seconds() > 0 {
|
||||
processingRate = float64(filesProcessed) / duration.Seconds()
|
||||
}
|
||||
|
||||
// Collect violations
|
||||
violations := make([]string, 0, len(rm.violationLogged))
|
||||
for violation := range rm.violationLogged {
|
||||
violations = append(violations, violation)
|
||||
}
|
||||
|
||||
return ResourceMetrics{
|
||||
FilesProcessed: filesProcessed,
|
||||
TotalSizeProcessed: totalSize,
|
||||
ConcurrentReads: atomic.LoadInt64(&rm.concurrentReads),
|
||||
ProcessingDuration: duration,
|
||||
AverageFileSize: avgFileSize,
|
||||
ProcessingRate: processingRate,
|
||||
MemoryUsageMB: int64(m.Alloc) / 1024 / 1024,
|
||||
MaxMemoryUsageMB: int64(rm.hardMemoryLimitMB),
|
||||
ViolationsDetected: violations,
|
||||
DegradationActive: rm.degradationActive,
|
||||
EmergencyStopActive: rm.emergencyStopRequested,
|
||||
LastUpdated: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// LogResourceInfo logs current resource limit configuration.
|
||||
func (rm *ResourceMonitor) LogResourceInfo() {
|
||||
if rm.enabled {
|
||||
logrus.Infof("Resource limits enabled: maxFiles=%d, maxTotalSize=%dMB, fileTimeout=%ds, overallTimeout=%ds",
|
||||
rm.maxFiles, rm.maxTotalSize/1024/1024, int(rm.fileProcessingTimeout.Seconds()), int(rm.overallTimeout.Seconds()))
|
||||
logrus.Infof("Resource limits: maxConcurrentReads=%d, rateLimitFPS=%d, hardMemoryMB=%d",
|
||||
rm.maxConcurrentReads, rm.rateLimitFilesPerSec, rm.hardMemoryLimitMB)
|
||||
logrus.Infof("Resource features: gracefulDegradation=%v, monitoring=%v",
|
||||
rm.enableGracefulDegr, rm.enableResourceMon)
|
||||
} else {
|
||||
logrus.Info("Resource limits disabled")
|
||||
}
|
||||
}
|
||||
49
fileproc/resource_monitor_metrics_test.go
Normal file
@@ -0,0 +1,49 @@
package fileproc

import (
"testing"
"time"

"github.com/spf13/viper"

"github.com/ivuorinen/gibidify/testutil"
)

func TestResourceMonitor_Metrics(t *testing.T) {
testutil.ResetViperConfig(t, "")

viper.Set("resourceLimits.enabled", true)
viper.Set("resourceLimits.enableResourceMonitoring", true)

rm := NewResourceMonitor()
defer rm.Close()

// Process some files to generate metrics
rm.RecordFileProcessed(1000)
rm.RecordFileProcessed(2000)
rm.RecordFileProcessed(500)

metrics := rm.GetMetrics()

// Verify metrics
if metrics.FilesProcessed != 3 {
t.Errorf("Expected 3 files processed, got %d", metrics.FilesProcessed)
}

if metrics.TotalSizeProcessed != 3500 {
t.Errorf("Expected total size 3500, got %d", metrics.TotalSizeProcessed)
}

expectedAvgSize := float64(3500) / float64(3)
if metrics.AverageFileSize != expectedAvgSize {
t.Errorf("Expected average file size %.2f, got %.2f", expectedAvgSize, metrics.AverageFileSize)
}

if metrics.ProcessingRate <= 0 {
t.Error("Expected positive processing rate")
}

if !metrics.LastUpdated.After(time.Now().Add(-time.Second)) {
t.Error("Expected recent LastUpdated timestamp")
}
}
36
fileproc/resource_monitor_rate_limiting.go
Normal file
@@ -0,0 +1,36 @@
package fileproc

import (
"context"
"time"

"github.com/sirupsen/logrus"
)

// WaitForRateLimit waits for rate limiting if enabled.
func (rm *ResourceMonitor) WaitForRateLimit(ctx context.Context) error {
if !rm.enabled || rm.rateLimitFilesPerSec <= 0 {
return nil
}

select {
case <-ctx.Done():
return ctx.Err()
case <-rm.rateLimitChan:
return nil
case <-time.After(time.Second): // Fallback timeout
logrus.Warn("Rate limiting timeout exceeded, continuing without rate limit")
return nil
}
}

// rateLimiterRefill refills the rate limiting channel periodically.
func (rm *ResourceMonitor) rateLimiterRefill() {
for range rm.rateLimiter.C {
select {
case rm.rateLimitChan <- struct{}{}:
default:
// Channel is full, skip
}
}
}
40
fileproc/resource_monitor_rate_limiting_test.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/ivuorinen/gibidify/testutil"
|
||||
)
|
||||
|
||||
func TestResourceMonitor_RateLimiting(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Enable rate limiting with a low rate for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.rateLimitFilesPerSec", 5) // 5 files per second
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// First few requests should succeed quickly
|
||||
start := time.Now()
|
||||
for i := 0; i < 3; i++ {
|
||||
err := rm.WaitForRateLimit(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for rate limit wait %d, got %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Should have taken some time due to rate limiting
|
||||
duration := time.Since(start)
|
||||
if duration < 200*time.Millisecond {
|
||||
t.Logf("Rate limiting may not be working as expected, took only %v", duration)
|
||||
}
|
||||
}
|
||||
22
fileproc/resource_monitor_state.go
Normal file
@@ -0,0 +1,22 @@
package fileproc

// IsEmergencyStopActive returns whether emergency stop is active.
func (rm *ResourceMonitor) IsEmergencyStopActive() bool {
rm.mu.RLock()
defer rm.mu.RUnlock()
return rm.emergencyStopRequested
}

// IsDegradationActive returns whether degradation mode is active.
func (rm *ResourceMonitor) IsDegradationActive() bool {
rm.mu.RLock()
defer rm.mu.RUnlock()
return rm.degradationActive
}

// Close cleans up the resource monitor.
func (rm *ResourceMonitor) Close() {
if rm.rateLimiter != nil {
rm.rateLimiter.Stop()
}
}
@@ -1,377 +0,0 @@
|
||||
// Package fileproc provides tests for resource monitoring functionality.
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/ivuorinen/gibidify/config"
|
||||
"github.com/ivuorinen/gibidify/testutil"
|
||||
"github.com/ivuorinen/gibidify/utils"
|
||||
)
|
||||
|
||||
func TestResourceMonitor_NewResourceMonitor(t *testing.T) {
|
||||
// Reset viper for clean test state
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
if rm == nil {
|
||||
t.Fatal("NewResourceMonitor() returned nil")
|
||||
}
|
||||
|
||||
// Test default values are set correctly
|
||||
if !rm.enabled {
|
||||
t.Error("Expected resource monitor to be enabled by default")
|
||||
}
|
||||
|
||||
if rm.maxFiles != config.DefaultMaxFiles {
|
||||
t.Errorf("Expected maxFiles to be %d, got %d", config.DefaultMaxFiles, rm.maxFiles)
|
||||
}
|
||||
|
||||
if rm.maxTotalSize != config.DefaultMaxTotalSize {
|
||||
t.Errorf("Expected maxTotalSize to be %d, got %d", config.DefaultMaxTotalSize, rm.maxTotalSize)
|
||||
}
|
||||
|
||||
if rm.fileProcessingTimeout != time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second {
|
||||
t.Errorf("Expected fileProcessingTimeout to be %v, got %v",
|
||||
time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second, rm.fileProcessingTimeout)
|
||||
}
|
||||
|
||||
// Clean up
|
||||
rm.Close()
|
||||
}
|
||||
|
||||
func TestResourceMonitor_DisabledResourceLimits(t *testing.T) {
|
||||
// Reset viper for clean test state
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set resource limits disabled
|
||||
viper.Set("resourceLimits.enabled", false)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
// Test that validation passes when disabled
|
||||
err := rm.ValidateFileProcessing("/tmp/test.txt", 1000)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error when resource limits disabled, got %v", err)
|
||||
}
|
||||
|
||||
// Test that read slot acquisition works when disabled
|
||||
ctx := context.Background()
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error when acquiring read slot with disabled limits, got %v", err)
|
||||
}
|
||||
rm.ReleaseReadSlot()
|
||||
|
||||
// Test that rate limiting is bypassed when disabled
|
||||
err = rm.WaitForRateLimit(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error when rate limiting disabled, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMonitor_FileCountLimit(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set a very low file count limit for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.maxFiles", 2)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
// First file should pass
|
||||
err := rm.ValidateFileProcessing("/tmp/file1.txt", 100)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for first file, got %v", err)
|
||||
}
|
||||
rm.RecordFileProcessed(100)
|
||||
|
||||
// Second file should pass
|
||||
err = rm.ValidateFileProcessing("/tmp/file2.txt", 100)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for second file, got %v", err)
|
||||
}
|
||||
rm.RecordFileProcessed(100)
|
||||
|
||||
// Third file should fail
|
||||
err = rm.ValidateFileProcessing("/tmp/file3.txt", 100)
|
||||
if err == nil {
|
||||
t.Error("Expected error for third file (exceeds limit), got nil")
|
||||
}
|
||||
|
||||
// Verify it's the correct error type
|
||||
structErr, ok := err.(*utils.StructuredError)
|
||||
if !ok {
|
||||
t.Errorf("Expected StructuredError, got %T", err)
|
||||
} else if structErr.Code != utils.CodeResourceLimitFiles {
|
||||
t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitFiles, structErr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMonitor_TotalSizeLimit(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set a low total size limit for testing (1KB)
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.maxTotalSize", 1024)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
// First small file should pass
|
||||
err := rm.ValidateFileProcessing("/tmp/small.txt", 500)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for small file, got %v", err)
|
||||
}
|
||||
rm.RecordFileProcessed(500)
|
||||
|
||||
// Second small file should pass
|
||||
err = rm.ValidateFileProcessing("/tmp/small2.txt", 400)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for second small file, got %v", err)
|
||||
}
|
||||
rm.RecordFileProcessed(400)
|
||||
|
||||
// Large file that would exceed limit should fail
|
||||
err = rm.ValidateFileProcessing("/tmp/large.txt", 200)
|
||||
if err == nil {
|
||||
t.Error("Expected error for file that would exceed size limit, got nil")
|
||||
}
|
||||
|
||||
// Verify it's the correct error type
|
||||
structErr, ok := err.(*utils.StructuredError)
|
||||
if !ok {
|
||||
t.Errorf("Expected StructuredError, got %T", err)
|
||||
} else if structErr.Code != utils.CodeResourceLimitTotalSize {
|
||||
t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitTotalSize, structErr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMonitor_ConcurrentReadsLimit(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set a low concurrent reads limit for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.maxConcurrentReads", 2)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
// First read slot should succeed
|
||||
err := rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for first read slot, got %v", err)
|
||||
}
|
||||
|
||||
// Second read slot should succeed
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for second read slot, got %v", err)
|
||||
}
|
||||
|
||||
// Third read slot should timeout (context deadline exceeded)
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err == nil {
|
||||
t.Error("Expected timeout error for third read slot, got nil")
|
||||
}
|
||||
|
||||
// Release one slot and try again
|
||||
rm.ReleaseReadSlot()
|
||||
|
||||
// Create new context for the next attempt
|
||||
ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel2()
|
||||
|
||||
err = rm.AcquireReadSlot(ctx2)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error after releasing a slot, got %v", err)
|
||||
}
|
||||
|
||||
// Clean up remaining slots
|
||||
rm.ReleaseReadSlot()
|
||||
rm.ReleaseReadSlot()
|
||||
}
|
||||
|
||||
func TestResourceMonitor_TimeoutContexts(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set short timeouts for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.fileProcessingTimeoutSec", 1) // 1 second
|
||||
viper.Set("resourceLimits.overallTimeoutSec", 2) // 2 seconds
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
parentCtx := context.Background()
|
||||
|
||||
// Test file processing context
|
||||
fileCtx, fileCancel := rm.CreateFileProcessingContext(parentCtx)
|
||||
defer fileCancel()
|
||||
|
||||
deadline, ok := fileCtx.Deadline()
|
||||
if !ok {
|
||||
t.Error("Expected file processing context to have a deadline")
|
||||
} else if time.Until(deadline) > time.Second+100*time.Millisecond {
|
||||
t.Error("File processing timeout appears to be too long")
|
||||
}
|
||||
|
||||
// Test overall processing context
|
||||
overallCtx, overallCancel := rm.CreateOverallProcessingContext(parentCtx)
|
||||
defer overallCancel()
|
||||
|
||||
deadline, ok = overallCtx.Deadline()
|
||||
if !ok {
|
||||
t.Error("Expected overall processing context to have a deadline")
|
||||
} else if time.Until(deadline) > 2*time.Second+100*time.Millisecond {
|
||||
t.Error("Overall processing timeout appears to be too long")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMonitor_RateLimiting(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Enable rate limiting with a low rate for testing
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.rateLimitFilesPerSec", 5) // 5 files per second
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// First few requests should succeed quickly
|
||||
start := time.Now()
|
||||
for i := 0; i < 3; i++ {
|
||||
err := rm.WaitForRateLimit(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error for rate limit wait %d, got %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Should have taken some time due to rate limiting
|
||||
duration := time.Since(start)
|
||||
if duration < 200*time.Millisecond {
|
||||
t.Logf("Rate limiting may not be working as expected, took only %v", duration)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMonitor_Metrics(t *testing.T) {
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.enableResourceMonitoring", true)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
// Process some files to generate metrics
|
||||
rm.RecordFileProcessed(1000)
|
||||
rm.RecordFileProcessed(2000)
|
||||
rm.RecordFileProcessed(500)
|
||||
|
||||
metrics := rm.GetMetrics()
|
||||
|
||||
// Verify metrics
|
||||
if metrics.FilesProcessed != 3 {
|
||||
t.Errorf("Expected 3 files processed, got %d", metrics.FilesProcessed)
|
||||
}
|
||||
|
||||
if metrics.TotalSizeProcessed != 3500 {
|
||||
t.Errorf("Expected total size 3500, got %d", metrics.TotalSizeProcessed)
|
||||
}
|
||||
|
||||
expectedAvgSize := float64(3500) / float64(3)
|
||||
if metrics.AverageFileSize != expectedAvgSize {
|
||||
t.Errorf("Expected average file size %.2f, got %.2f", expectedAvgSize, metrics.AverageFileSize)
|
||||
}
|
||||
|
||||
if metrics.ProcessingRate <= 0 {
|
||||
t.Error("Expected positive processing rate")
|
||||
}
|
||||
|
||||
if !metrics.LastUpdated.After(time.Now().Add(-time.Second)) {
|
||||
t.Error("Expected recent LastUpdated timestamp")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMonitor_Integration(t *testing.T) {
|
||||
// Create temporary test directory
|
||||
tempDir := t.TempDir()
|
||||
|
||||
// Create test files
|
||||
testFiles := []string{"test1.txt", "test2.txt", "test3.txt"}
|
||||
for _, filename := range testFiles {
|
||||
testutil.CreateTestFile(t, tempDir, filename, []byte("test content"))
|
||||
}
|
||||
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Configure resource limits
|
||||
viper.Set("resourceLimits.enabled", true)
|
||||
viper.Set("resourceLimits.maxFiles", 5)
|
||||
viper.Set("resourceLimits.maxTotalSize", 1024*1024) // 1MB
|
||||
viper.Set("resourceLimits.fileProcessingTimeoutSec", 10)
|
||||
viper.Set("resourceLimits.maxConcurrentReads", 3)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Test file processing workflow
|
||||
for _, filename := range testFiles {
|
||||
filePath := filepath.Join(tempDir, filename)
|
||||
fileInfo, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to stat test file %s: %v", filePath, err)
|
||||
}
|
||||
|
||||
// Validate file can be processed
|
||||
err = rm.ValidateFileProcessing(filePath, fileInfo.Size())
|
||||
if err != nil {
|
||||
t.Errorf("Failed to validate file %s: %v", filePath, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Acquire read slot
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to acquire read slot for %s: %v", filePath, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check memory limits
|
||||
err = rm.CheckHardMemoryLimit()
|
||||
if err != nil {
|
||||
t.Errorf("Memory limit check failed for %s: %v", filePath, err)
|
||||
}
|
||||
|
||||
// Record processing
|
||||
rm.RecordFileProcessed(fileInfo.Size())
|
||||
|
||||
// Release read slot
|
||||
rm.ReleaseReadSlot()
|
||||
}
|
||||
|
||||
// Verify final metrics
|
||||
metrics := rm.GetMetrics()
|
||||
if metrics.FilesProcessed != int64(len(testFiles)) {
|
||||
t.Errorf("Expected %d files processed, got %d", len(testFiles), metrics.FilesProcessed)
|
||||
}
|
||||
|
||||
// Test resource limit logging
|
||||
rm.LogResourceInfo()
|
||||
}
|
||||
108
fileproc/resource_monitor_types.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ivuorinen/gibidify/config"
|
||||
)
|
||||
|
||||
// ResourceMonitor monitors resource usage and enforces limits to prevent DoS attacks.
|
||||
type ResourceMonitor struct {
|
||||
enabled bool
|
||||
maxFiles int
|
||||
maxTotalSize int64
|
||||
fileProcessingTimeout time.Duration
|
||||
overallTimeout time.Duration
|
||||
maxConcurrentReads int
|
||||
rateLimitFilesPerSec int
|
||||
hardMemoryLimitMB int
|
||||
enableGracefulDegr bool
|
||||
enableResourceMon bool
|
||||
|
||||
// Current state tracking
|
||||
filesProcessed int64
|
||||
totalSizeProcessed int64
|
||||
concurrentReads int64
|
||||
startTime time.Time
|
||||
lastRateLimitCheck time.Time
|
||||
hardMemoryLimitBytes int64
|
||||
|
||||
// Rate limiting
|
||||
rateLimiter *time.Ticker
|
||||
rateLimitChan chan struct{}
|
||||
|
||||
// Synchronization
|
||||
mu sync.RWMutex
|
||||
violationLogged map[string]bool
|
||||
degradationActive bool
|
||||
emergencyStopRequested bool
|
||||
}
|
||||
|
||||
// ResourceMetrics holds comprehensive resource usage metrics.
|
||||
type ResourceMetrics struct {
|
||||
FilesProcessed int64 `json:"files_processed"`
|
||||
TotalSizeProcessed int64 `json:"total_size_processed"`
|
||||
ConcurrentReads int64 `json:"concurrent_reads"`
|
||||
ProcessingDuration time.Duration `json:"processing_duration"`
|
||||
AverageFileSize float64 `json:"average_file_size"`
|
||||
ProcessingRate float64 `json:"processing_rate_files_per_sec"`
|
||||
MemoryUsageMB int64 `json:"memory_usage_mb"`
|
||||
MaxMemoryUsageMB int64 `json:"max_memory_usage_mb"`
|
||||
ViolationsDetected []string `json:"violations_detected"`
|
||||
DegradationActive bool `json:"degradation_active"`
|
||||
EmergencyStopActive bool `json:"emergency_stop_active"`
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
}
|
||||
|
||||
// ResourceViolation represents a detected resource limit violation.
|
||||
type ResourceViolation struct {
|
||||
Type string `json:"type"`
|
||||
Message string `json:"message"`
|
||||
Current interface{} `json:"current"`
|
||||
Limit interface{} `json:"limit"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Context map[string]interface{} `json:"context"`
|
||||
}
|
||||
|
||||
// NewResourceMonitor creates a new resource monitor with configuration.
|
||||
func NewResourceMonitor() *ResourceMonitor {
|
||||
rm := &ResourceMonitor{
|
||||
enabled: config.GetResourceLimitsEnabled(),
|
||||
maxFiles: config.GetMaxFiles(),
|
||||
maxTotalSize: config.GetMaxTotalSize(),
|
||||
fileProcessingTimeout: time.Duration(config.GetFileProcessingTimeoutSec()) * time.Second,
|
||||
overallTimeout: time.Duration(config.GetOverallTimeoutSec()) * time.Second,
|
||||
maxConcurrentReads: config.GetMaxConcurrentReads(),
|
||||
rateLimitFilesPerSec: config.GetRateLimitFilesPerSec(),
|
||||
hardMemoryLimitMB: config.GetHardMemoryLimitMB(),
|
||||
enableGracefulDegr: config.GetEnableGracefulDegradation(),
|
||||
enableResourceMon: config.GetEnableResourceMonitoring(),
|
||||
startTime: time.Now(),
|
||||
lastRateLimitCheck: time.Now(),
|
||||
violationLogged: make(map[string]bool),
|
||||
hardMemoryLimitBytes: int64(config.GetHardMemoryLimitMB()) * 1024 * 1024,
|
||||
}
|
||||
|
||||
// Initialize rate limiter if rate limiting is enabled
|
||||
if rm.enabled && rm.rateLimitFilesPerSec > 0 {
|
||||
interval := time.Second / time.Duration(rm.rateLimitFilesPerSec)
|
||||
rm.rateLimiter = time.NewTicker(interval)
|
||||
rm.rateLimitChan = make(chan struct{}, rm.rateLimitFilesPerSec)
|
||||
|
||||
// Pre-fill the rate limit channel
|
||||
for i := 0; i < rm.rateLimitFilesPerSec; i++ {
|
||||
select {
|
||||
case rm.rateLimitChan <- struct{}{}:
|
||||
default:
|
||||
goto rateLimitFull
|
||||
}
|
||||
}
|
||||
rateLimitFull:
|
||||
|
||||
// Start rate limiter refill goroutine
|
||||
go rm.rateLimiterRefill()
|
||||
}
|
||||
|
||||
return rm
|
||||
}
|
||||
74
fileproc/resource_monitor_types_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/ivuorinen/gibidify/config"
|
||||
"github.com/ivuorinen/gibidify/testutil"
|
||||
)
|
||||
|
||||
func TestResourceMonitor_NewResourceMonitor(t *testing.T) {
|
||||
// Reset viper for clean test state
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
if rm == nil {
|
||||
t.Fatal("NewResourceMonitor() returned nil")
|
||||
}
|
||||
|
||||
// Test default values are set correctly
|
||||
if !rm.enabled {
|
||||
t.Error("Expected resource monitor to be enabled by default")
|
||||
}
|
||||
|
||||
if rm.maxFiles != config.DefaultMaxFiles {
|
||||
t.Errorf("Expected maxFiles to be %d, got %d", config.DefaultMaxFiles, rm.maxFiles)
|
||||
}
|
||||
|
||||
if rm.maxTotalSize != config.DefaultMaxTotalSize {
|
||||
t.Errorf("Expected maxTotalSize to be %d, got %d", config.DefaultMaxTotalSize, rm.maxTotalSize)
|
||||
}
|
||||
|
||||
if rm.fileProcessingTimeout != time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second {
|
||||
t.Errorf("Expected fileProcessingTimeout to be %v, got %v",
|
||||
time.Duration(config.DefaultFileProcessingTimeoutSec)*time.Second, rm.fileProcessingTimeout)
|
||||
}
|
||||
|
||||
// Clean up
|
||||
rm.Close()
|
||||
}
|
||||
|
||||
func TestResourceMonitor_DisabledResourceLimits(t *testing.T) {
|
||||
// Reset viper for clean test state
|
||||
testutil.ResetViperConfig(t, "")
|
||||
|
||||
// Set resource limits disabled
|
||||
viper.Set("resourceLimits.enabled", false)
|
||||
|
||||
rm := NewResourceMonitor()
|
||||
defer rm.Close()
|
||||
|
||||
// Test that validation passes when disabled
|
||||
err := rm.ValidateFileProcessing("/tmp/test.txt", 1000)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error when resource limits disabled, got %v", err)
|
||||
}
|
||||
|
||||
// Test that read slot acquisition works when disabled
|
||||
ctx := context.Background()
|
||||
err = rm.AcquireReadSlot(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error when acquiring read slot with disabled limits, got %v", err)
|
||||
}
|
||||
rm.ReleaseReadSlot()
|
||||
|
||||
// Test that rate limiting is bypassed when disabled
|
||||
err = rm.WaitForRateLimit(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error when rate limiting disabled, got %v", err)
|
||||
}
|
||||
}
|
||||
148
fileproc/resource_monitor_validation.go
Normal file
@@ -0,0 +1,148 @@
|
||||
package fileproc
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/ivuorinen/gibidify/utils"
|
||||
)
|
||||
|
||||
// ValidateFileProcessing checks if a file can be processed based on resource limits.
|
||||
func (rm *ResourceMonitor) ValidateFileProcessing(filePath string, fileSize int64) error {
|
||||
if !rm.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
rm.mu.RLock()
|
||||
defer rm.mu.RUnlock()
|
||||
|
||||
// Check if emergency stop is active
|
||||
if rm.emergencyStopRequested {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitMemory,
|
||||
"processing stopped due to emergency memory condition",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"emergency_stop_active": true,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check file count limit
|
||||
currentFiles := atomic.LoadInt64(&rm.filesProcessed)
|
||||
if int(currentFiles) >= rm.maxFiles {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitFiles,
|
||||
"maximum file count limit exceeded",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"current_files": currentFiles,
|
||||
"max_files": rm.maxFiles,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check total size limit
|
||||
currentTotalSize := atomic.LoadInt64(&rm.totalSizeProcessed)
|
||||
if currentTotalSize+fileSize > rm.maxTotalSize {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitTotalSize,
|
||||
"maximum total size limit would be exceeded",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"current_total_size": currentTotalSize,
|
||||
"file_size": fileSize,
|
||||
"max_total_size": rm.maxTotalSize,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check overall timeout
|
||||
if time.Since(rm.startTime) > rm.overallTimeout {
|
||||
return utils.NewStructuredError(
|
||||
utils.ErrorTypeValidation,
|
||||
utils.CodeResourceLimitTimeout,
|
||||
"overall processing timeout exceeded",
|
||||
filePath,
|
||||
map[string]interface{}{
|
||||
"processing_duration": time.Since(rm.startTime),
|
||||
"overall_timeout": rm.overallTimeout,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||

// CheckHardMemoryLimit checks if hard memory limit is exceeded and takes action.
func (rm *ResourceMonitor) CheckHardMemoryLimit() error {
	if !rm.enabled || rm.hardMemoryLimitMB <= 0 {
		return nil
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	currentMemory := int64(m.Alloc)

	if currentMemory > rm.hardMemoryLimitBytes {
		rm.mu.Lock()
		defer rm.mu.Unlock()

		// Log violation if not already logged
		violationKey := "hard_memory_limit"
		if !rm.violationLogged[violationKey] {
			logrus.Errorf("Hard memory limit exceeded: %dMB > %dMB",
				currentMemory/1024/1024, rm.hardMemoryLimitMB)
			rm.violationLogged[violationKey] = true
		}

		if rm.enableGracefulDegr {
			// Force garbage collection
			runtime.GC()

			// Check again after GC
			runtime.ReadMemStats(&m)
			currentMemory = int64(m.Alloc)

			if currentMemory > rm.hardMemoryLimitBytes {
				// Still over limit, activate emergency stop
				rm.emergencyStopRequested = true
				return utils.NewStructuredError(
					utils.ErrorTypeValidation,
					utils.CodeResourceLimitMemory,
					"hard memory limit exceeded, emergency stop activated",
					"",
					map[string]interface{}{
						"current_memory_mb": currentMemory / 1024 / 1024,
						"limit_mb":          rm.hardMemoryLimitMB,
						"emergency_stop":    true,
					},
				)
			} else {
				// Memory freed by GC, continue with degradation
				rm.degradationActive = true
				logrus.Info("Memory freed by garbage collection, continuing with degradation mode")
			}
		} else {
			// No graceful degradation, hard stop
			return utils.NewStructuredError(
				utils.ErrorTypeValidation,
				utils.CodeResourceLimitMemory,
				"hard memory limit exceeded",
				"",
				map[string]interface{}{
					"current_memory_mb": currentMemory / 1024 / 1024,
					"limit_mb":          rm.hardMemoryLimitMB,
				},
			)
		}
	}

	return nil
}
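A sketch of where the hard-limit check might sit in a processing loop follows. drainWithMemoryGuard and handle are hypothetical names; only CheckHardMemoryLimit is from this commit, and the actual call cadence lives outside this file.

// Hypothetical loop sketch: check the hard memory limit between files so an
// emergency stop surfaces before more work is queued.
func drainWithMemoryGuard(rm *ResourceMonitor, paths []string, handle func(string) error) error {
	for _, p := range paths {
		if err := rm.CheckHardMemoryLimit(); err != nil {
			return err // hard memory limit exceeded; emergency stop may be active
		}
		if err := handle(p); err != nil {
			return err
		}
	}
	return nil
}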
88
fileproc/resource_monitor_validation_test.go
Normal file
@@ -0,0 +1,88 @@
package fileproc

import (
	"testing"

	"github.com/spf13/viper"

	"github.com/ivuorinen/gibidify/testutil"
	"github.com/ivuorinen/gibidify/utils"
)

func TestResourceMonitor_FileCountLimit(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Set a very low file count limit for testing
	viper.Set("resourceLimits.enabled", true)
	viper.Set("resourceLimits.maxFiles", 2)

	rm := NewResourceMonitor()
	defer rm.Close()

	// First file should pass
	err := rm.ValidateFileProcessing("/tmp/file1.txt", 100)
	if err != nil {
		t.Errorf("Expected no error for first file, got %v", err)
	}
	rm.RecordFileProcessed(100)

	// Second file should pass
	err = rm.ValidateFileProcessing("/tmp/file2.txt", 100)
	if err != nil {
		t.Errorf("Expected no error for second file, got %v", err)
	}
	rm.RecordFileProcessed(100)

	// Third file should fail
	err = rm.ValidateFileProcessing("/tmp/file3.txt", 100)
	if err == nil {
		t.Error("Expected error for third file (exceeds limit), got nil")
	}

	// Verify it's the correct error type
	structErr, ok := err.(*utils.StructuredError)
	if !ok {
		t.Errorf("Expected StructuredError, got %T", err)
	} else if structErr.Code != utils.CodeResourceLimitFiles {
		t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitFiles, structErr.Code)
	}
}

func TestResourceMonitor_TotalSizeLimit(t *testing.T) {
	testutil.ResetViperConfig(t, "")

	// Set a low total size limit for testing (1KB)
	viper.Set("resourceLimits.enabled", true)
	viper.Set("resourceLimits.maxTotalSize", 1024)

	rm := NewResourceMonitor()
	defer rm.Close()

	// First small file should pass
	err := rm.ValidateFileProcessing("/tmp/small.txt", 500)
	if err != nil {
		t.Errorf("Expected no error for small file, got %v", err)
	}
	rm.RecordFileProcessed(500)

	// Second small file should pass
	err = rm.ValidateFileProcessing("/tmp/small2.txt", 400)
	if err != nil {
		t.Errorf("Expected no error for second small file, got %v", err)
	}
	rm.RecordFileProcessed(400)

	// Large file that would exceed limit should fail
	err = rm.ValidateFileProcessing("/tmp/large.txt", 200)
	if err == nil {
		t.Error("Expected error for file that would exceed size limit, got nil")
	}

	// Verify it's the correct error type
	structErr, ok := err.(*utils.StructuredError)
	if !ok {
		t.Errorf("Expected StructuredError, got %T", err)
	} else if structErr.Code != utils.CodeResourceLimitTotalSize {
		t.Errorf("Expected error code %s, got %s", utils.CodeResourceLimitTotalSize, structErr.Code)
	}
}