Initial commit

This commit is contained in:
2025-07-30 19:12:53 +03:00
commit 74cbe1e469
83 changed files with 12567 additions and 0 deletions

306
internal/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,306 @@
// Package cache provides XDG-compliant caching functionality for gh-action-readme.
package cache
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"sync"
"time"
"github.com/adrg/xdg"
)
// Entry represents a cached item with TTL support.
// Entries are persisted to disk as JSON, so Value must be
// JSON-serializable to survive a save/load round trip.
type Entry struct {
	Value     any       `json:"value"`      // Cached payload (round-trips through JSON on persistence)
	ExpiresAt time.Time `json:"expires_at"` // Absolute expiry; entries past this time are treated as missing
	Size      int64     `json:"size"`       // Rough size estimate in bytes (see Cache.estimateSize)
}
// Cache provides thread-safe caching with TTL and XDG compliance.
// All access to data is guarded by mutex; persistence to disk happens
// asynchronously (see saveToDiskAsync). Must not be copied after creation
// (contains a sync.RWMutex); construct via NewCache.
type Cache struct {
	path       string           // XDG cache directory (where cache.json is written)
	data       map[string]Entry // In-memory cache; guarded by mutex
	mutex      sync.RWMutex     // Thread safety for data
	ticker     *time.Ticker     // Cleanup ticker; drives cleanupLoop
	done       chan bool        // Cleanup shutdown signal (see Close)
	defaultTTL time.Duration    // Default TTL applied by Set
	errorLog   bool             // Whether to log async-save errors (default: true; no setter is exposed)
}
// Config represents cache configuration passed to NewCache.
type Config struct {
	DefaultTTL      time.Duration // Default TTL for entries stored via Set
	CleanupInterval time.Duration // How often the background goroutine evicts expired entries
	MaxSize         int64         // Maximum cache size in bytes (0 = unlimited)
	// NOTE(review): MaxSize is stored but never read by any code visible
	// here — the size cap does not appear to be enforced; confirm intent.
}
// DefaultConfig returns the standard cache configuration: a 15-minute
// default TTL (tuned for API responses), a 5-minute cleanup cadence,
// and a 100MB size cap.
func DefaultConfig() *Config {
	cfg := Config{
		DefaultTTL:      15 * time.Minute,
		CleanupInterval: 5 * time.Minute,
		MaxSize:         100 << 20, // 100MB
	}
	return &cfg
}
// NewCache creates a new XDG-compliant cache instance.
//
// A nil config falls back to DefaultConfig(). The cache lives in the
// app-specific directory $XDG_CACHE_HOME/gh-action-readme (created if
// missing); existing entries are loaded from cache.json there, and a
// background goroutine periodically evicts expired entries until Close
// is called.
func NewCache(config *Config) (*Cache, error) {
	if config == nil {
		config = DefaultConfig()
	}

	// Resolve the app's cache file via XDG. We must ask for
	// "gh-action-readme/cache.json" rather than just the app name:
	// xdg.CacheFile returns a *file* path, and taking filepath.Dir of
	// a bare "gh-action-readme" yields the shared XDG cache root, which
	// made cache.json land in $XDG_CACHE_HOME/cache.json, colliding
	// with other applications.
	cacheFile, err := xdg.CacheFile(filepath.Join("gh-action-readme", "cache.json"))
	if err != nil {
		return nil, fmt.Errorf("failed to get XDG cache directory: %w", err)
	}

	// Ensure the app-specific cache directory exists.
	if err := os.MkdirAll(filepath.Dir(cacheFile), 0o755); err != nil {
		return nil, fmt.Errorf("failed to create cache directory: %w", err)
	}

	cache := &Cache{
		path:       filepath.Dir(cacheFile),
		data:       make(map[string]Entry),
		defaultTTL: config.DefaultTTL,
		done:       make(chan bool),
		errorLog:   true, // Enable error logging by default
	}

	// Load existing cache from disk; starting empty on failure is fine.
	_ = cache.loadFromDisk()

	// Start the periodic cleanup goroutine (stopped by Close).
	cache.ticker = time.NewTicker(config.CleanupInterval)
	go cache.cleanupLoop()

	return cache, nil
}
// Set stores a value under key using the cache's default TTL.
func (c *Cache) Set(key string, value any) error {
	ttl := c.defaultTTL
	return c.SetWithTTL(key, value, ttl)
}
// SetWithTTL stores a value in the cache with a caller-supplied TTL.
// The entry is written to the in-memory map immediately and persisted
// to disk in the background; this method always returns nil.
func (c *Cache) SetWithTTL(key string, value any, ttl time.Duration) error {
	c.mutex.Lock()
	c.data[key] = Entry{
		Value:     value,
		ExpiresAt: time.Now().Add(ttl),
		Size:      c.estimateSize(value), // rough JSON-length estimate
	}
	c.mutex.Unlock()

	// Persist asynchronously; failures are logged, not returned.
	c.saveToDiskAsync()
	return nil
}
// Get retrieves a value from the cache. It returns (nil, false) when
// the key is absent or its entry has expired; expired entries are left
// in place for the background cleanup goroutine to remove.
func (c *Cache) Get(key string) (any, bool) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	entry, ok := c.data[key]
	switch {
	case !ok:
		return nil, false
	case time.Now().After(entry.ExpiresAt):
		// Expired: report a miss; cleanup() deletes it later.
		return nil, false
	default:
		return entry.Value, true
	}
}
// Delete removes a key from the cache and persists the change in the
// background.
func (c *Cache) Delete(key string) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	delete(c.data, key)

	// Use the shared async-save helper so persistence failures are
	// logged like every other mutation (the previous ad-hoc goroutine
	// discarded the saveToDisk error silently).
	c.saveToDiskAsync()
}
// Clear removes all entries from the cache and deletes the on-disk
// cache file. A missing cache file is not treated as an error.
func (c *Cache) Clear() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.data = make(map[string]Entry)

	// Drop the persisted file too so a restart starts empty.
	err := os.Remove(filepath.Join(c.path, "cache.json"))
	if err == nil || os.IsNotExist(err) {
		return nil
	}
	return fmt.Errorf("failed to remove cache file: %w", err)
}
// Stats returns a snapshot of cache statistics: total entry count,
// number of already-expired (not yet evicted) entries, accumulated
// size estimate in bytes, and the cache directory path.
func (c *Cache) Stats() map[string]any {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	var (
		size    int64
		expired int
	)
	now := time.Now()
	for _, e := range c.data {
		size += e.Size
		if now.After(e.ExpiresAt) {
			expired++
		}
	}

	return map[string]any{
		"total_entries": len(c.data),
		"expired_count": expired,
		"total_size":    size,
		"cache_dir":     c.path,
	}
}
// Close shuts down the cache: it stops the cleanup ticker, signals the
// cleanup goroutine to exit, and writes the final state to disk.
// Close is intended to be called once; concurrent calls are not
// supported (a second sequential call is tolerated and saves again).
func (c *Cache) Close() error {
	if c.ticker != nil {
		c.ticker.Stop()
	}

	// Close the done channel so the cleanup goroutine is *guaranteed*
	// to observe shutdown. The previous non-blocking send could be
	// dropped if the goroutine happened to be inside cleanup() at that
	// moment; after ticker.Stop() its select would then block forever,
	// leaking the goroutine. The receive guard keeps a repeated Close
	// from panicking on a double close.
	select {
	case <-c.done:
		// Already closed by an earlier Close call.
	default:
		close(c.done)
	}

	// Save final state to disk.
	return c.saveToDisk()
}
// cleanupLoop drives periodic eviction of expired entries. It runs in
// its own goroutine and exits when the done channel is signaled.
func (c *Cache) cleanupLoop() {
	for {
		select {
		case <-c.done:
			return
		case <-c.ticker.C:
			c.cleanup()
		}
	}
}
// cleanup evicts every expired entry and persists the pruned map.
func (c *Cache) cleanup() {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	cutoff := time.Now()
	for key, entry := range c.data {
		if entry.ExpiresAt.Before(cutoff) {
			delete(c.data, key)
		}
	}

	// Persist the cleaned state in the background.
	c.saveToDiskAsync()
}
// loadFromDisk populates the in-memory map from cache.json. A missing
// file is treated as an empty cache; read or decode failures are
// returned as wrapped errors.
func (c *Cache) loadFromDisk() error {
	raw, err := os.ReadFile(filepath.Join(c.path, "cache.json"))
	if os.IsNotExist(err) {
		return nil // no cache file yet — start empty
	}
	if err != nil {
		return fmt.Errorf("failed to read cache file: %w", err)
	}

	c.mutex.Lock()
	defer c.mutex.Unlock()

	if err := json.Unmarshal(raw, &c.data); err != nil {
		return fmt.Errorf("failed to unmarshal cache data: %w", err)
	}
	return nil
}
// saveToDisk persists the cache to cache.json. The map is copied under
// a read lock so marshaling and file I/O do not block other cache
// operations. The file is written atomically (temp file + rename) so
// the concurrent goroutines spawned by saveToDiskAsync cannot
// interleave writes and leave a partially-written or corrupt file.
func (c *Cache) saveToDisk() error {
	c.mutex.RLock()
	data := make(map[string]Entry, len(c.data))
	for k, v := range c.data {
		data[k] = v
	}
	c.mutex.RUnlock()

	jsonData, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal cache data: %w", err)
	}

	// Write to a temp file in the same directory, then rename over the
	// real file. Rename within one directory is atomic on POSIX, so
	// readers never observe a truncated cache.json even if saves race.
	tmp, err := os.CreateTemp(c.path, "cache-*.json.tmp")
	if err != nil {
		return fmt.Errorf("failed to create temp cache file: %w", err)
	}
	tmpName := tmp.Name()
	if _, err := tmp.Write(jsonData); err != nil {
		_ = tmp.Close()
		_ = os.Remove(tmpName)
		return fmt.Errorf("failed to write cache file: %w", err)
	}
	if err := tmp.Close(); err != nil {
		_ = os.Remove(tmpName)
		return fmt.Errorf("failed to close temp cache file: %w", err)
	}
	// Match the permissions the old os.WriteFile call used (0644);
	// CreateTemp defaults to 0600.
	if err := os.Chmod(tmpName, 0o644); err != nil {
		_ = os.Remove(tmpName)
		return fmt.Errorf("failed to set cache file permissions: %w", err)
	}
	if err := os.Rename(tmpName, filepath.Join(c.path, "cache.json")); err != nil {
		_ = os.Remove(tmpName)
		return fmt.Errorf("failed to write cache file: %w", err)
	}
	return nil
}
// saveToDiskAsync persists the cache in a background goroutine,
// logging (rather than returning) any failure when error logging is
// enabled. Each call spawns a fresh goroutine.
func (c *Cache) saveToDiskAsync() {
	go func() {
		err := c.saveToDisk()
		if err == nil || !c.errorLog {
			return
		}
		log.Printf("gh-action-readme cache: failed to save cache to disk: %v", err)
	}()
}
// estimateSize approximates a value's footprint in bytes by measuring
// the length of its JSON encoding; values that cannot be marshaled
// fall back to a flat estimate of 100.
func (c *Cache) estimateSize(value any) int64 {
	encoded, err := json.Marshal(value)
	if err != nil {
		return 100 // arbitrary default when the value can't be encoded
	}
	return int64(len(encoded))
}
// GetOrSet returns the cached value for key, invoking getter and
// caching its result on a miss. Errors from getter are returned
// unchanged and nothing is cached in that case.
//
// NOTE(review): there is no per-key locking between the Get and Set,
// so concurrent callers that miss simultaneously may each invoke
// getter once — confirm this is acceptable for all call sites.
func (c *Cache) GetOrSet(key string, getter func() (any, error)) (any, error) {
	if cached, ok := c.Get(key); ok {
		return cached, nil
	}

	// Miss: fetch from the source.
	fresh, err := getter()
	if err != nil {
		return nil, err
	}

	// Best-effort cache write; the fresh value is returned regardless.
	_ = c.Set(key, fresh)
	return fresh, nil
}

531
internal/cache/cache_test.go vendored Normal file
View File

@@ -0,0 +1,531 @@
package cache
import (
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/ivuorinen/gh-action-readme/testutil"
)
// TestNewCache verifies construction with nil (default) and custom
// configs, including that the default TTL is picked up correctly.
func TestNewCache(t *testing.T) {
	cases := []struct {
		name        string
		config      *Config
		expectError bool
	}{
		{name: "default config", config: nil, expectError: false},
		{
			name: "custom config",
			config: &Config{
				DefaultTTL:      30 * time.Minute,
				CleanupInterval: 10 * time.Minute,
				MaxSize:         50 * 1024 * 1024,
			},
			expectError: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Redirect XDG_CACHE_HOME to an isolated temp directory and
			// restore the previous value when the subtest finishes.
			tmpDir, cleanup := testutil.TempDir(t)
			defer cleanup()
			prev := os.Getenv("XDG_CACHE_HOME")
			_ = os.Setenv("XDG_CACHE_HOME", tmpDir)
			defer func() {
				if prev != "" {
					_ = os.Setenv("XDG_CACHE_HOME", prev)
				} else {
					_ = os.Unsetenv("XDG_CACHE_HOME")
				}
			}()

			cache, err := NewCache(tc.config)
			if tc.expectError {
				testutil.AssertError(t, err)
				return
			}
			testutil.AssertNoError(t, err)

			if cache == nil {
				t.Fatal("expected cache to be created")
			}

			// TTL should come from the config when set, else default to 15m.
			wantTTL := 15 * time.Minute
			if tc.config != nil && tc.config.DefaultTTL != 0 {
				wantTTL = tc.config.DefaultTTL
			}
			testutil.AssertEqual(t, wantTTL, cache.defaultTTL)

			_ = cache.Close()
		})
	}
}
// TestCache_SetAndGet round-trips several value shapes through the cache.
func TestCache_SetAndGet(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	cases := []struct {
		name     string
		key      string
		value    any
		expected any
	}{
		{name: "string value", key: "test-key", value: "test-value", expected: "test-value"},
		{
			name:     "struct value",
			key:      "struct-key",
			value:    map[string]string{"foo": "bar"},
			expected: map[string]string{"foo": "bar"},
		},
		{name: "nil value", key: "nil-key", value: nil, expected: nil},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Store, then read back and compare.
			testutil.AssertNoError(t, cache.Set(tc.key, tc.value))

			got, ok := cache.Get(tc.key)
			if !ok {
				t.Fatal("expected value to exist in cache")
			}
			testutil.AssertEqual(t, tc.expected, got)
		})
	}
}
// TestCache_TTL checks that an entry is visible before its TTL elapses
// and reads as a miss afterwards.
func TestCache_TTL(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	// Store an entry that expires almost immediately.
	const ttl = 100 * time.Millisecond
	testutil.AssertNoError(t, cache.SetWithTTL("short-lived", "value", ttl))

	// Immediately after Set the entry must be visible.
	got, ok := cache.Get("short-lived")
	if !ok {
		t.Fatal("expected value to exist immediately")
	}
	testutil.AssertEqual(t, "value", got)

	// After the TTL (plus slack) the entry must read as a miss.
	time.Sleep(ttl + 50*time.Millisecond)
	if _, ok = cache.Get("short-lived"); ok {
		t.Error("expected value to be expired")
	}
}
// TestCache_GetOrSet checks that the getter runs exactly once: on the
// first (miss) call, and never on the subsequent (hit) call.
func TestCache_GetOrSet(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	// Unique key so leftovers from other tests can't interfere.
	key := fmt.Sprintf("test-key-%d", time.Now().UnixNano())

	calls := 0
	getter := func() (any, error) {
		calls++
		return fmt.Sprintf("generated-value-%d", calls), nil
	}

	// A miss must invoke the getter exactly once.
	first, err := cache.GetOrSet(key, getter)
	testutil.AssertNoError(t, err)
	testutil.AssertEqual(t, "generated-value-1", first)
	testutil.AssertEqual(t, 1, calls)

	// A hit must return the cached value without calling the getter again.
	second, err := cache.GetOrSet(key, getter)
	testutil.AssertNoError(t, err)
	testutil.AssertEqual(t, "generated-value-1", second)
	testutil.AssertEqual(t, 1, calls)
}
// TestCache_GetOrSetError verifies that a failing getter propagates its
// error and that nothing is cached on failure.
func TestCache_GetOrSetError(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	failing := func() (any, error) {
		return nil, fmt.Errorf("getter error")
	}

	got, err := cache.GetOrSet("error-key", failing)
	testutil.AssertError(t, err)
	testutil.AssertStringContains(t, err.Error(), "getter error")
	if got != nil {
		t.Errorf("expected nil value on error, got: %v", got)
	}

	// The failed fetch must not have populated the cache.
	if _, ok := cache.Get("error-key"); ok {
		t.Error("expected no value to be cached on error")
	}
}
// TestCache_ConcurrentAccess hammers the cache from several goroutines
// with disjoint keys to exercise the mutex paths (run with -race).
func TestCache_ConcurrentAccess(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	const (
		workers      = 10
		opsPerWorker = 100
	)

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func(id int) {
			defer wg.Done()
			for op := 0; op < opsPerWorker; op++ {
				key := fmt.Sprintf("key-%d-%d", id, op)
				want := fmt.Sprintf("value-%d-%d", id, op)

				if err := cache.Set(key, want); err != nil {
					t.Errorf("error setting value: %v", err)
					return
				}

				got, ok := cache.Get(key)
				if !ok {
					t.Errorf("expected key %s to exist", key)
					return
				}
				if got != want {
					t.Errorf("expected %s, got %s", want, got)
					return
				}
			}
		}(w)
	}
	wg.Wait()
}
// TestCache_Persistence verifies that entries survive a Close/reopen
// cycle via the on-disk cache file.
func TestCache_Persistence(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()

	// Populate a cache and close it so the state is flushed to disk.
	first := createTestCache(t, tmpDir)
	testutil.AssertNoError(t, first.Set("persistent-key", "persistent-value"))
	testutil.AssertNoError(t, first.Close())

	// A fresh instance over the same directory must see the saved entry.
	second := createTestCache(t, tmpDir)
	defer func() { _ = second.Close() }()

	got, ok := second.Get("persistent-key")
	if !ok {
		t.Fatal("expected persistent value to exist after restart")
	}
	testutil.AssertEqual(t, "persistent-value", got)
}
// TestCache_Clear verifies that Clear drops every stored entry.
func TestCache_Clear(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	// Seed two entries and confirm they landed.
	_ = cache.Set("key1", "value1")
	_ = cache.Set("key2", "value2")
	_, ok1 := cache.Get("key1")
	_, ok2 := cache.Get("key2")
	if !ok1 || !ok2 {
		t.Fatal("expected test data to exist before clear")
	}

	// Clear must succeed and leave the cache empty.
	testutil.AssertNoError(t, cache.Clear())

	_, ok1 = cache.Get("key1")
	_, ok2 = cache.Get("key2")
	if ok1 || ok2 {
		t.Error("expected data to be cleared")
	}
}
// TestCache_Stats checks the shape and plausibility of the Stats map.
func TestCache_Stats(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	_ = cache.Set("key1", "value1")
	_ = cache.Set("key2", "larger-value-with-more-content")

	stats := cache.Stats()

	// The stats map must expose the expected keys.
	for _, key := range []string{"cache_dir", "total_entries", "total_size"} {
		if _, ok := stats[key]; !ok {
			t.Errorf("expected %s in stats", key)
		}
	}

	// Both entries should be counted.
	entries, ok := stats["total_entries"].(int)
	if !ok {
		t.Error("expected total_entries to be int")
	}
	if entries != 2 {
		t.Errorf("expected 2 entries, got %d", entries)
	}

	// The accumulated size estimate must be positive.
	size, ok := stats["total_size"].(int64)
	if !ok {
		t.Error("expected total_size to be int64")
	}
	if size <= 0 {
		t.Errorf("expected positive total size, got %d", size)
	}
}
// TestCache_CleanupExpiredEntries verifies the background sweep evicts
// entries after their TTL using an aggressive TTL/cleanup schedule.
func TestCache_CleanupExpiredEntries(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()

	config := &Config{
		DefaultTTL:      50 * time.Millisecond,
		CleanupInterval: 30 * time.Millisecond,
		MaxSize:         1024 * 1024,
	}

	// Redirect XDG_CACHE_HOME for this test and restore it afterwards.
	prev := os.Getenv("XDG_CACHE_HOME")
	_ = os.Setenv("XDG_CACHE_HOME", tmpDir)
	defer func() {
		if prev != "" {
			_ = os.Setenv("XDG_CACHE_HOME", prev)
		} else {
			_ = os.Unsetenv("XDG_CACHE_HOME")
		}
	}()

	cache, err := NewCache(config)
	testutil.AssertNoError(t, err)
	defer func() { _ = cache.Close() }()

	// The entry must be present right after being set.
	testutil.AssertNoError(t, cache.Set("expiring-key", "expiring-value"))
	if _, ok := cache.Get("expiring-key"); !ok {
		t.Fatal("expected entry to exist initially")
	}

	// Sleep past one TTL plus one cleanup tick (with slack); by then the
	// background sweep should have evicted the entry.
	time.Sleep(config.DefaultTTL + config.CleanupInterval + 20*time.Millisecond)
	if _, ok := cache.Get("expiring-key"); ok {
		t.Error("expected expired entry to be cleaned up")
	}
}
// TestCache_ErrorHandling is a table-driven harness for error-path
// scenarios; currently it only exercises a normal Set on a valid cache
// (permission-failure setups are noted as future work).
func TestCache_ErrorHandling(t *testing.T) {
	tests := []struct {
		name        string
		setupFunc   func(t *testing.T) *Cache
		testFunc    func(t *testing.T, cache *Cache)
		expectError bool
	}{
		{
			name: "invalid cache directory permissions",
			setupFunc: func(t *testing.T) *Cache {
				// This test would require special setup for permission testing.
				// For now, we'll create a valid cache and test other error scenarios.
				tmpDir, cleanup := testutil.TempDir(t)
				// Register the temp-dir cleanup instead of discarding it —
				// the original `tmpDir, _ :=` leaked the directory.
				t.Cleanup(cleanup)
				return createTestCache(t, tmpDir)
			},
			testFunc: func(t *testing.T, cache *Cache) {
				// Test setting a value that might cause issues during marshaling.
				// Circular reference would cause JSON marshal to fail, but
				// Go's JSON package handles most cases gracefully.
				err := cache.Set("test", "normal-value")
				testutil.AssertNoError(t, err)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cache := tt.setupFunc(t)
			defer func() { _ = cache.Close() }()
			tt.testFunc(t, cache)
		})
	}
}
// TestCache_AsyncSaveErrorHandling checks the cache remains usable
// while background saves run (error logging itself is not captured).
func TestCache_AsyncSaveErrorHandling(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	// Setting a value kicks off saveToDiskAsync in the background.
	testutil.AssertNoError(t, cache.Set("test-key", "test-value"))

	// Give the background save a moment to finish. We can't easily
	// assert on the error log here; we only verify the cache still
	// works and nothing panicked.
	time.Sleep(100 * time.Millisecond)

	got, ok := cache.Get("test-key")
	if !ok {
		t.Error("expected value to exist after async save")
	}
	testutil.AssertEqual(t, "test-value", got)
}
// TestCache_EstimateSize checks the JSON-based size estimate lands in
// a plausible band for strings and composite values.
func TestCache_EstimateSize(t *testing.T) {
	tmpDir, cleanup := testutil.TempDir(t)
	defer cleanup()
	cache := createTestCache(t, tmpDir)
	defer func() { _ = cache.Close() }()

	cases := []struct {
		name    string
		value   any
		minSize int64
		maxSize int64
	}{
		{name: "small string", value: "test", minSize: 4, maxSize: 50},
		{name: "large string", value: strings.Repeat("a", 1000), minSize: 1000, maxSize: 1100},
		{
			name: "struct",
			value: map[string]any{
				"key1": "value1",
				"key2": 42,
				"key3": []string{"a", "b", "c"},
			},
			minSize: 30,
			maxSize: 200,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := cache.estimateSize(tc.value)
			if got < tc.minSize || got > tc.maxSize {
				t.Errorf("expected size between %d and %d, got %d", tc.minSize, tc.maxSize, got)
			}
		})
	}
}
// createTestCache creates a cache instance for testing, pointing
// XDG_CACHE_HOME at tmpDir and restoring the original value when the
// test ends.
func createTestCache(t *testing.T, tmpDir string) *Cache {
	t.Helper()

	prev := os.Getenv("XDG_CACHE_HOME")
	_ = os.Setenv("XDG_CACHE_HOME", tmpDir)
	t.Cleanup(func() {
		if prev != "" {
			_ = os.Setenv("XDG_CACHE_HOME", prev)
		} else {
			_ = os.Unsetenv("XDG_CACHE_HOME")
		}
	})

	cache, err := NewCache(DefaultConfig())
	testutil.AssertNoError(t, err)
	return cache
}