gibidify/testutil/verification_test.go
copilot-swe-agent[bot] e9bd694685 fix(security): prevent integer overflow in uint64 to int64 conversions
Add overflow checks before converting uint64 memory values to int64
to prevent potential integer overflow issues identified by gosec (G115).

- Add math.MaxInt64 checks in fileproc/backpressure.go
- Add math.MaxInt64 checks in fileproc/resource_monitor_validation.go
- Add math.MaxInt64 checks in fileproc/resource_monitor_metrics.go
- Add math.MaxInt64 check in benchmark/benchmark.go with nosec annotation

Co-authored-by: ivuorinen <11024+ivuorinen@users.noreply.github.com>
2025-10-04 23:17:02 +00:00
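
The guard described in the commit message is a standard pattern: compare the uint64 value against math.MaxInt64 before converting, and clamp (or report an error) when it would not fit. Below is a minimal sketch of that pattern, including the kind of #nosec annotation mentioned for benchmark/benchmark.go; the function name and call sites are illustrative only, not the actual gibidify code.

// Illustrative sketch of the overflow guard described in the commit message.
// safeUint64ToInt64 and its callers are hypothetical names, not gibidify code.
package main

import (
	"fmt"
	"math"
)

// safeUint64ToInt64 clamps a uint64 memory value at math.MaxInt64 so the
// conversion to int64 can never overflow (the gosec G115 finding).
func safeUint64ToInt64(v uint64) int64 {
	if v > math.MaxInt64 {
		return math.MaxInt64
	}
	return int64(v) // #nosec G115 -- bounds-checked above
}

func main() {
	fmt.Println(safeUint64ToInt64(42))             // 42
	fmt.Println(safeUint64ToInt64(math.MaxUint64)) // clamped to math.MaxInt64
}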

108 lines · 3.2 KiB · Go

package testutil

import (
	"errors"
	"os"
	"testing"
)

func TestVerifyContentContains(t *testing.T) {
	// Test successful verification
	t.Run("all substrings present", func(t *testing.T) {
		content := "This is a test file with multiple lines"
		VerifyContentContains(t, content, []string{"test file", "multiple lines"})
		// If we get here, the test passed
	})

	// Test empty expected substrings
	t.Run("empty expected substrings", func(t *testing.T) {
		content := "Any content"
		VerifyContentContains(t, content, []string{})
		// Should pass with no expected strings
	})

	// For failure cases, we'll test indirectly by verifying behavior
	t.Run("verify error reporting", func(t *testing.T) {
		// We can't easily test the failure case directly since it calls t.Errorf
		// But we can at least verify the function doesn't panic
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("VerifyContentContains panicked: %v", r)
			}
		}()

		// This would normally fail but we're just checking it doesn't panic
		content := "test"
		expected := []string{"not found"}

		// Create a sub-test that we expect to fail
		t.Run("expected_failure", func(t *testing.T) {
			t.Skip("Skipping actual failure test")
			VerifyContentContains(t, content, expected)
		})
	})
}

func TestMustSucceed(t *testing.T) {
	// Test with nil error (should succeed)
	t.Run("nil error", func(t *testing.T) {
		MustSucceed(t, nil, "successful operation")
		// If we get here, the test passed
	})

	// Test error behavior without causing test failure
	t.Run("verify error handling", func(t *testing.T) {
		// We can't test the failure case directly since it calls t.Fatalf
		// But we can verify the function exists and is callable
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("MustSucceed panicked: %v", r)
			}
		}()

		// Create a sub-test that we expect to fail
		t.Run("expected_failure", func(t *testing.T) {
			t.Skip("Skipping actual failure test")
			MustSucceed(t, errors.New("test error"), "failed operation")
		})
	})
}

func TestCloseFile(t *testing.T) {
	// Test closing a normal file
	t.Run("close normal file", func(t *testing.T) {
		file, err := os.CreateTemp(t.TempDir(), "test")
		if err != nil {
			t.Fatalf("Failed to create test file: %v", err)
		}

		CloseFile(t, file)

		// Verify file is closed by trying to write to it
		_, writeErr := file.Write([]byte("test"))
		if writeErr == nil {
			t.Error("Expected write to fail on closed file")
		}
	})

	// Test that CloseFile doesn't panic on already closed files
	// Note: We can't easily test the error case without causing test failure
	// since CloseFile calls t.Errorf, which is the expected behavior
	t.Run("verify CloseFile function exists and is callable", func(t *testing.T) {
		// This test just verifies the function signature and basic functionality
		// The error case is tested in integration tests where failures are expected
		file, err := os.CreateTemp(t.TempDir(), "test")
		if err != nil {
			t.Fatalf("Failed to create test file: %v", err)
		}

		// Test normal case - file should close successfully
		CloseFile(t, file)

		// Verify file is closed
		_, writeErr := file.Write([]byte("test"))
		if writeErr == nil {
			t.Error("Expected write to fail on closed file")
		}
	})
}
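
For context, here is a hypothetical example of how the three helpers exercised above are typically combined in a calling test. The helper call shapes (VerifyContentContains, MustSucceed, CloseFile) are taken from this file; the import path and the file contents are assumptions.

// Hypothetical usage of the testutil helpers; the import path is assumed.
package testutil_test

import (
	"os"
	"testing"

	"github.com/ivuorinen/gibidify/testutil"
)

func TestHelpersTogether(t *testing.T) {
	// Create a temporary file; fail fast if setup cannot proceed.
	file, err := os.CreateTemp(t.TempDir(), "example")
	testutil.MustSucceed(t, err, "create temp file")

	_, err = file.WriteString("hello from gibidify")
	testutil.MustSucceed(t, err, "write temp file")
	testutil.CloseFile(t, file)

	// Read the content back and assert the expected substrings are present.
	data, err := os.ReadFile(file.Name())
	testutil.MustSucceed(t, err, "read temp file")
	testutil.VerifyContentContains(t, string(data), []string{"hello", "gibidify"})
}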