feat: refactor plugin architecture, enhance linting, CI & tooling

- Major refactor of core Fish functions for modularity, caching, and error handling
- Improved `.editorconfig` and Makefile for stricter formatting and linting standards
- Expanded linting support: added EditorConfig checks, auto-install for missing tools, and Makefile targets
- Enhanced CI workflow with clearer permissions and job steps in GitHub Actions
- Updated documentation in `README.md` and `CLAUDE.md` to reflect new features, advanced developer tools, and contribution guidelines
- Improved Node.js version manager detection, switching, and installation logic
- Added/updated utility functions for configuration, silent mode, notifications, and version extraction (see the usage sketch below)
- Various bug fixes, code quality improvements, and expanded test coverage
2025-07-16 15:12:14 +03:00
parent 8c9febaf8f
commit 5b680f06ac
30 changed files with 3106 additions and 248 deletions
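For context, the configuration helpers covered by the new unit tests look like this in use (a sketch assembled from the calls those tests make; any friendlier user-facing wrapper is outside this diff):

#!/usr/bin/env fish
# Hypothetical session using the config helpers exercised in tests/unit
_nvm_auto_use_config_silent on # suppress switch notifications
_nvm_auto_use_config_debounce 500 # throttle directory-change checks to 500 ms
_nvm_auto_use_config_exclude ~/scratch # never auto-switch inside this tree
_nvm_auto_use_config_manager fnm # prefer fnm among detected managers
_nvm_auto_use_config_show # print the effective configuration
_nvm_auto_use_config_reset # restore defaults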


@@ -0,0 +1,178 @@
#!/usr/bin/env fish
# Integration tests for version switching functionality
# All tests operate under $TEST_DIR (a temporary directory) for safety
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_nvmrc_detection
echo "Testing .nvmrc file detection..."
# Create test project with .nvmrc in temp dir
mkdir -p "$TEST_DIR/test_project"
echo "18.17.0" >"$TEST_DIR/test_project/.nvmrc"
cd "$TEST_DIR/test_project"
set -l found_file (nvm_find_nvmrc)
assert_contains "$found_file" ".nvmrc" "Found .nvmrc file in current directory"
# Test parent directory search
mkdir -p subdir
cd subdir
set found_file (nvm_find_nvmrc)
assert_contains "$found_file" ".nvmrc" "Found .nvmrc file in parent directory"
cd "$TEST_DIR"
rm -rf "$TEST_DIR/test_project"
return 0
end
function test_version_extraction
echo "Testing version extraction from different file formats..."
cd "$TEST_DIR"
# Test .nvmrc
echo "18.17.0" >test.nvmrc
set -l version (nvm_extract_version "test.nvmrc")
assert_equals "$version" "18.17.0" "Extracted version from .nvmrc"
# Test .node-version
echo "16.20.0" >test.node-version
set version (nvm_extract_version "test.node-version")
assert_equals "$version" "16.20.0" "Extracted version from .node-version"
# Test .tool-versions
echo "nodejs 20.5.0" >test.tool-versions
set version (nvm_extract_version "test.tool-versions:nodejs")
assert_equals "$version" "20.5.0" "Extracted version from .tool-versions"
# Test package.json (requires jq)
if command -q jq
echo '{"engines": {"node": ">=18.0.0"}}' >test.package.json
set version (nvm_extract_version "test.package.json:engines.node")
assert_equals "$version" "18.0.0" "Extracted version from package.json"
else
echo " Skipping package.json test (jq not available)"
end
# Cleanup
rm -f test.nvmrc test.node-version test.tool-versions test.package.json
return 0
end
function test_manager_detection
echo "Testing version manager detection..."
cd "$TEST_DIR"
set -l managers (nvm_compat_detect)
if test -n "$managers"
echo "✅ Found version managers: $managers"
else
echo " No version managers found (expected in test environment)"
end
return 0
end
function test_error_recovery
echo "Testing error recovery mechanisms..."
cd "$TEST_DIR"
# Test invalid version handling
echo "invalid.version" >invalid.nvmrc
set -l result (nvm_extract_version "invalid.nvmrc" 2>/dev/null)
if test -z "$result"
echo "✅ Invalid version file handled gracefully"
else
echo "❌ Invalid version should return empty result"
end
# Test missing file handling
nvm_extract_version "nonexistent.nvmrc" >/dev/null 2>&1
set -l status_code $status
test $status_code -ne 0
and echo "✅ Missing file handled gracefully"
or echo "❌ Missing file should return error"
rm -f invalid.nvmrc
return 0
end
function test_async_operations
echo "Testing async operations..."
cd "$TEST_DIR"
# Create test version file
echo "18.17.0" >async_test.nvmrc
# Test async version check
set -l job_id (nvm_async version_check "async_test.nvmrc")
if test -n "$job_id"
echo "✅ Async version check started"
# Wait for completion
nvm_async wait "$job_id" 5
and echo "✅ Async operation completed"
or echo "⚠️ Async operation timed out"
else
echo " Async operation may have completed immediately"
end
rm -f async_test.nvmrc
return 0
end
function test_cache_integration
echo "Testing cache integration..."
cd "$TEST_DIR"
# Clear cache first
nvm_cache clear
# Create test file
echo "18.17.0" >cache_test.nvmrc
# First access should miss the cache and read the file
set -l version1 (nvm_extract_version "cache_test.nvmrc")
# Second access should hit cache (if caching is implemented)
set -l version2 (nvm_extract_version "cache_test.nvmrc")
assert_equals "$version1" "$version2" "Consistent results from cache"
rm -f cache_test.nvmrc
return 0
end
function main
setup_test_env
set -l failed 0
test_nvmrc_detection; or set failed (math "$failed + 1")
test_version_extraction; or set failed (math "$failed + 1")
test_manager_detection; or set failed (math "$failed + 1")
test_error_recovery; or set failed (math "$failed + 1")
test_async_operations; or set failed (math "$failed + 1")
test_cache_integration; or set failed (math "$failed + 1")
cleanup_test_env
if test $failed -eq 0
echo "🎉 All integration tests passed!"
return 0
else
echo "💥 $failed integration test(s) failed"
return 1
end
end
main

tests/test_runner.fish Executable file

@@ -0,0 +1,189 @@
#!/usr/bin/env fish
# Test runner for nvm-auto-use.fish
function run_tests -d "Run all tests"
set -l test_files
set -l failed_tests 0
set -l total_tests 0
echo "🧪 Running nvm-auto-use.fish test suite"
echo "======================================"
# Find all test files
for test_file in tests/unit/*.fish tests/integration/*.fish
if test -f "$test_file"
set test_files $test_files "$test_file"
end
end
if test (count $test_files) -eq 0
echo "❌ No test files found"
return 1
end
# Run each test file
for test_file in $test_files
echo
echo "📁 Running $(basename $test_file)"
echo "$(string repeat -N (string length "📁 Running $(basename $test_file)") -)"
fish "$test_file"
set -l test_status $status
if test $test_status -eq 0
echo "✅ $(basename $test_file) passed"
else
echo "❌ $(basename $test_file) failed"
set failed_tests (math "$failed_tests + 1")
end
end
set total_tests (math "$total_tests + 1")
end
# Summary
echo
echo "📊 Test Results"
echo "==============="
echo "Total tests: $total_tests"
echo "Passed: "(math "$total_tests - $failed_tests")
echo "Failed: $failed_tests"
if test $failed_tests -eq 0
echo
echo "🎉 All tests passed!"
return 0
else
echo
echo "💥 $failed_tests test(s) failed"
return 1
end
end
function assert_equals -d "Assert two values are equal"
set -l actual "$argv[1]"
set -l expected "$argv[2]"
set -l message "$argv[3]"
if test "$actual" = "$expected"
echo "$message"
return 0
else
echo "$message"
echo " Expected: '$expected'"
echo " Actual: '$actual'"
return 1
end
end
function assert_not_equals -d "Assert two values are not equal"
set -l actual "$argv[1]"
set -l expected "$argv[2]"
set -l message "$argv[3]"
if test "$actual" != "$expected"
echo "$message"
return 0
else
echo "$message"
echo " Values should not be equal: '$actual'"
return 1
end
end
function assert_contains -d "Assert string contains substring"
set -l string "$argv[1]"
set -l substring "$argv[2]"
set -l message "$argv[3]"
if string match -q "*$substring*" "$string"
echo "$message"
return 0
else
echo "$message"
echo " String: '$string'"
echo " Should contain: '$substring'"
return 1
end
end
function assert_file_exists -d "Assert file exists"
set -l file_path "$argv[1]"
set -l message "$argv[2]"
if test -f "$file_path"
echo "$message"
return 0
else
echo "$message"
echo " File not found: '$file_path'"
return 1
end
end
function assert_command_success -d "Assert command succeeds"
set -l command "$argv[1]"
set -l message "$argv[2]"
if eval "$command" >/dev/null 2>&1
echo "$message"
return 0
else
echo "$message"
echo " Command failed: '$command'"
return 1
end
end
function setup_test_env -d "Set up test environment"
# Resolve the fixtures path before changing directories
set -g TEST_FIXTURES "$PWD/tests/fixtures"
# Create temporary test directory
set -g TEST_DIR (mktemp -d)
cd "$TEST_DIR"
# Create default fixtures on first run, then copy them into the sandbox
if not test -d "$TEST_FIXTURES"
mkdir -p "$TEST_FIXTURES"
echo "18.17.0" >"$TEST_FIXTURES/.nvmrc"
echo "16.20.0" >"$TEST_FIXTURES/.node-version"
echo "nodejs 20.5.0" >"$TEST_FIXTURES/.tool-versions"
echo '{"engines": {"node": ">=18.0.0"}}' >"$TEST_FIXTURES/package.json"
end
cp -R "$TEST_FIXTURES" "$TEST_DIR/fixtures"
echo "🔧 Test environment set up in $TEST_DIR"
end
function cleanup_test_env -d "Clean up test environment"
if set -q TEST_DIR
# Safety checks: never delete /, $HOME, or empty path
if test -z "$TEST_DIR"
echo "⚠️ TEST_DIR is empty, refusing to delete"
return 1
end
if test "$TEST_DIR" = /
echo "⚠️ TEST_DIR is /, refusing to delete"
return 1
end
if test "$TEST_DIR" = "$HOME"
echo "⚠️ TEST_DIR is $HOME, refusing to delete"
return 1
end
if string match -q "$HOME*" "$TEST_DIR"; and test "$TEST_DIR" = "$HOME"
echo "⚠️ TEST_DIR is $HOME or a parent, refusing to delete"
return 1
end
if test (string length "$TEST_DIR") -lt 8
echo "⚠️ TEST_DIR path too short, refusing to delete: $TEST_DIR"
return 1
end
rm -rf "$TEST_DIR"
echo "🧹 Test environment cleaned up"
end
end
# Run the suite only when this file is executed directly; sourcing test
# files set _TEST_RUNNER_SOURCED first so the runner does not re-launch
# itself recursively
if test (basename (status current-filename)) = "test_runner.fish"; and not set -q _TEST_RUNNER_SOURCED
run_tests
end
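For reference, a minimal new test file that this runner would discover via the tests/unit/*.fish glob could look like the following (hypothetical file; assertion helpers and environment hooks as defined above):

#!/usr/bin/env fish
# Hypothetical tests/unit/test_example.fish
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_example
echo "Testing example behavior..."
assert_equals (math "1 + 1") 2 "math adds correctly"
assert_contains "hello world" world "substring found"
end
function main
setup_test_env
test_example
cleanup_test_env
end
main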


@@ -0,0 +1,109 @@
#!/usr/bin/env fish
# Unit tests for nvm_async helper functions
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_async_version_check
echo "Testing _nvm_async_version_check..."
# Create a test version file
echo "18.17.0" >async_test.nvmrc
# Should return job id (background job)
set -l job_id (_nvm_async_version_check "async_test.nvmrc")
if test -n "$job_id"
echo "✅ _nvm_async_version_check started job $job_id"
else
echo "❌ _nvm_async_version_check did not start a job"
return 1
end
# Wait for job completion
_nvm_async_wait "$job_id" 5
and echo "✅ Async job completed"
or echo "⚠️ Async job timed out"
rm -f async_test.nvmrc
return 0
end
function test_async_manager_check
echo "Testing _nvm_async_manager_check..."
# Should return job id (background job)
set -l job_id (_nvm_async_manager_check "nvm")
if test -n "$job_id"
echo "✅ _nvm_async_manager_check started job $job_id"
else
echo "❌ _nvm_async_manager_check did not start a job"
return 1
end
# Wait for job completion
_nvm_async_wait "$job_id" 5
and echo "✅ Async manager check job completed"
or echo "⚠️ Async manager check job timed out"
return 0
end
function test_async_cleanup
echo "Testing _nvm_async_cleanup..."
# Start a dummy background job
sleep 2 &
set -l job_id (jobs -l | tail -n 1 | awk '{print $1}')
if test -n "$job_id"
echo "✅ Dummy job started: $job_id"
else
echo "❌ Failed to start dummy job"
return 1
end
# Cleanup should not error
_nvm_async_cleanup
echo "✅ _nvm_async_cleanup executed"
return 0
end
function test_async_wait
echo "Testing _nvm_async_wait..."
# Start a quick background job
sleep 1 &
set -l job_id (jobs -l | tail -n 1 | awk '{print $1}')
if test -n "$job_id"
_nvm_async_wait "$job_id" 3
and echo "✅ _nvm_async_wait completed for job $job_id"
or echo "⚠️ _nvm_async_wait timed out for job $job_id"
else
echo "❌ Failed to start background job for wait test"
return 1
end
return 0
end
function main
setup_test_env
set -l failed 0
test_async_version_check; or set failed (math "$failed + 1")
test_async_manager_check; or set failed (math "$failed + 1")
test_async_cleanup; or set failed (math "$failed + 1")
test_async_wait; or set failed (math "$failed + 1")
cleanup_test_env
if test $failed -eq 0
echo "🎉 All async helper tests passed!"
return 0
else
echo "💥 $failed async helper test(s) failed"
return 1
end
end
main


@@ -0,0 +1,109 @@
#!/usr/bin/env fish
# Unit tests for nvm_auto_use_config helper functions
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_config_show
echo "Testing _nvm_auto_use_config_show..."
# Should print config summary (no error)
_nvm_auto_use_config_show
and echo "✅ Config show prints summary"
or echo "❌ Config show failed"
end
function test_config_auto_install
echo "Testing _nvm_auto_use_config_auto_install..."
_nvm_auto_use_config_auto_install on
test -z "$_nvm_auto_use_no_install"
and echo "✅ Auto-install enabled"
or echo "❌ Auto-install enable failed"
_nvm_auto_use_config_auto_install off
test -n "$_nvm_auto_use_no_install"
and echo "✅ Auto-install disabled"
or echo "❌ Auto-install disable failed"
end
function test_config_silent
echo "Testing _nvm_auto_use_config_silent..."
_nvm_auto_use_config_silent on
test -n "$_nvm_auto_use_silent"
and echo "✅ Silent mode enabled"
or echo "❌ Silent mode enable failed"
_nvm_auto_use_config_silent off
test -z "$_nvm_auto_use_silent"
and echo "✅ Silent mode disabled"
or echo "❌ Silent mode disable failed"
end
function test_config_debounce
echo "Testing _nvm_auto_use_config_debounce..."
_nvm_auto_use_config_debounce 1234
assert_equals "$_nvm_auto_use_debounce_ms" 1234 "Debounce set correctly"
_nvm_auto_use_config_debounce ""
assert_equals "$_nvm_auto_use_debounce_ms" 1234 "Debounce unchanged on invalid input"
end
function test_config_exclude_include
echo "Testing _nvm_auto_use_config_exclude and _nvm_auto_use_config_include..."
set -e _nvm_auto_use_excluded_dirs
_nvm_auto_use_config_exclude testdir
assert_contains "$_nvm_auto_use_excluded_dirs" testdir "Exclude added"
_nvm_auto_use_config_include testdir
assert_not_equals "$_nvm_auto_use_excluded_dirs" testdir "Exclude removed"
end
function test_config_manager
echo "Testing _nvm_auto_use_config_manager..."
_nvm_auto_use_config_manager nvm
assert_equals "$_nvm_auto_use_preferred_manager" nvm "Manager set to nvm"
_nvm_auto_use_config_manager ""
test -z "$_nvm_auto_use_preferred_manager"
and echo "✅ Manager reset to auto-detect"
or echo "❌ Manager reset failed"
_nvm_auto_use_config_manager invalid
assert_not_equals "$_nvm_auto_use_preferred_manager" invalid "Invalid manager not set"
end
function test_config_reset
echo "Testing _nvm_auto_use_config_reset..."
set -g _nvm_auto_use_no_install 1
set -g _nvm_auto_use_silent 1
set -g _nvm_auto_use_debounce_ms 999
set -g _nvm_auto_use_excluded_dirs foo
set -g _nvm_auto_use_preferred_manager nvm
_nvm_auto_use_config_reset
test -z "$_nvm_auto_use_no_install"
and test -z "$_nvm_auto_use_silent"
and test -z "$_nvm_auto_use_debounce_ms"
and test -z "$_nvm_auto_use_excluded_dirs"
and test -z "$_nvm_auto_use_preferred_manager"
and echo "✅ Config reset works"
or echo "❌ Config reset failed"
end
function main
set -l failed 0
test_config_show; or set failed (math "$failed + 1")
test_config_auto_install; or set failed (math "$failed + 1")
test_config_silent; or set failed (math "$failed + 1")
test_config_debounce; or set failed (math "$failed + 1")
test_config_exclude_include; or set failed (math "$failed + 1")
test_config_manager; or set failed (math "$failed + 1")
test_config_reset; or set failed (math "$failed + 1")
if test $failed -eq 0
echo "🎉 All config helper tests passed!"
return 0
else
echo "💥 $failed config helper test(s) failed"
return 1
end
end
main


@@ -0,0 +1,134 @@
#!/usr/bin/env fish
# Unit tests for nvm_auto_use helper functions
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_select_manager
echo "Testing _nvm_auto_use_select_manager..."
# Mock nvm_compat_detect to return a list
function nvm_compat_detect
echo "nvm fnm volta"
end
set -e _nvm_auto_use_preferred_manager
set -l manager (_nvm_auto_use_select_manager)
assert_equals "$manager" nvm "Default manager selection returns first available"
set -g _nvm_auto_use_preferred_manager volta
set manager (_nvm_auto_use_select_manager)
assert_equals "$manager" volta "Preferred manager selection works"
set -e _nvm_auto_use_preferred_manager
functions -e nvm_compat_detect
end
function test_should_debounce
echo "Testing _nvm_auto_use_should_debounce..."
set -e _nvm_auto_use_last_change
set -g _nvm_auto_use_debounce_ms 1000
# First call should record the change time and return 1 (not debounced)
_nvm_auto_use_should_debounce
test $status -ne 0
and echo "✅ First call not debounced"
or echo "❌ First call should not be debounced"
# Second call within the debounce window should return 0 (debounced)
_nvm_auto_use_should_debounce
test $status -eq 0
and echo "✅ Second call debounced"
or echo "❌ Second call should be debounced"
set -e _nvm_auto_use_last_change
set -e _nvm_auto_use_debounce_ms
end
function test_is_excluded_dir
echo "Testing _nvm_auto_use_is_excluded_dir..."
set -g _nvm_auto_use_excluded_dirs testdir
set -l orig_pwd (pwd)
# Create the excluded directory inside the sandbox rather than /
mkdir -p "$TEST_DIR/testdir"
cd "$TEST_DIR/testdir"
_nvm_auto_use_is_excluded_dir
and echo "✅ Excluded directory detected"
or echo "❌ Excluded directory not detected"
cd "$orig_pwd"
set -e _nvm_auto_use_excluded_dirs
end
function test_get_mtime
echo "Testing _nvm_auto_use_get_mtime..."
echo test >testfile
set mtime (_nvm_auto_use_get_mtime "testfile")
test -n "$mtime"
and echo "✅ mtime returned: $mtime"
or echo "❌ mtime not returned"
rm -f testfile
end
function test_is_cache_valid
echo "Testing _nvm_auto_use_is_cache_valid..."
set -g _nvm_auto_use_cached_file foo
set -g _nvm_auto_use_cached_mtime 123
set result (_nvm_auto_use_is_cache_valid "foo" "123")
assert_equals "$result" "" "Cache valid returns 0"
set result (_nvm_auto_use_is_cache_valid "bar" "123")
assert_equals "$result" "" "Cache invalid returns 1"
set -e _nvm_auto_use_cached_file
set -e _nvm_auto_use_cached_mtime
end
function test_clear_cache
echo "Testing _nvm_auto_use_clear_cache..."
set -g _nvm_auto_use_cached_file foo
set -g _nvm_auto_use_cached_version bar
set -g _nvm_auto_use_cached_mtime baz
_nvm_auto_use_clear_cache
if not set -q _nvm_auto_use_cached_file
echo "✅ Cached file cleared"
else
echo "❌ Cached file not cleared"
end
if not set -q _nvm_auto_use_cached_version
echo "✅ Cached version cleared"
else
echo "❌ Cached version not cleared"
end
if not set -q _nvm_auto_use_cached_mtime
echo "✅ Cached mtime cleared"
else
echo "❌ Cached mtime not cleared"
end
end
function main
setup_test_env
set -l failed 0
test_select_manager; or set failed (math "$failed + 1")
test_should_debounce; or set failed (math "$failed + 1")
test_is_excluded_dir; or set failed (math "$failed + 1")
test_get_mtime; or set failed (math "$failed + 1")
test_is_cache_valid; or set failed (math "$failed + 1")
test_clear_cache; or set failed (math "$failed + 1")
cleanup_test_env
if test $failed -eq 0
echo "🎉 All nvm_auto_use helper tests passed!"
return 0
else
echo "💥 $failed helper test(s) failed"
return 1
end
end
main

tests/unit/test_cache.fish Executable file

@@ -0,0 +1,98 @@
#!/usr/bin/env fish
# Unit tests for nvm_cache.fish
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_cache_basic_operations
echo "Testing basic cache operations..."
# Test set and get
nvm_cache set test_key test_value
set -l result (nvm_cache get "test_key")
assert_equals "$result" test_value "Cache set and get works"
# Test delete
nvm_cache delete test_key
nvm_cache get test_key
set -l status_code $status
test $status_code -ne 0
and echo "✅ Cache delete works"
or echo "❌ Cache delete failed"
return 0
end
function test_cache_ttl
echo "Testing cache TTL..."
# Set with short TTL
nvm_cache set ttl_key ttl_value
# Should exist immediately
set -l result (nvm_cache get "ttl_key" 10)
assert_equals "$result" ttl_value "Cache value exists within TTL"
# Mock expired cache by setting TTL to 0
set -l result (nvm_cache get "ttl_key" 0)
set -l status_code $status
test $status_code -ne 0
and echo "✅ Cache TTL expiration works"
or echo "❌ Cache TTL expiration failed"
return 0
end
function test_cache_stats
echo "Testing cache stats..."
# Clear cache first
nvm_cache clear
# Add some items
nvm_cache set stats_key1 value1
nvm_cache set stats_key2 value2
# Get stats
set -l stats (nvm_cache stats)
assert_contains "$stats" "Cache files: 2" "Cache stats shows correct file count"
return 0
end
function test_cache_key_generation
echo "Testing cache key generation..."
# Test directory-based key
set -l key1 (_nvm_cache_key "test_file.txt")
set -l key2 (_nvm_cache_key "test_file.txt")
assert_equals "$key1" "$key2" "Same file generates same cache key"
# Test different files generate different keys
set -l key3 (_nvm_cache_key "different_file.txt")
assert_not_equals "$key1" "$key3" "Different files generate different cache keys"
return 0
end
function main
setup_test_env
set -l failed 0
test_cache_basic_operations; or set failed (math "$failed + 1")
test_cache_ttl; or set failed (math "$failed + 1")
test_cache_stats; or set failed (math "$failed + 1")
test_cache_key_generation; or set failed (math "$failed + 1")
cleanup_test_env
if test $failed -eq 0
echo "🎉 All cache tests passed!"
return 0
else
echo "💥 $failed cache test(s) failed"
return 1
end
end
main
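Taken together, these tests pin down the cache surface: set/get/delete/clear/stats, with a TTL in seconds as get's second argument and a non-zero status on a miss or expired entry. A sketch of the caller-side pattern they imply (the function name and the 300-second TTL are illustrative, not taken from the plugin source):

# Hypothetical caller built on the interface exercised above
function cached_node_version --argument-names version_file
set -l key "version:$version_file"
set -l value (nvm_cache get "$key" 300)
if test $status -eq 0
echo $value
return 0
end
# Miss or expired entry: recompute, store, and return the fresh value
set value (nvm_extract_version "$version_file")
and nvm_cache set "$key" "$value"
echo $value
end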


@@ -0,0 +1,99 @@
#!/usr/bin/env fish
# Unit tests for nvm_cache helper functions
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_nvm_cache_get_set_delete
echo "Testing _nvm_cache_set, _nvm_cache_get, and _nvm_cache_delete..."
set -l key test_key
set -l value test_value
# Set cache value
_nvm_cache_set $key $value
set -l result (_nvm_cache_get $key 300)
assert_equals "$result" "$value" "Cache set and get returns correct value"
# Delete cache value
_nvm_cache_delete $key
set -l result (_nvm_cache_get $key 300)
set -l status_code $status
test $status_code -ne 0
and echo "✅ Cache delete works"
or echo "❌ Cache delete failed"
return 0
end
function test_nvm_cache_clear_and_stats
echo "Testing _nvm_cache_clear and _nvm_cache_stats..."
# Set multiple cache values
_nvm_cache_set key1 value1
_nvm_cache_set key2 value2
# Stats should show at least 2 files
set -l stats (_nvm_cache_stats)
assert_contains "$stats" "Cache files:" "Cache stats reports file count"
# Clear cache
_nvm_cache_clear
set -l stats_after (_nvm_cache_stats)
assert_contains "$stats_after" "Cache files: 0" "Cache clear removes all files"
return 0
end
function test_nvm_cache_ttl
echo "Testing _nvm_cache_get TTL expiration..."
set -l key ttl_key
set -l value ttl_value
_nvm_cache_set $key $value
# Should exist immediately
set -l result (_nvm_cache_get $key 10)
assert_equals "$result" "$value" "Cache value exists within TTL"
# Simulate expired cache by setting TTL to 0
set -l result (_nvm_cache_get $key 0)
set -l status_code $status
test $status_code -ne 0
and echo "✅ Cache TTL expiration works"
or echo "❌ Cache TTL expiration failed"
_nvm_cache_delete $key
return 0
end
function test_nvm_cache_dir
echo "Testing _nvm_cache_dir returns a valid directory..."
set -l dir (_nvm_cache_dir)
test -n "$dir"
and echo "✅ _nvm_cache_dir returns: $dir"
or echo "❌ _nvm_cache_dir did not return a directory"
return 0
end
function main
set -l failed 0
test_nvm_cache_get_set_delete; or set failed (math "$failed + 1")
test_nvm_cache_clear_and_stats; or set failed (math "$failed + 1")
test_nvm_cache_ttl; or set failed (math "$failed + 1")
test_nvm_cache_dir; or set failed (math "$failed + 1")
if test $failed -eq 0
echo "🎉 All nvm_cache helper tests passed!"
return 0
else
echo "💥 $failed nvm_cache helper test(s) failed"
return 1
end
end
main

tests/unit/test_security.fish Executable file

@@ -0,0 +1,154 @@
#!/usr/bin/env fish
# Unit tests for nvm_security.fish
set -g _TEST_RUNNER_SOURCED 1 # tell the runner it is being sourced, not executed
source tests/test_runner.fish
function test_version_validation
echo "Testing version validation..."
# Valid versions
nvm_security check_version "18.17.0"
and echo "✅ Valid semver accepted"
or echo "❌ Valid semver rejected"
nvm_security check_version "v20.5.1"
and echo "✅ Version with 'v' prefix accepted"
or echo "❌ Version with 'v' prefix rejected"
# Invalid versions
nvm_security check_version "invalid.version"
set -l status_code $status
test $status_code -ne 0
and echo "✅ Invalid version rejected"
or echo "❌ Invalid version accepted"
# Suspicious characters
nvm_security check_version "18.0.0; touch /tmp/nvm-auto-use-malicious-test"
set status_code $status
test $status_code -ne 0
and echo "✅ Malicious version string rejected"
or echo "❌ Malicious version string accepted"
return 0
end
function test_security_policies
echo "Testing security policies..."
# Set minimum version policy
nvm_security policy set min_version "16.0.0"
set -l min_version (nvm_security policy get min_version)
assert_equals "$min_version" "16.0.0" "Minimum version policy set correctly"
# Test version below minimum
nvm_security check_version "14.0.0"
set -l status_code $status
test $status_code -ne 0
and echo "✅ Version below minimum rejected"
or echo "❌ Version below minimum accepted"
# Set maximum version policy
nvm_security policy set max_version "20.0.0"
set -l max_version (nvm_security policy get max_version)
assert_equals "$max_version" "20.0.0" "Maximum version policy set correctly"
# Test version above maximum
nvm_security check_version "21.0.0"
set status_code $status
test $status_code -ne 0
and echo "✅ Version above maximum rejected"
or echo "❌ Version above maximum accepted"
# Reset policies
nvm_security policy reset
return 0
end
function test_version_comparison
echo "Testing version comparison..."
# Test less than
_nvm_security_version_compare "16.0.0" "18.0.0" -lt
and echo "✅ Version comparison (less than) works"
or echo "❌ Version comparison (less than) failed"
# Test greater than
_nvm_security_version_compare "20.0.0" "18.0.0" -gt
and echo "✅ Version comparison (greater than) works"
or echo "❌ Version comparison (greater than) failed"
# Test equal
_nvm_security_version_compare "18.17.0" "18.17.0" -eq
and echo "✅ Version comparison (equal) works"
or echo "❌ Version comparison (equal) failed"
return 0
end
function test_source_validation
echo "Testing source file validation..."
# Create test files
echo "18.17.0" >test_nvmrc
echo "18.0.0; touch /tmp/nvm-auto-use-malicious-test" >malicious_nvmrc
# Test valid source
nvm_security validate_source test_nvmrc
and echo "✅ Valid source file accepted"
or echo "❌ Valid source file rejected"
# Test malicious source
nvm_security validate_source malicious_nvmrc
set -l status_code $status
test $status_code -ne 0
and echo "✅ Malicious source file rejected"
or echo "❌ Malicious source file accepted"
# Cleanup
rm -f test_nvmrc malicious_nvmrc
return 0
end
function test_vulnerability_check
echo "Testing vulnerability checking..."
# Test known vulnerable version (if any in our test data)
nvm_security check_cve "16.0.0"
set -l status_code $status
test $status_code -ne 0
and echo "✅ Known vulnerable version flagged"
or echo " No vulnerability data for test version"
# Test presumably safe version
nvm_security check_cve "18.17.0"
and echo "✅ Safe version check completed"
or echo " Vulnerability check completed with warnings"
return 0
end
function main
setup_test_env
set -l failed 0
test_version_validation; or set failed (math "$failed + 1")
test_security_policies; or set failed (math "$failed + 1")
test_version_comparison; or set failed (math "$failed + 1")
test_source_validation; or set failed (math "$failed + 1")
test_vulnerability_check; or set failed (math "$failed + 1")
cleanup_test_env
if test $failed -eq 0
echo "🎉 All security tests passed!"
return 0
else
echo "💥 $failed security test(s) failed"
return 1
end
end
main
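
The checks above compose naturally into a pre-switch gate; a sketch of that flow, built only from the calls these tests exercise (the wrapper function itself is an assumption, not taken from the plugin source):

# Hypothetical pre-switch validation gate
function validate_before_switch --argument-names version_file
# Reject files carrying shell metacharacters or injection attempts
nvm_security validate_source "$version_file"; or return 1
set -l version (nvm_extract_version "$version_file"); or return 1
# Enforce semver shape plus any configured min/max version policies
nvm_security check_version "$version"; or return 1
# Flag known-vulnerable releases without blocking the switch
nvm_security check_cve "$version"; or echo "⚠️ $version has known CVE advisories"
return 0
end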