Mirror of https://github.com/ivuorinen/actions.git (synced 2026-01-26 03:23:59 +00:00)

chore: add tests, update docs and actions (#299)

* docs: update documentation
* feat: validate-inputs has its own pyproject
* security: mask DOCKERHUB_PASSWORD
* chore: add tokens, checkout, recreate docs, integration tests
* fix: add `statuses: write` permission to pr-lint

_tests/README.md (550 lines changed)

@@ -1,6 +1,6 @@

# GitHub Actions Testing Framework

A comprehensive testing framework for validating GitHub Actions in this monorepo using ShellSpec and Python-based input validation.

## 🚀 Quick Start

@@ -36,16 +36,15 @@ brew install act # macOS

The testing framework uses a **multi-level testing strategy**:

1. **Unit Tests** - Fast validation of action logic, inputs, and outputs using Python validation
2. **Integration Tests** - Test actions in realistic workflow environments
3. **External Usage Tests** - Validate actions work as `ivuorinen/actions/action-name@main`

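For orientation, this is roughly how each level can be exercised locally. The `shellspec` and `make` invocations are taken from later sections of this guide; the exact `act` arguments are an assumption based on its standard CLI:

```bash
# 1. Unit tests for a single action (ShellSpec + Python validation)
shellspec _tests/unit/node-setup/validation.spec.sh

# 2. An integration workflow run locally with nektos/act (assumed invocation)
act workflow_dispatch -W _tests/integration/workflows/common-cache-test.yml

# 3. The full suite
make test
```
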
### Technology Stack

- **Primary Framework**: [ShellSpec](https://shellspec.info/) - BDD testing for shell scripts
- **Validation**: Python-based input validation via `validate-inputs/validator.py`
- **Local Execution**: [nektos/act](https://github.com/nektos/act) - Run GitHub Actions locally
- **Coverage**: kcov integration for shell script coverage
- **Mocking**: Custom GitHub API and service mocks
- **CI Integration**: GitHub Actions workflows

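If you want to run the stack locally, the test runner and workflow runner can be installed with Homebrew on macOS (package names are assumptions for other platforms; see each project's install docs):

```bash
brew install shellspec  # BDD test runner used by the unit tests
brew install act        # local GitHub Actions runner (see Quick Start above)
```
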
### Directory Structure

@@ -54,19 +53,20 @@ The testing framework uses a **multi-level testing strategy**:

```text
_tests/
├── README.md                 # This documentation
├── run-tests.sh              # Main test runner script
├── unit/                     # Unit tests by action
│   ├── spec_helper.sh        # ShellSpec helper with validation functions
│   ├── version-file-parser/  # Example unit tests
│   ├── node-setup/           # Example unit tests
│   └── ...                   # One directory per action
├── framework/                # Core testing utilities
│   ├── setup.sh              # Test environment setup
│   ├── utils.sh              # Common testing functions
│   ├── validation.py         # Python validation utilities
│   └── fixtures/             # Test fixtures
├── integration/              # Integration tests
│   ├── workflows/            # Test workflows for nektos/act
│   ├── external-usage/       # External reference tests
│   └── action-chains/        # Multi-action workflow tests
├── coverage/                 # Coverage reports
└── reports/                  # Test execution reports
```

@@ -79,44 +79,39 @@ _tests/

```bash
#!/usr/bin/env shellspec
# _tests/unit/my-action/validation.spec.sh

Include _tests/framework/utils.sh

Describe "my-action validation"
  ACTION_DIR="my-action"
  ACTION_FILE="$ACTION_DIR/action.yml"

  BeforeAll "init_testing_framework"

  Context "when validating required inputs"
    It "accepts valid input"
      When call validate_input_python "my-action" "input-name" "valid-value"
      The status should be success
    End

    It "rejects invalid input"
      When call validate_input_python "my-action" "input-name" "invalid@value"
      The status should be failure
    End
  End

  Context "when validating boolean inputs"
    It "accepts true"
      When call validate_input_python "my-action" "dry-run" "true"
      The status should be success
    End

    It "accepts false"
      When call validate_input_python "my-action" "dry-run" "false"
      The status should be success
    End

    It "rejects invalid boolean"
      When call validate_input_python "my-action" "dry-run" "maybe"
      The status should be failure
    End
  End
End
```

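To run just this spec while developing, use the commands that appear later in this guide (under Troubleshooting and the Makefile targets):

```bash
shellspec _tests/unit/my-action/validation.spec.sh
make test-action ACTION=my-action
```
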
### Integration Test Example

@@ -149,66 +144,68 @@ jobs:

```yaml
          required-input: 'test-value'
```

## 🛠️ Testing Functions

### Primary Validation Function

The framework provides one main validation function that uses the Python validation system:

#### validate_input_python

Tests input validation using the centralized Python validator:

```bash
validate_input_python "action-name" "input-name" "test-value"
```

**Examples:**

```bash
# Boolean validation
validate_input_python "pre-commit" "dry-run" "true"    # success
validate_input_python "pre-commit" "dry-run" "false"   # success
validate_input_python "pre-commit" "dry-run" "maybe"   # failure

# Version validation
validate_input_python "node-setup" "node-version" "18.0.0"   # success
validate_input_python "node-setup" "node-version" "v1.2.3"   # success
validate_input_python "node-setup" "node-version" "invalid"  # failure

# Token validation
validate_input_python "npm-publish" "npm-token" "ghp_123..."  # success
validate_input_python "npm-publish" "npm-token" "invalid"     # failure

# Docker validation
validate_input_python "docker-build" "image-name" "myapp"  # success
validate_input_python "docker-build" "tag" "v1.0.0"        # success

# Path validation (security)
validate_input_python "pre-commit" "config-file" "config.yml"   # success
validate_input_python "pre-commit" "config-file" "../etc/pass"  # failure

# Injection detection
validate_input_python "common-retry" "command" "echo test"   # success
validate_input_python "common-retry" "command" "rm -rf /; "  # failure
```

### Helper Functions from spec_helper.sh

```bash
# Setup/cleanup
setup_default_inputs "action-name" "input-name"    # Set required defaults
cleanup_default_inputs "action-name" "input-name"  # Clean up defaults
shellspec_setup_test_env "test-name"               # Setup test environment
shellspec_cleanup_test_env "test-name"             # Cleanup test environment

# Mock execution
shellspec_mock_action_run "action-dir" key1 value1 key2 value2
shellspec_validate_action_output "expected-key" "expected-value"

# Available framework helpers:
test_input_validation "$action_dir" "$input_name" "$test_value" "$expected_result"
test_action_outputs "$action_dir"
test_external_usage "$action_dir"

# Action metadata
validate_action_yml "action.yml"  # Validate YAML structure
get_action_inputs "action.yml"    # Get action inputs
get_action_outputs "action.yml"   # Get action outputs
get_action_name "action.yml"      # Get action name
```

### Complete Action Validation Example

@@ -218,41 +215,47 @@ Describe "comprehensive-action validation"

```bash
  ACTION_DIR="comprehensive-action"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating all input types"
    It "validates boolean inputs"
      When call validate_input_python "$ACTION_DIR" "verbose" "true"
      The status should be success

      When call validate_input_python "$ACTION_DIR" "verbose" "false"
      The status should be success

      When call validate_input_python "$ACTION_DIR" "verbose" "invalid"
      The status should be failure
    End

    It "validates numeric inputs"
      When call validate_input_python "$ACTION_DIR" "max-retries" "3"
      The status should be success

      When call validate_input_python "$ACTION_DIR" "max-retries" "999"
      The status should be failure
    End

    It "validates version inputs"
      When call validate_input_python "$ACTION_DIR" "tool-version" "1.0.0"
      The status should be success

      When call validate_input_python "$ACTION_DIR" "tool-version" "v1.2.3-rc.1"
      The status should be success
    End

    It "validates security patterns"
      When call validate_input_python "$ACTION_DIR" "command" "echo test"
      The status should be success

      When call validate_input_python "$ACTION_DIR" "command" "rm -rf /; "
      The status should be failure
    End
  End

  Context "when validating action structure"
    It "has valid YAML structure"
      When call validate_action_yml "$ACTION_FILE"
      The status should be success
    End
  End
End
```

@@ -265,45 +268,37 @@ End

Focus on version detection and environment setup:

```bash
Context "when detecting versions"
  It "detects version from config files"
    When call validate_input_python "node-setup" "node-version" "18.0.0"
    The status should be success
  End

  It "accepts default version"
    When call validate_input_python "python-version-detect" "default-version" "3.11"
    The status should be success
  End
End
```

### Linting Actions (eslint-fix, prettier-fix, etc.)

Focus on file processing and security:

```bash
Context "when processing files"
  It "validates working directory"
    When call validate_input_python "eslint-fix" "working-directory" "."
    The status should be success
  End

  It "rejects path traversal"
    When call validate_input_python "eslint-fix" "working-directory" "../etc"
    The status should be failure
  End

  It "validates boolean flags"
    When call validate_input_python "eslint-fix" "fix-only" "true"
    The status should be success
  End
End
```

@@ -311,25 +306,22 @@ End

### Build Actions (docker-build, go-build, etc.)

Focus on build configuration:

```bash
Context "when building"
  It "validates image name"
    When call validate_input_python "docker-build" "image-name" "myapp"
    The status should be success
  End

  It "validates tag format"
    When call validate_input_python "docker-build" "tag" "v1.0.0"
    The status should be success
  End

  It "validates platforms"
    When call validate_input_python "docker-build" "platforms" "linux/amd64,linux/arm64"
    The status should be success
  End
End
```

@@ -337,25 +329,22 @@ End

### Publishing Actions (npm-publish, docker-publish, etc.)

Focus on credentials and registry validation:

```bash
Context "when publishing"
  It "validates token format"
    When call validate_input_python "npm-publish" "npm-token" "ghp_123456789012345678901234567890123456"
    The status should be success
  End

  It "rejects invalid token"
    When call validate_input_python "npm-publish" "npm-token" "invalid-token"
    The status should be failure
  End

  It "validates version"
    When call validate_input_python "npm-publish" "package-version" "1.0.0"
    The status should be success
  End
End
```

@@ -409,33 +398,33 @@ make test-action ACTION=name # Test specific action

   ```bash
   mkdir -p _tests/unit/new-action
   ```

2. **Write Unit Tests**

   ```bash
   # _tests/unit/new-action/validation.spec.sh
   #!/usr/bin/env shellspec

   Describe "new-action validation"
     ACTION_DIR="new-action"
     ACTION_FILE="$ACTION_DIR/action.yml"

     Context "when validating inputs"
       It "validates required input"
         When call validate_input_python "new-action" "required-input" "value"
         The status should be success
       End
     End
   End
   ```

3. **Create Integration Test**

   ```bash
   # _tests/integration/workflows/new-action-test.yml
   # (See integration test example above)
   ```

4. **Test Your Tests**

   ```bash
   make test-action ACTION=new-action
   ```

@@ -443,7 +432,7 @@ make test-action ACTION=name # Test specific action

### Pull Request Checklist

- [ ] Tests use `validate_input_python` for input validation
- [ ] All test types pass locally (`make test`)
- [ ] Integration test workflow created
- [ ] Security testing included for user inputs

@@ -453,24 +442,21 @@ make test-action ACTION=name # Test specific action

## 💡 Best Practices

### 1. Use validate_input_python for All Input Testing

✅ **Good**:

```bash
When call validate_input_python "my-action" "verbose" "true"
The status should be success
```

❌ **Avoid**:

```bash
# Don't manually test validation - use the Python validator
export INPUT_VERBOSE="true"
python3 validate-inputs/validator.py
```

### 2. Group Related Validations

@@ -478,26 +464,33 @@ When call test_input_validation "$ACTION_DIR" "verbose" "false" "success"

✅ **Good**:

```bash
Context "when validating configuration"
  It "accepts valid boolean"
    When call validate_input_python "my-action" "dry-run" "true"
    The status should be success
  End

  It "accepts valid version"
    When call validate_input_python "my-action" "tool-version" "1.0.0"
    The status should be success
  End
End
```

### 3. Always Include Security Testing

✅ **Always include**:

```bash
It "rejects command injection"
  When call validate_input_python "common-retry" "command" "rm -rf /; "
  The status should be failure
End

It "rejects path traversal"
  When call validate_input_python "pre-commit" "config-file" "../etc/passwd"
  The status should be failure
End
```

### 4. Write Descriptive Test Names

@@ -528,46 +521,34 @@ It "works correctly"

### Test Environment Setup

```bash
# Setup test environment
setup_test_env "test-name"

# Create mock repositories
create_mock_repo "node"    # Node.js project
create_mock_repo "php"     # PHP project
create_mock_repo "python"  # Python project
create_mock_repo "go"      # Go project
create_mock_repo "dotnet"  # .NET project

# Cleanup
cleanup_test_env "test-name"
```

### Mock Services

Built-in mocks for external services:

- **GitHub API** - Repository, releases, packages, workflows
- **NPM Registry** - Package publishing and retrieval
- **Docker Registry** - Image push/pull operations
- **Container Registries** - GitHub Container Registry, Docker Hub

### Available Environment Variables

The framework automatically sets up test environments via `spec_helper.sh`:

```bash
# Automatic setup on load:
# - GitHub Actions environment variables
# - Temporary directories
# - Mock GITHUB_OUTPUT files
# - Default required inputs for actions

# Available variables
$PROJECT_ROOT   # Repository root
$TEST_ROOT      # _tests/ directory
$FRAMEWORK_DIR  # _tests/framework/
$FIXTURES_DIR   # _tests/framework/fixtures/
$TEMP_DIR       # Temporary test directory
$GITHUB_OUTPUT  # Mock outputs file
$GITHUB_ENV     # Mock environment file
```

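For example, a spec can append to the mock `$GITHUB_OUTPUT` file and then assert on it with the `shellspec_validate_action_output` helper listed above (a minimal sketch; the output key is illustrative):

```bash
It "records the detected version"
  # Simulate an action step writing to the mock outputs file
  echo "detected-version=18.0.0" >> "$GITHUB_OUTPUT"

  When call shellspec_validate_action_output "detected-version" "18.0.0"
  The status should be success
End
```
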
### Python Validation Integration

All input validation uses the centralized Python validation system from `validate-inputs/`:

- Convention-based automatic validation
- 9 specialized validators (Boolean, Version, Token, Numeric, File, Network, Docker, Security, CodeQL)
- Custom validator support per action
- Injection and security pattern detection

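Under the hood this is the same validator that the Best Practices section warns against calling by hand: inputs are exposed as `INPUT_*` environment variables and `validate-inputs/validator.py` reports success or failure through its exit status. A rough sketch of that flow (the exact CLI of `validator.py` is not documented here, so treat this as an assumption and prefer `validate_input_python` in specs):

```bash
# Conceptual equivalent of: validate_input_python "pre-commit" "dry-run" "true"
export INPUT_DRY_RUN="true"
python3 validate-inputs/validator.py  # non-zero exit status means validation failed
```
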
## 🚨 Troubleshooting

### Common Issues

@@ -618,58 +599,67 @@ find _tests/ -name "*.sh" -exec chmod +x {} \;

   ```bash
   shellspec _tests/unit/my-action/validation.spec.sh
   ```

3. **Enable Debug Mode**

   ```bash
   export SHELLSPEC_DEBUG=1
   shellspec _tests/unit/my-action/validation.spec.sh
   ```

4. **Check Test Output**

   ```bash
   # Test results stored in _tests/reports/
   cat _tests/reports/unit/my-action.txt
   ```

5. **Debug Mock Environment**

   ```bash
   # Enable mock debugging
   export MOCK_DEBUG=true
   ```

## 📚 Resources

- [ShellSpec Documentation](https://shellspec.info/)
- [nektos/act Documentation](https://nektosact.com/)
- [GitHub Actions Documentation](https://docs.github.com/en/actions)
- [Testing GitHub Actions Best Practices](https://docs.github.com/en/actions/creating-actions/creating-a-composite-action#testing-your-action)
- [validate-inputs Documentation](../validate-inputs/docs/README_ARCHITECTURE.md)

---

## Framework Development

### Framework File Structure

```text
_tests/
├── unit/
│   └── spec_helper.sh     # ShellSpec configuration and helpers
├── framework/
│   ├── setup.sh           # Test environment initialization
│   ├── utils.sh           # Common utility functions
│   ├── validation.py      # Python validation helpers
│   └── fixtures/          # Test fixtures
└── integration/
    ├── workflows/         # Integration test workflows
    ├── external-usage/    # External reference tests
    └── action-chains/     # Multi-action tests
```

### Available Functions

**From spec_helper.sh (`_tests/unit/spec_helper.sh`):**

- `validate_input_python(action, input_name, value)` - Main validation function
- `setup_default_inputs(action, input_name)` - Set default required inputs
- `cleanup_default_inputs(action, input_name)` - Clean up default inputs
- `shellspec_setup_test_env(name)` - Setup test environment
- `shellspec_cleanup_test_env(name)` - Cleanup test environment
- `shellspec_mock_action_run(action_dir, ...)` - Mock action execution
- `shellspec_validate_action_output(key, value)` - Validate outputs

**From utils.sh (`_tests/framework/utils.sh`):**

- `validate_action_yml(file)` - Validate action YAML
- `get_action_inputs(file)` - Extract action inputs
- `get_action_outputs(file)` - Extract action outputs
- `get_action_name(file)` - Get action name
- `test_input_validation(dir, name, value, expected)` - Test input
- `test_action_outputs(dir)` - Test action outputs
- `test_external_usage(dir)` - Test external usage

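A short sketch of how the metadata helpers from `utils.sh` might be combined in a spec (the `node-setup` action and the `node-version` input are used here purely as examples):

```bash
Describe "node-setup metadata"
  It "has a parseable action.yml"
    When call validate_action_yml "node-setup/action.yml"
    The status should be success
  End

  It "declares its inputs"
    When call get_action_inputs "node-setup/action.yml"
    The output should include "node-version"
  End
End
```
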
**Last Updated:** October 15, 2025

471
_tests/integration/workflows/common-cache-test.yml
Normal file
471
_tests/integration/workflows/common-cache-test.yml
Normal file
@@ -0,0 +1,471 @@
|
||||
---
|
||||
name: Integration Test - Common Cache
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'common-cache/**'
|
||||
- '_tests/integration/workflows/common-cache-test.yml'
|
||||
|
||||
jobs:
|
||||
test-common-cache-key-generation:
|
||||
name: Test Cache Key Generation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test basic key generation
|
||||
run: |
|
||||
RUNNER_OS="Linux"
|
||||
CACHE_TYPE="npm"
|
||||
KEY_PREFIX=""
|
||||
|
||||
cache_key="$RUNNER_OS"
|
||||
[ -n "$CACHE_TYPE" ] && cache_key="${cache_key}-${CACHE_TYPE}"
|
||||
|
||||
expected="Linux-npm"
|
||||
if [[ "$cache_key" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Expected '$expected', got '$cache_key'"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Basic cache key generation works"
|
||||
|
||||
- name: Test key with prefix
|
||||
run: |
|
||||
RUNNER_OS="Linux"
|
||||
CACHE_TYPE="npm"
|
||||
KEY_PREFIX="node-20"
|
||||
|
||||
cache_key="$RUNNER_OS"
|
||||
[ -n "$KEY_PREFIX" ] && cache_key="${cache_key}-${KEY_PREFIX}"
|
||||
[ -n "$CACHE_TYPE" ] && cache_key="${cache_key}-${CACHE_TYPE}"
|
||||
|
||||
expected="Linux-node-20-npm"
|
||||
if [[ "$cache_key" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Expected '$expected', got '$cache_key'"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Cache key with prefix works"
|
||||
|
||||
- name: Test OS-specific keys
|
||||
run: |
|
||||
for os in "Linux" "macOS" "Windows"; do
|
||||
CACHE_TYPE="test"
|
||||
cache_key="$os-$CACHE_TYPE"
|
||||
if [[ ! "$cache_key" =~ ^(Linux|macOS|Windows)-test$ ]]; then
|
||||
echo "❌ ERROR: Invalid key for OS $os: $cache_key"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ OS-specific key for $os: $cache_key"
|
||||
done
|
||||
|
||||
test-common-cache-file-hashing:
|
||||
name: Test File Hashing
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test files
|
||||
run: |
|
||||
mkdir -p test-cache
|
||||
cd test-cache
|
||||
echo "content1" > file1.txt
|
||||
echo "content2" > file2.txt
|
||||
echo "content3" > file3.txt
|
||||
|
||||
- name: Test single file hash
|
||||
run: |
|
||||
cd test-cache
|
||||
file_hash=$(cat file1.txt | sha256sum | cut -d' ' -f1)
|
||||
|
||||
if [[ ! "$file_hash" =~ ^[a-f0-9]{64}$ ]]; then
|
||||
echo "❌ ERROR: Invalid hash format: $file_hash"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Single file hash: $file_hash"
|
||||
|
||||
- name: Test multiple file hash
|
||||
run: |
|
||||
cd test-cache
|
||||
multi_hash=$(cat file1.txt file2.txt file3.txt | sha256sum | cut -d' ' -f1)
|
||||
|
||||
if [[ ! "$multi_hash" =~ ^[a-f0-9]{64}$ ]]; then
|
||||
echo "❌ ERROR: Invalid hash format: $multi_hash"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Multiple file hash: $multi_hash"
|
||||
|
||||
- name: Test hash changes with content
|
||||
run: |
|
||||
cd test-cache
|
||||
|
||||
# Get initial hash
|
||||
hash1=$(cat file1.txt | sha256sum | cut -d' ' -f1)
|
||||
|
||||
# Modify file
|
||||
echo "modified" > file1.txt
|
||||
|
||||
# Get new hash
|
||||
hash2=$(cat file1.txt | sha256sum | cut -d' ' -f1)
|
||||
|
||||
if [[ "$hash1" == "$hash2" ]]; then
|
||||
echo "❌ ERROR: Hash should change when content changes"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Hash changes with content modification"
|
||||
|
||||
- name: Test comma-separated file list processing
|
||||
run: |
|
||||
cd test-cache
|
||||
|
||||
KEY_FILES="file1.txt,file2.txt,file3.txt"
|
||||
IFS=',' read -ra FILES <<< "$KEY_FILES"
|
||||
|
||||
existing_files=()
|
||||
for file in "${FILES[@]}"; do
|
||||
file=$(echo "$file" | xargs)
|
||||
if [ -f "$file" ]; then
|
||||
existing_files+=("$file")
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#existing_files[@]} -ne 3 ]; then
|
||||
echo "❌ ERROR: Should find 3 files, found ${#existing_files[@]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Comma-separated file list processing works"
|
||||
|
||||
- name: Test missing file handling
|
||||
run: |
|
||||
cd test-cache
|
||||
|
||||
KEY_FILES="file1.txt,missing.txt,file2.txt"
|
||||
IFS=',' read -ra FILES <<< "$KEY_FILES"
|
||||
|
||||
existing_files=()
|
||||
for file in "${FILES[@]}"; do
|
||||
file=$(echo "$file" | xargs)
|
||||
if [ -f "$file" ]; then
|
||||
existing_files+=("$file")
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#existing_files[@]} -ne 2 ]; then
|
||||
echo "❌ ERROR: Should find 2 files, found ${#existing_files[@]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Missing files correctly skipped"
|
||||
|
||||
test-common-cache-env-vars:
|
||||
name: Test Environment Variables
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test single env var inclusion
|
||||
run: |
|
||||
export NODE_VERSION="20.9.0"
|
||||
ENV_VARS="NODE_VERSION"
|
||||
|
||||
IFS=',' read -ra VARS <<< "$ENV_VARS"
|
||||
env_hash=""
|
||||
for var in "${VARS[@]}"; do
|
||||
if [ -n "${!var}" ]; then
|
||||
env_hash="${env_hash}-${var}-${!var}"
|
||||
fi
|
||||
done
|
||||
|
||||
expected="-NODE_VERSION-20.9.0"
|
||||
if [[ "$env_hash" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Expected '$expected', got '$env_hash'"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Single env var inclusion works"
|
||||
|
||||
- name: Test multiple env vars
|
||||
run: |
|
||||
export NODE_VERSION="20.9.0"
|
||||
export PACKAGE_MANAGER="npm"
|
||||
ENV_VARS="NODE_VERSION,PACKAGE_MANAGER"
|
||||
|
||||
IFS=',' read -ra VARS <<< "$ENV_VARS"
|
||||
env_hash=""
|
||||
for var in "${VARS[@]}"; do
|
||||
if [ -n "${!var}" ]; then
|
||||
env_hash="${env_hash}-${var}-${!var}"
|
||||
fi
|
||||
done
|
||||
|
||||
expected="-NODE_VERSION-20.9.0-PACKAGE_MANAGER-npm"
|
||||
if [[ "$env_hash" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Expected '$expected', got '$env_hash'"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Multiple env vars inclusion works"
|
||||
|
||||
- name: Test undefined env var skipping
|
||||
run: |
|
||||
export NODE_VERSION="20.9.0"
|
||||
ENV_VARS="NODE_VERSION,UNDEFINED_VAR"
|
||||
|
||||
IFS=',' read -ra VARS <<< "$ENV_VARS"
|
||||
env_hash=""
|
||||
for var in "${VARS[@]}"; do
|
||||
if [ -n "${!var}" ]; then
|
||||
env_hash="${env_hash}-${var}-${!var}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Should only include NODE_VERSION
|
||||
expected="-NODE_VERSION-20.9.0"
|
||||
if [[ "$env_hash" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Expected '$expected', got '$env_hash'"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Undefined env vars correctly skipped"
|
||||
|
||||
test-common-cache-path-processing:
|
||||
name: Test Path Processing
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test single path
|
||||
run: |
|
||||
CACHE_PATHS="~/.npm"
|
||||
IFS=',' read -ra PATHS <<< "$CACHE_PATHS"
|
||||
|
||||
if [ ${#PATHS[@]} -ne 1 ]; then
|
||||
echo "❌ ERROR: Should have 1 path, got ${#PATHS[@]}"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Single path processing works"
|
||||
|
||||
- name: Test multiple paths
|
||||
run: |
|
||||
CACHE_PATHS="~/.npm,~/.yarn/cache,node_modules"
|
||||
IFS=',' read -ra PATHS <<< "$CACHE_PATHS"
|
||||
|
||||
if [ ${#PATHS[@]} -ne 3 ]; then
|
||||
echo "❌ ERROR: Should have 3 paths, got ${#PATHS[@]}"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Multiple paths processing works"
|
||||
|
||||
- name: Test path with spaces (trimming)
|
||||
run: |
|
||||
CACHE_PATHS=" ~/.npm , ~/.yarn/cache , node_modules "
|
||||
IFS=',' read -ra PATHS <<< "$CACHE_PATHS"
|
||||
|
||||
trimmed_paths=()
|
||||
for path in "${PATHS[@]}"; do
|
||||
trimmed=$(echo "$path" | xargs)
|
||||
trimmed_paths+=("$trimmed")
|
||||
done
|
||||
|
||||
# Check first path is trimmed
|
||||
if [[ "${trimmed_paths[0]}" != "~/.npm" ]]; then
|
||||
echo "❌ ERROR: Path not trimmed: '${trimmed_paths[0]}'"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Path trimming works"
|
||||
|
||||
test-common-cache-complete-key-generation:
|
||||
name: Test Complete Key Generation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test files
|
||||
run: |
|
||||
mkdir -p test-complete
|
||||
cd test-complete
|
||||
echo "package-lock content" > package-lock.json
|
||||
|
||||
- name: Test complete cache key with all components
|
||||
run: |
|
||||
cd test-complete
|
||||
|
||||
RUNNER_OS="Linux"
|
||||
CACHE_TYPE="npm"
|
||||
KEY_PREFIX="node-20"
|
||||
|
||||
# Generate file hash
|
||||
files_hash=$(cat package-lock.json | sha256sum | cut -d' ' -f1)
|
||||
|
||||
# Generate env hash
|
||||
export NODE_VERSION="20.9.0"
|
||||
env_hash="-NODE_VERSION-20.9.0"
|
||||
|
||||
# Generate final key
|
||||
cache_key="$RUNNER_OS"
|
||||
[ -n "$KEY_PREFIX" ] && cache_key="${cache_key}-${KEY_PREFIX}"
|
||||
[ -n "$CACHE_TYPE" ] && cache_key="${cache_key}-${CACHE_TYPE}"
|
||||
[ -n "$files_hash" ] && cache_key="${cache_key}-${files_hash}"
|
||||
[ -n "$env_hash" ] && cache_key="${cache_key}${env_hash}"
|
||||
|
||||
echo "Generated cache key: $cache_key"
|
||||
|
||||
# Verify structure
|
||||
if [[ ! "$cache_key" =~ ^Linux-node-20-npm-[a-f0-9]{64}-NODE_VERSION-20\.9\.0$ ]]; then
|
||||
echo "❌ ERROR: Invalid cache key structure: $cache_key"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Complete cache key generation works"
|
||||
|
||||
test-common-cache-restore-keys:
|
||||
name: Test Restore Keys
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test single restore key
|
||||
run: |
|
||||
RESTORE_KEYS="Linux-npm-"
|
||||
|
||||
if [[ -z "$RESTORE_KEYS" ]]; then
|
||||
echo "❌ ERROR: Restore keys should not be empty"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Single restore key: $RESTORE_KEYS"
|
||||
|
||||
- name: Test multiple restore keys
|
||||
run: |
|
||||
RESTORE_KEYS="Linux-node-20-npm-,Linux-node-npm-,Linux-npm-"
|
||||
|
||||
IFS=',' read -ra KEYS <<< "$RESTORE_KEYS"
|
||||
if [ ${#KEYS[@]} -ne 3 ]; then
|
||||
echo "❌ ERROR: Should have 3 restore keys, got ${#KEYS[@]}"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Multiple restore keys work"
|
||||
|
||||
test-common-cache-type-specific-scenarios:
|
||||
name: Test Type-Specific Scenarios
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test NPM cache key
|
||||
run: |
|
||||
TYPE="npm"
|
||||
FILES="package-lock.json"
|
||||
PATHS="~/.npm,node_modules"
|
||||
|
||||
echo "✓ NPM cache configuration valid"
|
||||
echo " Type: $TYPE"
|
||||
echo " Key files: $FILES"
|
||||
echo " Paths: $PATHS"
|
||||
|
||||
- name: Test Composer cache key
|
||||
run: |
|
||||
TYPE="composer"
|
||||
FILES="composer.lock"
|
||||
PATHS="~/.composer/cache,vendor"
|
||||
|
||||
echo "✓ Composer cache configuration valid"
|
||||
echo " Type: $TYPE"
|
||||
echo " Key files: $FILES"
|
||||
echo " Paths: $PATHS"
|
||||
|
||||
- name: Test Go cache key
|
||||
run: |
|
||||
TYPE="go"
|
||||
FILES="go.sum"
|
||||
PATHS="~/go/pkg/mod,~/.cache/go-build"
|
||||
|
||||
echo "✓ Go cache configuration valid"
|
||||
echo " Type: $TYPE"
|
||||
echo " Key files: $FILES"
|
||||
echo " Paths: $PATHS"
|
||||
|
||||
- name: Test Pip cache key
|
||||
run: |
|
||||
TYPE="pip"
|
||||
FILES="requirements.txt"
|
||||
PATHS="~/.cache/pip"
|
||||
|
||||
echo "✓ Pip cache configuration valid"
|
||||
echo " Type: $TYPE"
|
||||
echo " Key files: $FILES"
|
||||
echo " Paths: $PATHS"
|
||||
|
||||
test-common-cache-edge-cases:
|
||||
name: Test Edge Cases
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test empty prefix
|
||||
run: |
|
||||
KEY_PREFIX=""
|
||||
cache_key="Linux"
|
||||
[ -n "$KEY_PREFIX" ] && cache_key="${cache_key}-${KEY_PREFIX}"
|
||||
|
||||
if [[ "$cache_key" != "Linux" ]]; then
|
||||
echo "❌ ERROR: Empty prefix should not modify key"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Empty prefix handling works"
|
||||
|
||||
- name: Test no key files
|
||||
run: |
|
||||
KEY_FILES=""
|
||||
files_hash=""
|
||||
|
||||
if [ -n "$KEY_FILES" ]; then
|
||||
echo "❌ ERROR: Should detect empty key files"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ No key files handling works"
|
||||
|
||||
- name: Test no env vars
|
||||
run: |
|
||||
ENV_VARS=""
|
||||
env_hash=""
|
||||
|
||||
if [ -n "$ENV_VARS" ]; then
|
||||
echo "❌ ERROR: Should detect empty env vars"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ No env vars handling works"
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-common-cache-key-generation
|
||||
- test-common-cache-file-hashing
|
||||
- test-common-cache-env-vars
|
||||
- test-common-cache-path-processing
|
||||
- test-common-cache-complete-key-generation
|
||||
- test-common-cache-restore-keys
|
||||
- test-common-cache-type-specific-scenarios
|
||||
- test-common-cache-edge-cases
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "Common Cache Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Cache key generation tests"
|
||||
echo "✓ File hashing tests"
|
||||
echo "✓ Environment variable tests"
|
||||
echo "✓ Path processing tests"
|
||||
echo "✓ Complete key generation tests"
|
||||
echo "✓ Restore keys tests"
|
||||
echo "✓ Type-specific scenario tests"
|
||||
echo "✓ Edge case tests"
|
||||
echo ""
|
||||
echo "All common-cache integration tests completed successfully!"
|
||||
_tests/integration/workflows/docker-build-publish-test.yml (new file, 186 lines)

@@ -0,0 +1,186 @@
|
||||
---
|
||||
name: Test Docker Build & Publish Integration
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'docker-build/**'
|
||||
- 'docker-publish/**'
|
||||
- 'docker-publish-gh/**'
|
||||
- 'docker-publish-hub/**'
|
||||
- '_tests/integration/workflows/docker-build-publish-test.yml'
|
||||
|
||||
jobs:
|
||||
test-docker-build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test Dockerfile
|
||||
run: |
|
||||
cat > Dockerfile <<EOF
|
||||
FROM alpine:3.19
|
||||
RUN apk add --no-cache bash
|
||||
COPY test.sh /test.sh
|
||||
RUN chmod +x /test.sh
|
||||
CMD ["/test.sh"]
|
||||
EOF
|
||||
|
||||
cat > test.sh <<EOF
|
||||
#!/bin/bash
|
||||
echo "Test container is running"
|
||||
EOF
|
||||
|
||||
- name: Test docker-build action
|
||||
id: build
|
||||
uses: ./docker-build
|
||||
with:
|
||||
image-name: 'test-image'
|
||||
tag: 'test-tag'
|
||||
dockerfile: './Dockerfile'
|
||||
context: '.'
|
||||
platforms: 'linux/amd64'
|
||||
push: 'false'
|
||||
scan-image: 'false'
|
||||
|
||||
- name: Validate build outputs
|
||||
run: |
|
||||
echo "Build outputs:"
|
||||
echo " Image Digest: ${{ steps.build.outputs.image-digest }}"
|
||||
echo " Build Time: ${{ steps.build.outputs.build-time }}"
|
||||
echo " Platforms: ${{ steps.build.outputs.platforms }}"
|
||||
|
||||
# Validate that we got a digest
|
||||
if [[ -z "${{ steps.build.outputs.image-digest }}" ]]; then
|
||||
echo "❌ ERROR: No image digest output"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Validate digest format (sha256:...)
|
||||
if ! echo "${{ steps.build.outputs.image-digest }}" | grep -E '^sha256:[a-f0-9]{64}'; then
|
||||
echo "❌ ERROR: Invalid digest format: ${{ steps.build.outputs.image-digest }}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✅ Docker build validation passed"
|
||||
|
||||
test-docker-inputs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test Dockerfile
|
||||
run: |
|
||||
cat > Dockerfile <<EOF
|
||||
FROM alpine:3.19
|
||||
CMD ["echo", "test"]
|
||||
EOF
|
||||
|
||||
- name: Test with build-args
|
||||
id: build-with-args
|
||||
uses: ./docker-build
|
||||
with:
|
||||
image-name: 'test-build-args'
|
||||
tag: 'latest'
|
||||
dockerfile: './Dockerfile'
|
||||
context: '.'
|
||||
build-args: |
|
||||
ARG1=value1
|
||||
ARG2=value2
|
||||
platforms: 'linux/amd64'
|
||||
push: 'false'
|
||||
scan-image: 'false'
|
||||
|
||||
- name: Validate build-args handling
|
||||
run: |
|
||||
if [[ -z "${{ steps.build-with-args.outputs.image-digest }}" ]]; then
|
||||
echo "❌ ERROR: Build with build-args failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✅ Build-args handling validated"
|
||||
|
||||
test-platform-detection:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test Dockerfile
|
||||
run: |
|
||||
cat > Dockerfile <<EOF
|
||||
FROM alpine:3.19
|
||||
CMD ["echo", "multi-platform test"]
|
||||
EOF
|
||||
|
||||
- name: Test multi-platform build
|
||||
id: multi-platform
|
||||
uses: ./docker-build
|
||||
with:
|
||||
image-name: 'test-multiplatform'
|
||||
tag: 'latest'
|
||||
dockerfile: './Dockerfile'
|
||||
context: '.'
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
push: 'false'
|
||||
scan-image: 'false'
|
||||
|
||||
- name: Validate platform matrix output
|
||||
run: |
|
||||
echo "Platform Matrix: ${{ steps.multi-platform.outputs.platform-matrix }}"
|
||||
|
||||
# Check that we got platform results
|
||||
if [[ -z "${{ steps.multi-platform.outputs.platform-matrix }}" ]]; then
|
||||
echo "⚠️ WARNING: No platform matrix output (may be expected for local builds)"
|
||||
else
|
||||
echo "✅ Platform matrix generated"
|
||||
fi
|
||||
|
||||
echo "✅ Multi-platform build validated"
|
||||
|
||||
test-input-validation:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test invalid tag format
|
||||
id: invalid-tag
|
||||
uses: ./docker-build
|
||||
with:
|
||||
image-name: 'test-image'
|
||||
tag: 'INVALID TAG WITH SPACES'
|
||||
dockerfile: './Dockerfile'
|
||||
context: '.'
|
||||
platforms: 'linux/amd64'
|
||||
push: 'false'
|
||||
continue-on-error: true
|
||||
|
||||
- name: Validate tag validation
|
||||
run: |
|
||||
if [ "${{ steps.invalid-tag.outcome }}" != "failure" ]; then
|
||||
echo "❌ ERROR: Invalid tag should have failed validation"
|
||||
exit 1
|
||||
fi
|
||||
echo "✅ Tag validation works correctly"
|
||||
|
||||
- name: Test invalid image name
|
||||
id: invalid-image
|
||||
uses: ./docker-build
|
||||
with:
|
||||
image-name: 'UPPERCASE_NOT_ALLOWED'
|
||||
tag: 'latest'
|
||||
dockerfile: './Dockerfile'
|
||||
context: '.'
|
||||
platforms: 'linux/amd64'
|
||||
push: 'false'
|
||||
continue-on-error: true
|
||||
|
||||
- name: Validate image name validation
|
||||
run: |
|
||||
if [ "${{ steps.invalid-image.outcome }}" != "failure" ]; then
|
||||
echo "❌ ERROR: Invalid image name should have failed validation"
|
||||
exit 1
|
||||
fi
|
||||
echo "✅ Image name validation works correctly"
|
||||
_tests/integration/workflows/github-release-test.yml (new file, 440 lines)

@@ -0,0 +1,440 @@
|
||||
---
|
||||
name: Integration Test - GitHub Release
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'github-release/**'
|
||||
- '_tests/integration/workflows/github-release-test.yml'
|
||||
|
||||
jobs:
|
||||
test-github-release-validation:
|
||||
name: Test Input Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test invalid version format
|
||||
run: |
|
||||
VERSION='not.a.version'
|
||||
if [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Invalid version format should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Invalid version format correctly rejected"
|
||||
|
||||
- name: Test version with alphabetic characters
|
||||
run: |
|
||||
VERSION='abc.def.ghi'
|
||||
if [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Alphabetic version should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Alphabetic version correctly rejected"
|
||||
|
||||
- name: Test valid version formats
|
||||
run: |
|
||||
for version in "1.2.3" "v1.2.3" "1.0.0-alpha" "2.0.0+build"; do
|
||||
if ! [[ "$version" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Valid version '$version' should have passed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Valid version '$version' accepted"
|
||||
done
|
||||
|
||||
test-github-release-version-formats:
|
||||
name: Test Version Format Support
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test basic SemVer (dry run)
|
||||
run: |
|
||||
echo "Testing basic SemVer format: 1.2.3"
|
||||
VERSION="1.2.3"
|
||||
if ! [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Valid version rejected"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Basic SemVer format accepted"
|
||||
|
||||
- name: Test SemVer with v prefix
|
||||
run: |
|
||||
echo "Testing SemVer with v prefix: v1.2.3"
|
||||
VERSION="v1.2.3"
|
||||
if ! [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Valid version rejected"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ SemVer with v prefix accepted"
|
||||
|
||||
- name: Test prerelease version
|
||||
run: |
|
||||
echo "Testing prerelease version: 1.0.0-alpha.1"
|
||||
VERSION="1.0.0-alpha.1"
|
||||
if ! [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Valid prerelease version rejected"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Prerelease version accepted"
|
||||
|
||||
- name: Test version with build metadata
|
||||
run: |
|
||||
echo "Testing version with build metadata: 1.0.0+build.123"
|
||||
VERSION="1.0.0+build.123"
|
||||
if ! [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Valid build metadata version rejected"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Version with build metadata accepted"
|
||||
|
||||
- name: Test complex version
|
||||
run: |
|
||||
echo "Testing complex version: v2.1.0-rc.1+build.456"
|
||||
VERSION="v2.1.0-rc.1+build.456"
|
||||
if ! [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
|
||||
echo "❌ ERROR: Valid complex version rejected"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Complex version accepted"
|
||||
|
||||
  test-github-release-tool-availability:
    name: Test Tool Availability Checks
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test gh CLI detection logic
        run: |
          # Test the logic for checking gh availability
          # In actual action, this would fail if gh is not available
          if command -v gh >/dev/null 2>&1; then
            echo "✓ gh CLI is available in this environment"
            gh --version
          else
            echo "⚠️ gh CLI not available in test environment (would fail in actual action)"
            echo "✓ Tool detection logic works correctly"
          fi

      - name: Test jq detection logic
        run: |
          # Test the logic for checking jq availability
          # In actual action, this would fail if jq is not available
          if command -v jq >/dev/null 2>&1; then
            echo "✓ jq is available in this environment"
            jq --version
          else
            echo "⚠️ jq not available in test environment (would fail in actual action)"
            echo "✓ Tool detection logic works correctly"
          fi

      - name: Test tool requirement validation
        run: |
          # Verify the action correctly checks for required tools
          REQUIRED_TOOLS=("gh" "jq")
          echo "Testing tool requirement checks..."

          for tool in "${REQUIRED_TOOLS[@]}"; do
            if command -v "$tool" >/dev/null 2>&1; then
              echo "  ✓ $tool: available"
            else
              echo "  ⚠️ $tool: not available (action would fail at this check)"
            fi
          done

          echo "✓ Tool requirement validation logic verified"

      - name: Test gh authentication logic
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Test authentication detection logic
          if [[ -n "$GITHUB_TOKEN" ]]; then
            echo "✓ GITHUB_TOKEN environment variable is set"
          else
            echo "⚠️ GITHUB_TOKEN not set in test environment"
          fi

          # Test gh auth status check (without requiring it to pass)
          if command -v gh >/dev/null 2>&1; then
            if gh auth status >/dev/null 2>&1; then
              echo "✓ gh authentication successful"
            else
              echo "⚠️ gh auth check failed (expected without proper setup)"
            fi
          else
            echo "⚠️ gh not available, skipping auth check"
          fi

          echo "✓ Authentication detection logic verified"

  test-github-release-changelog-validation:
    name: Test Changelog Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test empty changelog (should trigger autogenerated notes)
        run: |
          echo "Testing empty changelog handling..."
          CHANGELOG=""
          if [[ -n "$CHANGELOG" ]]; then
            echo "❌ ERROR: Empty string should be empty"
            exit 1
          fi
          echo "✓ Empty changelog correctly detected"

      - name: Test normal changelog
        run: |
          echo "Testing normal changelog..."
          CHANGELOG="## Features
          - Added new feature
          - Improved performance"

          if [[ -z "$CHANGELOG" ]]; then
            echo "❌ ERROR: Changelog should not be empty"
            exit 1
          fi

          if [[ ${#CHANGELOG} -gt 10000 ]]; then
            echo "⚠️ Changelog is very long"
          fi

          echo "✓ Normal changelog processed correctly"

      - name: Test very long changelog warning
        run: |
          echo "Testing very long changelog..."
          # Create a changelog with >10000 characters
          CHANGELOG=$(printf 'A%.0s' {1..10001})

          if [[ ${#CHANGELOG} -le 10000 ]]; then
            echo "❌ ERROR: Test changelog should be >10000 chars"
            exit 1
          fi

          echo "✓ Long changelog warning would trigger (${#CHANGELOG} characters)"

  test-github-release-changelog-types:
    name: Test Changelog Content Types
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test multiline changelog
        run: |
          echo "Testing multiline changelog..."
          CHANGELOG="## Version 1.2.3

          ### Features
          - Feature A
          - Feature B

          ### Bug Fixes
          - Fix #123
          - Fix #456"

          echo "✓ Multiline changelog supported"

      - name: Test changelog with special characters
        run: |
          echo "Testing changelog with special characters..."
          CHANGELOG='## Release Notes

          Special chars: $, `, \, ", '\'', !, @, #, %, &, *, (, )'

          echo "✓ Special characters in changelog supported"

      - name: Test changelog with markdown
        run: |
          echo "Testing changelog with markdown..."
          CHANGELOG="## Changes

          **Bold** and *italic* text

          - [x] Task completed
          - [ ] Task pending

          \`\`\`bash
          echo 'code block'
          \`\`\`

          | Table | Header |
          |-------|--------|
          | Cell | Data |"

          echo "✓ Markdown in changelog supported"

  test-github-release-output-format:
    name: Test Output Format
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Verify output structure (mock test)
        run: |
          echo "Testing output structure..."

          # Check if jq is available for this test
          if ! command -v jq >/dev/null 2>&1; then
            echo "⚠️ jq not available, skipping JSON parsing test"
            echo "✓ Output format validation logic verified (jq would be required in actual action)"
            exit 0
          fi

          # Mock release info JSON (similar to gh release view output)
          RELEASE_INFO='{
            "url": "https://github.com/owner/repo/releases/tag/v1.0.0",
            "id": "RE_12345",
            "uploadUrl": "https://uploads.github.com/repos/owner/repo/releases/12345/assets{?name,label}"
          }'

          # Extract outputs
          release_url=$(echo "$RELEASE_INFO" | jq -r '.url')
          release_id=$(echo "$RELEASE_INFO" | jq -r '.id')
          upload_url=$(echo "$RELEASE_INFO" | jq -r '.uploadUrl')

          echo "Release URL: $release_url"
          echo "Release ID: $release_id"
          echo "Upload URL: $upload_url"

          # Verify output format
          if [[ ! "$release_url" =~ ^https://github\.com/.*/releases/tag/.* ]]; then
            echo "❌ ERROR: Invalid release URL format"
            exit 1
          fi

          if [[ ! "$release_id" =~ ^RE_.* ]]; then
            echo "❌ ERROR: Invalid release ID format"
            exit 1
          fi

          if [[ ! "$upload_url" =~ ^https://uploads\.github\.com/.* ]]; then
            echo "❌ ERROR: Invalid upload URL format"
            exit 1
          fi

          echo "✓ Output format validation passed"

  test-github-release-integration-scenarios:
    name: Test Integration Scenarios
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test release workflow without actual creation
        run: |
          echo "Simulating release workflow..."

          # Validate version
          VERSION="v1.2.3-test"
          if ! [[ "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
            echo "❌ ERROR: Version validation failed"
            exit 1
          fi
          echo "✓ Version validation passed"

          # Check tool availability (non-fatal for test environment)
          gh_available=false
          jq_available=false

          if command -v gh >/dev/null 2>&1; then
            echo "✓ gh CLI is available"
            gh_available=true
          else
            echo "⚠️ gh not available (would fail in actual action)"
          fi

          if command -v jq >/dev/null 2>&1; then
            echo "✓ jq is available"
            jq_available=true
          else
            echo "⚠️ jq not available (would fail in actual action)"
          fi

          # Create mock changelog
          CHANGELOG="Test release notes"
          NOTES_FILE="$(mktemp)"
          printf '%s' "$CHANGELOG" > "$NOTES_FILE"

          # Verify changelog file
          if [[ ! -f "$NOTES_FILE" ]]; then
            echo "❌ ERROR: Changelog file not created"
            exit 1
          fi

          CONTENT=$(cat "$NOTES_FILE")
          if [[ "$CONTENT" != "$CHANGELOG" ]]; then
            echo "❌ ERROR: Changelog content mismatch"
            exit 1
          fi

          rm -f "$NOTES_FILE"

          echo "✓ Release workflow simulation passed"

      - name: Test autogenerated changelog scenario
        run: |
          echo "Testing autogenerated changelog scenario..."

          VERSION="v2.0.0"
          CHANGELOG=""

          if [[ -z "$CHANGELOG" ]]; then
            echo "✓ Would use --generate-notes flag"
          else
            echo "✓ Would use custom changelog"
          fi

      - name: Test custom changelog scenario
        run: |
          echo "Testing custom changelog scenario..."

          VERSION="v2.1.0"
          CHANGELOG="## Custom Release Notes

          This release includes:
          - Feature X
          - Bug fix Y"

          if [[ -n "$CHANGELOG" ]]; then
            echo "✓ Would use --notes-file with custom changelog"
          else
            echo "✓ Would use --generate-notes"
          fi

  integration-test-summary:
    name: Integration Test Summary
    runs-on: ubuntu-latest
    needs:
      - test-github-release-validation
      - test-github-release-version-formats
      - test-github-release-tool-availability
      - test-github-release-changelog-validation
      - test-github-release-changelog-types
      - test-github-release-output-format
      - test-github-release-integration-scenarios
    steps:
      - name: Summary
        run: |
          echo "=========================================="
          echo "GitHub Release Integration Tests - PASSED"
          echo "=========================================="
          echo ""
          echo "✓ Input validation tests"
          echo "✓ Version format tests"
          echo "✓ Tool availability tests"
          echo "✓ Changelog validation tests"
          echo "✓ Changelog content tests"
          echo "✓ Output format tests"
          echo "✓ Integration scenario tests"
          echo ""
          echo "All github-release integration tests completed successfully!"

316
_tests/integration/workflows/lint-fix-chain-test.yml
Normal file
@@ -0,0 +1,316 @@
---
name: Test Lint & Fix Action Chains
on:
  workflow_dispatch:
  push:
    paths:
      - 'eslint-check/**'
      - 'eslint-fix/**'
      - 'prettier-check/**'
      - 'prettier-fix/**'
      - 'node-setup/**'
      - 'common-cache/**'
      - '_tests/integration/workflows/lint-fix-chain-test.yml'

jobs:
  test-eslint-chain:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test JavaScript files
        run: |
          mkdir -p test-project/src

          # Create package.json
          cat > test-project/package.json <<EOF
          {
            "name": "test-project",
            "version": "1.0.0",
            "devDependencies": {
              "eslint": "^8.0.0"
            }
          }
          EOF

          # Create .eslintrc.json
          cat > test-project/.eslintrc.json <<EOF
          {
            "env": {
              "node": true,
              "es2021": true
            },
            "extends": "eslint:recommended",
            "parserOptions": {
              "ecmaVersion": 12
            },
            "rules": {
              "semi": ["error", "always"],
              "quotes": ["error", "single"]
            }
          }
          EOF

          # Create test file with linting issues
          cat > test-project/src/test.js <<EOF
          const x = "double quotes"
          console.log(x)
          EOF

      - name: Setup Node.js
        uses: ./node-setup
        with:
          node-version: '18'
          working-directory: './test-project'

      - name: Test eslint-check (should find errors)
        id: eslint-check
        uses: ./eslint-check
        with:
          working-directory: './test-project'
        continue-on-error: true

      - name: Validate eslint-check found issues
        run: |
          echo "ESLint check outcome: ${{ steps.eslint-check.outcome }}"
          echo "Error count: ${{ steps.eslint-check.outputs.error-count }}"
          echo "Warning count: ${{ steps.eslint-check.outputs.warning-count }}"

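          # NOTE: steps.<id>.outcome reports the step's own result before
          # continue-on-error is applied, so a failing check still shows up here;
          # steps.<id>.conclusion would report the overridden (success) result.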
          # Check should fail or find issues
          if [[ "${{ steps.eslint-check.outcome }}" == "success" ]]; then
            if [[ "${{ steps.eslint-check.outputs.error-count }}" == "0" ]]; then
              echo "⚠️ WARNING: Expected to find linting errors but found none"
            fi
          fi

          echo "✅ ESLint check validated"

      - name: Test eslint-fix (should fix issues)
        id: eslint-fix
        uses: ./eslint-fix
        with:
          working-directory: './test-project'
          token: ${{ github.token }}
          email: 'test@example.com'
          username: 'test-user'

      - name: Validate eslint-fix ran
        run: |
          echo "Fixed count: ${{ steps.eslint-fix.outputs.fixed-count }}"
          echo "Files fixed: ${{ steps.eslint-fix.outputs.files-fixed }}"

          # Check that fixes were attempted
          if [[ -n "${{ steps.eslint-fix.outputs.fixed-count }}" ]]; then
            echo "✅ ESLint fixed ${{ steps.eslint-fix.outputs.fixed-count }} issues"
          else
            echo "⚠️ No fix count reported (may be expected if no fixable issues)"
          fi

          echo "✅ ESLint fix validated"

  test-prettier-chain:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test files for Prettier
        run: |
          mkdir -p test-prettier

          # Create package.json
          cat > test-prettier/package.json <<EOF
          {
            "name": "test-prettier",
            "version": "1.0.0",
            "devDependencies": {
              "prettier": "^3.0.0"
            }
          }
          EOF

          # Create .prettierrc
          cat > test-prettier/.prettierrc <<EOF
          {
            "semi": true,
            "singleQuote": true,
            "printWidth": 80
          }
          EOF

          # Create badly formatted file
          cat > test-prettier/test.js <<EOF
          const x={"key":"value","another":"data"}
          console.log(x)
          EOF

          # Create badly formatted JSON
          cat > test-prettier/test.json <<EOF
          {"key":"value","nested":{"data":"here"}}
          EOF

      - name: Setup Node.js for Prettier
        uses: ./node-setup
        with:
          node-version: '18'
          working-directory: './test-prettier'

      - name: Test prettier-check (should find issues)
        id: prettier-check
        uses: ./prettier-check
        with:
          working-directory: './test-prettier'
        continue-on-error: true

      - name: Validate prettier-check found issues
        run: |
          echo "Prettier check outcome: ${{ steps.prettier-check.outcome }}"

          # Check should find formatting issues
          if [[ "${{ steps.prettier-check.outcome }}" == "failure" ]]; then
            echo "✅ Prettier correctly found formatting issues"
          else
            echo "⚠️ WARNING: Expected Prettier to find formatting issues"
          fi

      - name: Test prettier-fix (should fix issues)
        id: prettier-fix
        uses: ./prettier-fix
        with:
          working-directory: './test-prettier'
          token: ${{ github.token }}
          email: 'test@example.com'
          username: 'test-user'

      - name: Validate prettier-fix ran
        run: |
          echo "Prettier fix completed"

          # Check that files exist and have been processed
          if [[ -f "test-prettier/test.js" ]]; then
            echo "✅ Test file exists after Prettier fix"
          else
            echo "❌ ERROR: Test file missing after Prettier fix"
            exit 1
          fi

          echo "✅ Prettier fix validated"

  test-action-chain-integration:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create comprehensive test project
        run: |
          mkdir -p test-chain/src

          # Create package.json with both ESLint and Prettier
          cat > test-chain/package.json <<EOF
          {
            "name": "test-chain",
            "version": "1.0.0",
            "devDependencies": {
              "eslint": "^8.0.0",
              "prettier": "^3.0.0"
            }
          }
          EOF

          # Create .eslintrc.json
          cat > test-chain/.eslintrc.json <<EOF
          {
            "env": {
              "node": true,
              "es2021": true
            },
            "extends": "eslint:recommended",
            "parserOptions": {
              "ecmaVersion": 12
            },
            "rules": {
              "semi": ["error", "always"],
              "quotes": ["error", "single"]
            }
          }
          EOF

          # Create .prettierrc
          cat > test-chain/.prettierrc <<EOF
          {
            "semi": true,
            "singleQuote": true,
            "printWidth": 80
          }
          EOF

          # Create test file with both linting and formatting issues
          cat > test-chain/src/app.js <<EOF
          const message="hello world"
          function greet(){console.log(message)}
          greet()
          EOF

      - name: Setup Node.js
        uses: ./node-setup
        with:
          node-version: '18'
          working-directory: './test-chain'

      - name: Run ESLint check
        id: lint-check
        uses: ./eslint-check
        with:
          working-directory: './test-chain'
        continue-on-error: true

      - name: Run Prettier check
        id: format-check
        uses: ./prettier-check
        with:
          working-directory: './test-chain'
        continue-on-error: true

      - name: Run ESLint fix
        id: lint-fix
        uses: ./eslint-fix
        with:
          working-directory: './test-chain'
          token: ${{ github.token }}
          email: 'test@example.com'
          username: 'test-user'

      - name: Run Prettier fix
        id: format-fix
        uses: ./prettier-fix
        with:
          working-directory: './test-chain'
          token: ${{ github.token }}
          email: 'test@example.com'
          username: 'test-user'

      - name: Validate complete chain
        run: |
          echo "=== Action Chain Results ==="
          echo "Lint Check: ${{ steps.lint-check.outcome }}"
          echo "Format Check: ${{ steps.format-check.outcome }}"
          echo "Lint Fix: ${{ steps.lint-fix.outcome }}"
          echo "Format Fix: ${{ steps.format-fix.outcome }}"

          # Validate that all steps ran
          steps_run=0
          [[ "${{ steps.lint-check.outcome }}" != "skipped" ]] && ((steps_run++))
          [[ "${{ steps.format-check.outcome }}" != "skipped" ]] && ((steps_run++))
          [[ "${{ steps.lint-fix.outcome }}" != "skipped" ]] && ((steps_run++))
          [[ "${{ steps.format-fix.outcome }}" != "skipped" ]] && ((steps_run++))

          if [[ $steps_run -eq 4 ]]; then
            echo "✅ Complete action chain executed successfully"
          else
            echo "❌ ERROR: Not all steps in chain executed (ran: $steps_run/4)"
            exit 1
          fi

          echo "✅ Action chain integration validated"

513
_tests/integration/workflows/node-setup-test.yml
Normal file
@@ -0,0 +1,513 @@
---
name: Integration Test - Node Setup
on:
  workflow_dispatch:
  push:
    paths:
      - 'node-setup/**'
      - 'version-file-parser/**'
      - 'common-cache/**'
      - 'common-retry/**'
      - '_tests/integration/workflows/node-setup-test.yml'

jobs:
  test-node-setup-version-validation:
    name: Test Version Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test invalid default version format (alphabetic)
        run: |
          VERSION="abc"
          if [[ "$VERSION" =~ ^[0-9]+(\.[0-9]+(\.[0-9]+)?)?$ ]]; then
            echo "❌ ERROR: Should reject alphabetic version"
            exit 1
          fi
          echo "✓ Alphabetic version correctly rejected"

      - name: Test invalid default version (too low)
        run: |
          VERSION="10"
          major=$(echo "$VERSION" | cut -d'.' -f1)
          if [ "$major" -lt 14 ] || [ "$major" -gt 30 ]; then
            echo "✓ Version $VERSION correctly rejected (major < 14)"
          else
            echo "❌ ERROR: Should reject Node.js $VERSION"
            exit 1
          fi

      - name: Test invalid default version (too high)
        run: |
          VERSION="50"
          major=$(echo "$VERSION" | cut -d'.' -f1)
          if [ "$major" -lt 14 ] || [ "$major" -gt 30 ]; then
            echo "✓ Version $VERSION correctly rejected (major > 30)"
          else
            echo "❌ ERROR: Should reject Node.js $VERSION"
            exit 1
          fi

      - name: Test valid version formats
        run: |
          for version in "20" "20.9" "20.9.0" "18" "22.1.0"; do
            if [[ "$version" =~ ^[0-9]+(\.[0-9]+(\.[0-9]+)?)?$ ]]; then
              major=$(echo "$version" | cut -d'.' -f1)
              if [ "$major" -ge 14 ] && [ "$major" -le 30 ]; then
                echo "✓ Version $version accepted"
              else
                echo "❌ ERROR: Version $version should be accepted"
                exit 1
              fi
            else
              echo "❌ ERROR: Version $version format validation failed"
              exit 1
            fi
          done

  test-node-setup-package-manager-validation:
    name: Test Package Manager Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test valid package managers
        run: |
          for pm in "npm" "yarn" "pnpm" "bun" "auto"; do
            case "$pm" in
              "npm"|"yarn"|"pnpm"|"bun"|"auto")
                echo "✓ Package manager $pm accepted"
                ;;
              *)
                echo "❌ ERROR: Valid package manager $pm rejected"
                exit 1
                ;;
            esac
          done

      - name: Test invalid package manager
        run: |
          PM="invalid-pm"
          case "$PM" in
            "npm"|"yarn"|"pnpm"|"bun"|"auto")
              echo "❌ ERROR: Invalid package manager should be rejected"
              exit 1
              ;;
            *)
              echo "✓ Invalid package manager correctly rejected"
              ;;
          esac

  test-node-setup-url-validation:
    name: Test URL Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test valid registry URLs
        run: |
          for url in "https://registry.npmjs.org" "http://localhost:4873" "https://npm.custom.com/"; do
            if [[ "$url" == "https://"* ]] || [[ "$url" == "http://"* ]]; then
              echo "✓ Registry URL $url accepted"
            else
              echo "❌ ERROR: Valid URL $url rejected"
              exit 1
            fi
          done

      - name: Test invalid registry URLs
        run: |
          for url in "ftp://registry.com" "not-a-url" "registry.com"; do
            if [[ "$url" == "https://"* ]] || [[ "$url" == "http://"* ]]; then
              echo "❌ ERROR: Invalid URL $url should be rejected"
              exit 1
            else
              echo "✓ Invalid URL $url correctly rejected"
            fi
          done

  test-node-setup-retries-validation:
    name: Test Retries Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test valid retry counts
        run: |
          for retries in "1" "3" "5" "10"; do
            if [[ "$retries" =~ ^[0-9]+$ ]] && [ "$retries" -gt 0 ] && [ "$retries" -le 10 ]; then
              echo "✓ Max retries $retries accepted"
            else
              echo "❌ ERROR: Valid retry count $retries rejected"
              exit 1
            fi
          done

      - name: Test invalid retry counts
        run: |
          for retries in "0" "11" "abc" "-1"; do
            if [[ "$retries" =~ ^[0-9]+$ ]] && [ "$retries" -gt 0 ] && [ "$retries" -le 10 ]; then
              echo "❌ ERROR: Invalid retry count $retries should be rejected"
              exit 1
            else
              echo "✓ Invalid retry count $retries correctly rejected"
            fi
          done

  test-node-setup-boolean-validation:
    name: Test Boolean Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test valid boolean values
        run: |
          for value in "true" "false"; do
            if [[ "$value" == "true" ]] || [[ "$value" == "false" ]]; then
              echo "✓ Boolean value $value accepted"
            else
              echo "❌ ERROR: Valid boolean $value rejected"
              exit 1
            fi
          done

      - name: Test invalid boolean values
        run: |
          for value in "yes" "no" "1" "0" "True" "FALSE" ""; do
            if [[ "$value" != "true" ]] && [[ "$value" != "false" ]]; then
              echo "✓ Invalid boolean value '$value' correctly rejected"
            else
              echo "❌ ERROR: Invalid boolean $value should be rejected"
              exit 1
            fi
          done

  test-node-setup-token-validation:
    name: Test Auth Token Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test injection pattern detection
        run: |
          for token in "token;malicious" "token&&command" "token|pipe"; do
            if [[ "$token" == *";"* ]] || [[ "$token" == *"&&"* ]] || [[ "$token" == *"|"* ]]; then
              echo "✓ Injection pattern in token correctly detected"
            else
              echo "❌ ERROR: Should detect injection pattern in: $token"
              exit 1
            fi
          done

      - name: Test valid tokens
        run: |
          for token in "npm_AbCdEf1234567890" "github_pat_12345abcdef" "simple-token"; do
            if [[ "$token" == *";"* ]] || [[ "$token" == *"&&"* ]] || [[ "$token" == *"|"* ]]; then
              echo "❌ ERROR: Valid token should not be rejected: $token"
              exit 1
            else
              echo "✓ Valid token accepted"
            fi
          done

  test-node-setup-package-manager-resolution:
    name: Test Package Manager Resolution
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test auto detection with detected PM
        run: |
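          # Resolution order mirrored from node-setup (a sketch of the intended logic,
          # not the action itself): an explicit package-manager input wins, otherwise
          # the detected value (assumed to come from a lockfile or the packageManager
          # field) is used, and npm is the final fallback.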
INPUT_PM="auto"
|
||||
DETECTED_PM="pnpm"
|
||||
|
||||
if [ "$INPUT_PM" = "auto" ]; then
|
||||
if [ -n "$DETECTED_PM" ]; then
|
||||
FINAL_PM="$DETECTED_PM"
|
||||
else
|
||||
FINAL_PM="npm"
|
||||
fi
|
||||
else
|
||||
FINAL_PM="$INPUT_PM"
|
||||
fi
|
||||
|
||||
if [[ "$FINAL_PM" != "pnpm" ]]; then
|
||||
echo "❌ ERROR: Should use detected PM (pnpm)"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Auto-detected package manager correctly resolved"
|
||||
|
||||
- name: Test auto detection without detected PM
|
||||
run: |
|
||||
INPUT_PM="auto"
|
||||
DETECTED_PM=""
|
||||
|
||||
if [ "$INPUT_PM" = "auto" ]; then
|
||||
if [ -n "$DETECTED_PM" ]; then
|
||||
FINAL_PM="$DETECTED_PM"
|
||||
else
|
||||
FINAL_PM="npm"
|
||||
fi
|
||||
else
|
||||
FINAL_PM="$INPUT_PM"
|
||||
fi
|
||||
|
||||
if [[ "$FINAL_PM" != "npm" ]]; then
|
||||
echo "❌ ERROR: Should default to npm"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Defaults to npm when no detection"
|
||||
|
||||
- name: Test explicit package manager
|
||||
run: |
|
||||
INPUT_PM="yarn"
|
||||
DETECTED_PM="pnpm"
|
||||
|
||||
if [ "$INPUT_PM" = "auto" ]; then
|
||||
if [ -n "$DETECTED_PM" ]; then
|
||||
FINAL_PM="$DETECTED_PM"
|
||||
else
|
||||
FINAL_PM="npm"
|
||||
fi
|
||||
else
|
||||
FINAL_PM="$INPUT_PM"
|
||||
fi
|
||||
|
||||
if [[ "$FINAL_PM" != "yarn" ]]; then
|
||||
echo "❌ ERROR: Should use explicit PM (yarn)"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Explicit package manager correctly used"
|
||||
|
||||
test-node-setup-feature-detection:
|
||||
name: Test Feature Detection
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test package.json with ESM
|
||||
run: |
|
||||
mkdir -p test-esm
|
||||
cd test-esm
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "test-esm",
|
||||
"version": "1.0.0",
|
||||
"type": "module"
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Test ESM detection
|
||||
run: |
|
||||
cd test-esm
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
pkg_type=$(jq -r '.type // "commonjs"' package.json 2>/dev/null)
|
||||
if [[ "$pkg_type" == "module" ]]; then
|
||||
echo "✓ ESM support correctly detected"
|
||||
else
|
||||
echo "❌ ERROR: Should detect ESM support"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "⚠️ jq not available, skipping ESM detection test"
|
||||
echo "✓ ESM detection logic verified (jq would be required in actual action)"
|
||||
fi
|
||||
|
||||
- name: Create test with TypeScript
|
||||
run: |
|
||||
mkdir -p test-ts
|
||||
cd test-ts
|
||||
touch tsconfig.json
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "test-ts",
|
||||
"devDependencies": {
|
||||
"typescript": "^5.0.0"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Test TypeScript detection
|
||||
run: |
|
||||
cd test-ts
|
||||
typescript_support="false"
|
||||
if [ -f tsconfig.json ]; then
|
||||
typescript_support="true"
|
||||
fi
|
||||
if [[ "$typescript_support" != "true" ]]; then
|
||||
echo "❌ ERROR: Should detect TypeScript"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ TypeScript support correctly detected"
|
||||
|
||||
- name: Create test with frameworks
|
||||
run: |
|
||||
mkdir -p test-frameworks
|
||||
cd test-frameworks
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "test-frameworks",
|
||||
"dependencies": {
|
||||
"react": "^18.0.0",
|
||||
"next": "^14.0.0"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Test framework detection
|
||||
run: |
|
||||
cd test-frameworks
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
has_next=$(jq -e '.dependencies.next or .devDependencies.next' package.json >/dev/null 2>&1 && echo "yes" || echo "no")
|
||||
has_react=$(jq -e '.dependencies.react or .devDependencies.react' package.json >/dev/null 2>&1 && echo "yes" || echo "no")
|
||||
|
||||
if [[ "$has_next" == "yes" ]] && [[ "$has_react" == "yes" ]]; then
|
||||
echo "✓ Frameworks (Next.js, React) correctly detected"
|
||||
else
|
||||
echo "❌ ERROR: Should detect Next.js and React"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "⚠️ jq not available, skipping framework detection test"
|
||||
echo "✓ Framework detection logic verified (jq would be required in actual action)"
|
||||
fi
|
||||
|
||||
test-node-setup-security:
|
||||
name: Test Security Measures
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test token sanitization
|
||||
run: |
|
||||
TOKEN="test-token
|
||||
with-newline"
|
||||
|
||||
# Should remove newlines
|
||||
sanitized=$(echo "$TOKEN" | tr -d '\n\r')
|
||||
|
||||
if [[ "$sanitized" == *$'\n'* ]] || [[ "$sanitized" == *$'\r'* ]]; then
|
||||
echo "❌ ERROR: Newlines not removed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Token sanitization works correctly"
|
||||
|
||||
- name: Test package manager sanitization
|
||||
run: |
|
||||
PM="npm
|
||||
with-newline"
|
||||
|
||||
# Should remove newlines
|
||||
sanitized=$(echo "$PM" | tr -d '\n\r')
|
||||
|
||||
if [[ "$sanitized" == *$'\n'* ]] || [[ "$sanitized" == *$'\r'* ]]; then
|
||||
echo "❌ ERROR: Newlines not removed from PM"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Package manager sanitization works correctly"
|
||||
|
||||
test-node-setup-integration-workflow:
|
||||
name: Test Integration Workflow
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Simulate complete workflow
|
||||
run: |
|
||||
echo "=== Simulating Node Setup Workflow ==="
|
||||
|
||||
# 1. Validation
|
||||
echo "Step 1: Validate inputs"
|
||||
DEFAULT_VERSION="20"
|
||||
PACKAGE_MANAGER="npm"
|
||||
REGISTRY_URL="https://registry.npmjs.org"
|
||||
CACHE="true"
|
||||
INSTALL="true"
|
||||
MAX_RETRIES="3"
|
||||
echo "✓ Inputs validated"
|
||||
|
||||
# 2. Version parsing
|
||||
echo "Step 2: Parse Node.js version"
|
||||
NODE_VERSION="20.9.0"
|
||||
echo "✓ Version parsed: $NODE_VERSION"
|
||||
|
||||
# 3. Package manager resolution
|
||||
echo "Step 3: Resolve package manager"
|
||||
if [ "$PACKAGE_MANAGER" = "auto" ]; then
|
||||
FINAL_PM="npm"
|
||||
else
|
||||
FINAL_PM="$PACKAGE_MANAGER"
|
||||
fi
|
||||
echo "✓ Package manager resolved: $FINAL_PM"
|
||||
|
||||
# 4. Setup Node.js
|
||||
echo "Step 4: Setup Node.js $NODE_VERSION"
|
||||
if command -v node >/dev/null 2>&1; then
|
||||
echo "✓ Node.js available: $(node --version)"
|
||||
fi
|
||||
|
||||
# 5. Enable Corepack
|
||||
echo "Step 5: Enable Corepack"
|
||||
if command -v corepack >/dev/null 2>&1; then
|
||||
echo "✓ Corepack available"
|
||||
else
|
||||
echo "⚠️ Corepack not available in test environment"
|
||||
fi
|
||||
|
||||
# 6. Cache dependencies
|
||||
if [[ "$CACHE" == "true" ]]; then
|
||||
echo "Step 6: Cache dependencies"
|
||||
echo "✓ Would use common-cache action"
|
||||
fi
|
||||
|
||||
# 7. Install dependencies
|
||||
if [[ "$INSTALL" == "true" ]]; then
|
||||
echo "Step 7: Install dependencies"
|
||||
echo "✓ Would run: $FINAL_PM install"
|
||||
fi
|
||||
|
||||
echo "=== Workflow simulation completed ==="
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-node-setup-version-validation
|
||||
- test-node-setup-package-manager-validation
|
||||
- test-node-setup-url-validation
|
||||
- test-node-setup-retries-validation
|
||||
- test-node-setup-boolean-validation
|
||||
- test-node-setup-token-validation
|
||||
- test-node-setup-package-manager-resolution
|
||||
- test-node-setup-feature-detection
|
||||
- test-node-setup-security
|
||||
- test-node-setup-integration-workflow
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "Node Setup Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Version validation tests"
|
||||
echo "✓ Package manager validation tests"
|
||||
echo "✓ URL validation tests"
|
||||
echo "✓ Retries validation tests"
|
||||
echo "✓ Boolean validation tests"
|
||||
echo "✓ Token validation tests"
|
||||
echo "✓ Package manager resolution tests"
|
||||
echo "✓ Feature detection tests"
|
||||
echo "✓ Security measure tests"
|
||||
echo "✓ Integration workflow tests"
|
||||
echo ""
|
||||
echo "All node-setup integration tests completed successfully!"
|
||||
353
_tests/integration/workflows/npm-publish-test.yml
Normal file
@@ -0,0 +1,353 @@
---
name: Integration Test - NPM Publish
on:
  workflow_dispatch:
  push:
    paths:
      - 'npm-publish/**'
      - 'node-setup/**'
      - '_tests/integration/workflows/npm-publish-test.yml'

jobs:
  test-npm-publish-validation:
    name: Test Input Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test package.json
        run: |
          mkdir -p test-package
          cd test-package
          cat > package.json <<'EOF'
          {
            "name": "@test/integration-test",
            "version": "1.0.0",
            "description": "Test package for npm-publish integration",
            "main": "index.js"
          }
          EOF
          echo "module.exports = { test: true };" > index.js

      - name: Test valid inputs (should succeed validation)
        id: valid-test
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: '@test'
          package-version: '1.0.0'
          npm_token: 'test-token-12345678'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-package

      - name: Validate success (validation only)
        run: |
          # This will fail at publish step but validation should pass
          echo "✓ Input validation passed for valid inputs"

      - name: Test invalid registry URL
        id: invalid-registry
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'not-a-url'
          scope: '@test'
          package-version: '1.0.0'
          npm_token: 'test-token'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-package

      - name: Verify invalid registry URL failed
        run: |
          if [[ "${{ steps.invalid-registry.outcome }}" == "success" ]]; then
            echo "❌ ERROR: Invalid registry URL should have failed"
            exit 1
          fi
          echo "✓ Invalid registry URL correctly rejected"

      - name: Test invalid version format
        id: invalid-version
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: '@test'
          package-version: 'not.a.version'
          npm_token: 'test-token'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-package

      - name: Verify invalid version failed
        run: |
          if [[ "${{ steps.invalid-version.outcome }}" == "success" ]]; then
            echo "❌ ERROR: Invalid version should have failed"
            exit 1
          fi
          echo "✓ Invalid version format correctly rejected"

      - name: Test invalid scope format
        id: invalid-scope
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: 'invalid-scope'
          package-version: '1.0.0'
          npm_token: 'test-token'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-package

      - name: Verify invalid scope failed
        run: |
          if [[ "${{ steps.invalid-scope.outcome }}" == "success" ]]; then
            echo "❌ ERROR: Invalid scope format should have failed"
            exit 1
          fi
          echo "✓ Invalid scope format correctly rejected"

      - name: Test missing npm token
        id: missing-token
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: '@test'
          package-version: '1.0.0'
          npm_token: ''
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-package

      - name: Verify missing token failed
        run: |
          if [[ "${{ steps.missing-token.outcome }}" == "success" ]]; then
            echo "❌ ERROR: Missing token should have failed"
            exit 1
          fi
          echo "✓ Missing NPM token correctly rejected"

  test-npm-publish-package-validation:
    name: Test Package Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test missing package.json
        id: missing-package
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: '@test'
          package-version: '1.0.0'
          npm_token: 'test-token'

      - name: Verify missing package.json failed
        run: |
          if [[ "${{ steps.missing-package.outcome }}" == "success" ]]; then
            echo "❌ ERROR: Missing package.json should have failed"
            exit 1
          fi
          echo "✓ Missing package.json correctly detected"

      - name: Create test package with version mismatch
        run: |
          mkdir -p test-mismatch
          cd test-mismatch
          cat > package.json <<'EOF'
          {
            "name": "@test/mismatch-test",
            "version": "2.0.0",
            "description": "Test version mismatch"
          }
          EOF

      - name: Test version mismatch detection
        id: version-mismatch
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: '@test'
          package-version: '1.0.0'
          npm_token: 'test-token'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-mismatch

      - name: Verify version mismatch failed
        run: |
          if [[ "${{ steps.version-mismatch.outcome }}" == "success" ]]; then
            echo "❌ ERROR: Version mismatch should have been detected"
            exit 1
          fi
          echo "✓ Version mismatch correctly detected"

  test-npm-publish-version-formats:
    name: Test Version Format Support
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test SemVer with v prefix
        run: |
          mkdir -p test-v-prefix
          cd test-v-prefix
          cat > package.json <<'EOF'
          {
            "name": "@test/v-prefix",
            "version": "1.2.3",
            "description": "Test v prefix"
          }
          EOF

          # Should accept v1.2.3 and strip to 1.2.3
          echo "Testing v prefix version..."

      - name: Test prerelease versions
        run: |
          mkdir -p test-prerelease
          cd test-prerelease
          cat > package.json <<'EOF'
          {
            "name": "@test/prerelease",
            "version": "1.0.0-alpha.1",
            "description": "Test prerelease"
          }
          EOF

          echo "Testing prerelease version format..."

      - name: Test build metadata
        run: |
          mkdir -p test-build
          cd test-build
          cat > package.json <<'EOF'
          {
            "name": "@test/build-meta",
            "version": "1.0.0+build.123",
            "description": "Test build metadata"
          }
          EOF

          echo "Testing build metadata format..."

  test-npm-publish-outputs:
    name: Test Output Values
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test package
        run: |
          mkdir -p test-outputs
          cd test-outputs
          cat > package.json <<'EOF'
          {
            "name": "@test/outputs-test",
            "version": "1.5.0",
            "description": "Test outputs"
          }
          EOF

      - name: Run npm-publish (validation only)
        id: publish-outputs
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://npm.custom.com/'
          scope: '@custom-scope'
          package-version: '1.5.0'
          npm_token: 'test-token-outputs'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-outputs

      - name: Verify outputs
        run: |
          registry="${{ steps.publish-outputs.outputs.registry-url }}"
          scope="${{ steps.publish-outputs.outputs.scope }}"
          version="${{ steps.publish-outputs.outputs.package-version }}"

          echo "Registry URL: $registry"
          echo "Scope: $scope"
          echo "Version: $version"

          # Verify output values match inputs
          if [[ "$registry" != "https://npm.custom.com/" ]]; then
            echo "❌ ERROR: Registry URL output mismatch"
            exit 1
          fi

          if [[ "$scope" != "@custom-scope" ]]; then
            echo "❌ ERROR: Scope output mismatch"
            exit 1
          fi

          if [[ "$version" != "1.5.0" ]]; then
            echo "❌ ERROR: Version output mismatch"
            exit 1
          fi

          echo "✓ All outputs match expected values"

  test-npm-publish-secret-masking:
    name: Test Secret Masking
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test package
        run: |
          mkdir -p test-secrets
          cd test-secrets
          cat > package.json <<'EOF'
          {
            "name": "@test/secrets-test",
            "version": "1.0.0"
          }
          EOF

      - name: Test that token gets masked
        id: test-masking
        uses: ./npm-publish
        continue-on-error: true
        with:
          registry-url: 'https://registry.npmjs.org/'
          scope: '@test'
          package-version: '1.0.0'
          npm_token: 'super-secret-token-12345'
        env:
          GITHUB_WORKSPACE: ${{ github.workspace }}/test-secrets

      - name: Verify token is not in logs
        run: |
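          # Masking cannot be asserted from inside the job; the assumption being
          # exercised is that npm-publish registers the token (e.g. via ::add-mask::)
          # so GitHub redacts it from step logs.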
echo "✓ Token should be masked in GitHub Actions logs"
|
||||
echo "✓ Secret masking test completed"
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-npm-publish-validation
|
||||
- test-npm-publish-package-validation
|
||||
- test-npm-publish-version-formats
|
||||
- test-npm-publish-outputs
|
||||
- test-npm-publish-secret-masking
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "NPM Publish Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Input validation tests"
|
||||
echo "✓ Package validation tests"
|
||||
echo "✓ Version format tests"
|
||||
echo "✓ Output verification tests"
|
||||
echo "✓ Secret masking tests"
|
||||
echo ""
|
||||
echo "All npm-publish integration tests completed successfully!"
|
||||
435
_tests/integration/workflows/pre-commit-test.yml
Normal file
@@ -0,0 +1,435 @@
---
name: Integration Test - Pre-commit
on:
  workflow_dispatch:
  push:
    paths:
      - 'pre-commit/**'
      - 'set-git-config/**'
      - 'validate-inputs/**'
      - '_tests/integration/workflows/pre-commit-test.yml'

jobs:
  test-pre-commit-validation:
    name: Test Input Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test with default inputs (should pass validation)
        id: default-inputs
        uses: ./pre-commit
        continue-on-error: true
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Verify validation passed
        run: |
          echo "✓ Default inputs validation completed"

      - name: Test with custom config file
        id: custom-config
        uses: ./pre-commit
        continue-on-error: true
        with:
          pre-commit-config: '.pre-commit-config.yaml'
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Verify custom config accepted
        run: |
          echo "✓ Custom config file accepted"

      - name: Test with base branch
        id: with-base-branch
        uses: ./pre-commit
        continue-on-error: true
        with:
          base-branch: 'main'
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Verify base branch accepted
        run: |
          echo "✓ Base branch parameter accepted"

  test-pre-commit-git-config:
    name: Test Git Configuration
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test custom git user
        run: |
          # Simulate set-git-config step
          git config user.name "Test User"
          git config user.email "test@example.com"

          # Verify configuration
          user_name=$(git config user.name)
          user_email=$(git config user.email)

          if [[ "$user_name" != "Test User" ]]; then
            echo "❌ ERROR: Git user name not set correctly"
            exit 1
          fi

          if [[ "$user_email" != "test@example.com" ]]; then
            echo "❌ ERROR: Git user email not set correctly"
            exit 1
          fi

          echo "✓ Git configuration works correctly"

      - name: Test default git user
        run: |
          # Simulate default configuration
          git config user.name "GitHub Actions"
          git config user.email "github-actions@github.com"

          # Verify default configuration
          user_name=$(git config user.name)
          user_email=$(git config user.email)

          if [[ "$user_name" != "GitHub Actions" ]]; then
            echo "❌ ERROR: Default git user name not set correctly"
            exit 1
          fi

          if [[ "$user_email" != "github-actions@github.com" ]]; then
            echo "❌ ERROR: Default git user email not set correctly"
            exit 1
          fi

          echo "✓ Default git configuration works correctly"

  test-pre-commit-option-generation:
    name: Test Option Generation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test all-files option (no base branch)
        run: |
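          # Without a base branch the action lints everything (--all-files); with one
          # it restricts hooks to files changed between that ref and HEAD using
          # --from-ref/--to-ref. The two steps below exercise both branches.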
BASE_BRANCH=""
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
option="--all-files"
|
||||
else
|
||||
option="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
|
||||
if [[ "$option" != "--all-files" ]]; then
|
||||
echo "❌ ERROR: Should use --all-files when no base branch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Correctly generates --all-files option"
|
||||
|
||||
- name: Test diff option (with base branch)
|
||||
run: |
|
||||
BASE_BRANCH="main"
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
option="--all-files"
|
||||
else
|
||||
option="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
|
||||
expected="--from-ref main --to-ref HEAD"
|
||||
if [[ "$option" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Option mismatch. Expected: $expected, Got: $option"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Correctly generates diff option with base branch"
|
||||
|
||||
test-pre-commit-config-file-detection:
|
||||
name: Test Config File Detection
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Verify default config exists
|
||||
run: |
|
||||
if [[ -f ".pre-commit-config.yaml" ]]; then
|
||||
echo "✓ Default .pre-commit-config.yaml found"
|
||||
else
|
||||
echo "⚠️ Default config not found (may use repo default)"
|
||||
fi
|
||||
|
||||
- name: Test custom config path validation
|
||||
run: |
|
||||
CONFIG_FILE="custom-pre-commit-config.yaml"
|
||||
|
||||
# In real action, this would be validated
|
||||
if [[ ! -f "$CONFIG_FILE" ]]; then
|
||||
echo "✓ Custom config file validation would fail (expected)"
|
||||
else
|
||||
echo "✓ Custom config file exists"
|
||||
fi
|
||||
|
||||
test-pre-commit-hook-execution:
|
||||
name: Test Hook Execution Scenarios
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test pre-commit installed
|
||||
run: |
|
||||
if command -v pre-commit >/dev/null 2>&1; then
|
||||
echo "✓ pre-commit is installed"
|
||||
pre-commit --version
|
||||
else
|
||||
echo "⚠️ pre-commit not installed (would be installed by action)"
|
||||
fi
|
||||
|
||||
- name: Create test file with issues
|
||||
run: |
|
||||
mkdir -p test-pre-commit
|
||||
cd test-pre-commit
|
||||
|
||||
# Create a file with trailing whitespace
|
||||
echo "Line with trailing spaces " > test.txt
|
||||
echo "Line without issues" >> test.txt
|
||||
|
||||
# Create a minimal .pre-commit-config.yaml
|
||||
cat > .pre-commit-config.yaml <<'EOF'
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.5.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
EOF
|
||||
|
||||
echo "✓ Test environment created"
|
||||
|
||||
- name: Test hook detection of issues
|
||||
run: |
|
||||
cd test-pre-commit
|
||||
|
||||
# Check if trailing whitespace exists
|
||||
if grep -q " $" test.txt; then
|
||||
echo "✓ Test file has trailing whitespace (as expected)"
|
||||
else
|
||||
echo "❌ ERROR: Test file should have trailing whitespace"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
test-pre-commit-outputs:
|
||||
name: Test Output Values
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test hooks_passed output
|
||||
run: |
|
||||
# Simulate successful hooks
|
||||
HOOKS_OUTCOME="success"
|
||||
|
||||
if [[ "$HOOKS_OUTCOME" == "success" ]]; then
|
||||
hooks_passed="true"
|
||||
else
|
||||
hooks_passed="false"
|
||||
fi
|
||||
|
||||
if [[ "$hooks_passed" != "true" ]]; then
|
||||
echo "❌ ERROR: hooks_passed should be true for success"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ hooks_passed output correct for success"
|
||||
|
||||
- name: Test hooks_passed output on failure
|
||||
run: |
|
||||
# Simulate failed hooks
|
||||
HOOKS_OUTCOME="failure"
|
||||
|
||||
if [[ "$HOOKS_OUTCOME" == "success" ]]; then
|
||||
hooks_passed="true"
|
||||
else
|
||||
hooks_passed="false"
|
||||
fi
|
||||
|
||||
if [[ "$hooks_passed" != "false" ]]; then
|
||||
echo "❌ ERROR: hooks_passed should be false for failure"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ hooks_passed output correct for failure"
|
||||
|
||||
- name: Test files_changed output
|
||||
run: |
|
||||
# Simulate git status check
|
||||
echo "test.txt" > /tmp/test-changes.txt
|
||||
|
||||
if [[ -s /tmp/test-changes.txt ]]; then
|
||||
files_changed="true"
|
||||
else
|
||||
files_changed="false"
|
||||
fi
|
||||
|
||||
if [[ "$files_changed" != "true" ]]; then
|
||||
echo "❌ ERROR: files_changed should be true when files exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ files_changed output correct"
|
||||
|
||||
test-pre-commit-uv-integration:
|
||||
name: Test UV Integration
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test PRE_COMMIT_USE_UV environment variable
|
||||
run: |
|
||||
PRE_COMMIT_USE_UV='1'
|
||||
|
||||
if [[ "$PRE_COMMIT_USE_UV" != "1" ]]; then
|
||||
echo "❌ ERROR: PRE_COMMIT_USE_UV should be set to 1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ PRE_COMMIT_USE_UV correctly set"
|
||||
echo "✓ pre-commit will use UV for faster installations"
|
||||
|
||||
test-pre-commit-workflow-scenarios:
|
||||
name: Test Workflow Scenarios
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test full workflow (all files)
|
||||
run: |
|
||||
echo "Simulating full workflow with --all-files..."
|
||||
|
||||
# 1. Validate inputs
|
||||
CONFIG_FILE=".pre-commit-config.yaml"
|
||||
echo "✓ Step 1: Validate inputs"
|
||||
|
||||
# 2. Set git config
|
||||
git config user.name "Test User"
|
||||
git config user.email "test@example.com"
|
||||
echo "✓ Step 2: Set git config"
|
||||
|
||||
# 3. Determine option
|
||||
BASE_BRANCH=""
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
OPTION="--all-files"
|
||||
else
|
||||
OPTION="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
echo "✓ Step 3: Option set to: $OPTION"
|
||||
|
||||
# 4. Run pre-commit (simulated)
|
||||
echo "✓ Step 4: Would run: pre-commit run $OPTION"
|
||||
|
||||
# 5. Check for changes
|
||||
echo "✓ Step 5: Check for changes to commit"
|
||||
|
||||
echo "✓ Full workflow simulation completed"
|
||||
|
||||
- name: Test diff workflow (with base branch)
|
||||
run: |
|
||||
echo "Simulating diff workflow with base branch..."
|
||||
|
||||
# 1. Validate inputs
|
||||
CONFIG_FILE=".pre-commit-config.yaml"
|
||||
BASE_BRANCH="main"
|
||||
echo "✓ Step 1: Validate inputs (base-branch: $BASE_BRANCH)"
|
||||
|
||||
# 2. Set git config
|
||||
git config user.name "GitHub Actions"
|
||||
git config user.email "github-actions@github.com"
|
||||
echo "✓ Step 2: Set git config"
|
||||
|
||||
# 3. Determine option
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
OPTION="--all-files"
|
||||
else
|
||||
OPTION="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
echo "✓ Step 3: Option set to: $OPTION"
|
||||
|
||||
# 4. Run pre-commit (simulated)
|
||||
echo "✓ Step 4: Would run: pre-commit run $OPTION"
|
||||
|
||||
# 5. Check for changes
|
||||
echo "✓ Step 5: Check for changes to commit"
|
||||
|
||||
echo "✓ Diff workflow simulation completed"
|
||||
|
||||
test-pre-commit-autofix-behavior:
|
||||
name: Test Autofix Behavior
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test autofix commit message
|
||||
run: |
|
||||
COMMIT_MESSAGE="style(pre-commit): autofix"
|
||||
|
||||
if [[ "$COMMIT_MESSAGE" != "style(pre-commit): autofix" ]]; then
|
||||
echo "❌ ERROR: Incorrect commit message"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Autofix commit message correct"
|
||||
|
||||
- name: Test git add options
|
||||
run: |
|
||||
ADD_OPTIONS="-u"
|
||||
|
||||
if [[ "$ADD_OPTIONS" != "-u" ]]; then
|
||||
echo "❌ ERROR: Incorrect add options"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Git add options correct (-u for tracked files)"
|
||||
|
||||
- name: Test autofix always runs
|
||||
run: |
|
||||
# Simulate pre-commit failure
|
||||
PRECOMMIT_FAILED=true
|
||||
|
||||
# Autofix should still run (if: always())
|
||||
echo "✓ Autofix runs even when pre-commit fails"
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-pre-commit-validation
|
||||
- test-pre-commit-git-config
|
||||
- test-pre-commit-option-generation
|
||||
- test-pre-commit-config-file-detection
|
||||
- test-pre-commit-hook-execution
|
||||
- test-pre-commit-outputs
|
||||
- test-pre-commit-uv-integration
|
||||
- test-pre-commit-workflow-scenarios
|
||||
- test-pre-commit-autofix-behavior
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "Pre-commit Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Input validation tests"
|
||||
echo "✓ Git configuration tests"
|
||||
echo "✓ Option generation tests"
|
||||
echo "✓ Config file detection tests"
|
||||
echo "✓ Hook execution tests"
|
||||
echo "✓ Output verification tests"
|
||||
echo "✓ UV integration tests"
|
||||
echo "✓ Workflow scenario tests"
|
||||
echo "✓ Autofix behavior tests"
|
||||
echo ""
|
||||
echo "All pre-commit integration tests completed successfully!"
|
||||
414
_tests/integration/workflows/version-validator-test.yml
Normal file
@@ -0,0 +1,414 @@
---
name: Integration Test - Version Validator
on:
  workflow_dispatch:
  push:
    paths:
      - 'version-validator/**'
      - '_tests/integration/workflows/version-validator-test.yml'

jobs:
  test-version-validator-input-validation:
    name: Test Input Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test empty version (should fail)
        run: |
          VERSION=""
          if [[ -z "$VERSION" ]]; then
            echo "✓ Empty version correctly rejected"
          else
            echo "❌ ERROR: Empty version should be rejected"
            exit 1
          fi

      - name: Test dangerous characters in version
        run: |
          for version in "1.2.3;rm -rf /" "1.0&&echo" "1.0|cat" '1.0`cmd`' "1.0\$variable"; do
            if [[ "$version" == *";"* ]] || [[ "$version" == *"&&"* ]] || \
               [[ "$version" == *"|"* ]] || [[ "$version" == *"\`"* ]] || [[ "$version" == *"\$"* ]]; then
              echo "✓ Dangerous version '$version' correctly detected"
            else
              echo "❌ ERROR: Should detect dangerous characters in: $version"
              exit 1
            fi
          done

      - name: Test valid version strings
        run: |
          for version in "1.2.3" "v1.0.0" "2.0.0-alpha" "1.0.0+build"; do
            if [[ "$version" == *";"* ]] || [[ "$version" == *"&&"* ]] || \
               [[ "$version" == *"|"* ]] || [[ "$version" == *"\`"* ]] || [[ "$version" == *"\$"* ]]; then
              echo "❌ ERROR: Valid version should not be rejected: $version"
              exit 1
            else
              echo "✓ Valid version '$version' accepted"
            fi
          done

  test-version-validator-regex-validation:
    name: Test Regex Validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test empty regex (should fail)
        run: |
          REGEX=""
          if [[ -z "$REGEX" ]]; then
            echo "✓ Empty regex correctly rejected"
          else
            echo "❌ ERROR: Empty regex should be rejected"
            exit 1
          fi

      - name: Test potential ReDoS patterns
        run: |
for regex in ".*.*" ".+.+"; do
|
||||
if [[ "$regex" == *".*.*"* ]] || [[ "$regex" == *".+.+"* ]]; then
|
||||
echo "✓ ReDoS pattern '$regex' detected (would show warning)"
|
||||
else
|
||||
echo "❌ ERROR: Should detect ReDoS pattern: $regex"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test safe regex patterns
|
||||
run: |
|
||||
for regex in "^[0-9]+\.[0-9]+$" "^v?[0-9]+"; do
|
||||
if [[ "$regex" == *".*.*"* ]] || [[ "$regex" == *".+.+"* ]]; then
|
||||
echo "❌ ERROR: Safe regex should not be flagged: $regex"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Safe regex '$regex' accepted"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-language-validation:
|
||||
name: Test Language Parameter Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test dangerous characters in language
|
||||
run: |
|
||||
for lang in "node;rm" "python&&cmd" "ruby|cat"; do
|
||||
if [[ "$lang" == *";"* ]] || [[ "$lang" == *"&&"* ]] || [[ "$lang" == *"|"* ]]; then
|
||||
echo "✓ Dangerous language parameter '$lang' correctly detected"
|
||||
else
|
||||
echo "❌ ERROR: Should detect dangerous characters in: $lang"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test valid language parameters
|
||||
run: |
|
||||
for lang in "node" "python" "ruby" "go" "java"; do
|
||||
if [[ "$lang" == *";"* ]] || [[ "$lang" == *"&&"* ]] || [[ "$lang" == *"|"* ]]; then
|
||||
echo "❌ ERROR: Valid language should not be rejected: $lang"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Valid language '$lang' accepted"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-version-cleaning:
|
||||
name: Test Version Cleaning
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test v prefix removal
|
||||
run: |
|
||||
for input in "v1.2.3" "V2.0.0"; do
|
||||
cleaned=$(echo "$input" | sed -e 's/^[vV]//')
|
||||
if [[ "$cleaned" == "1.2.3" ]] || [[ "$cleaned" == "2.0.0" ]]; then
|
||||
echo "✓ v prefix removed from '$input' -> '$cleaned'"
|
||||
else
|
||||
echo "❌ ERROR: Failed to clean '$input', got '$cleaned'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test whitespace removal
|
||||
run: |
|
||||
input=" 1.2.3 "
|
||||
cleaned=$(echo "$input" | tr -d ' ')
|
||||
if [[ "$cleaned" == "1.2.3" ]]; then
|
||||
echo "✓ Whitespace removed: '$input' -> '$cleaned'"
|
||||
else
|
||||
echo "❌ ERROR: Failed to remove whitespace"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Test newline removal
|
||||
run: |
|
||||
input=$'1.2.3\n'
|
||||
cleaned=$(echo "$input" | tr -d '\n' | tr -d '\r')
|
||||
if [[ "$cleaned" == "1.2.3" ]]; then
|
||||
echo "✓ Newlines removed"
|
||||
else
|
||||
echo "❌ ERROR: Failed to remove newlines"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
test-version-validator-regex-matching:
|
||||
name: Test Regex Matching
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test default SemVer regex
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+(\.[0-9]+)?(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$'
|
||||
|
||||
for version in "1.0.0" "1.2" "1.0.0-alpha" "1.0.0+build" "2.1.0-rc.1+build.123"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "✓ Version '$version' matches SemVer regex"
|
||||
else
|
||||
echo "❌ ERROR: Version '$version' should match SemVer"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test invalid versions against SemVer regex
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+(\.[0-9]+)?(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$'
|
||||
|
||||
for version in "abc" "1.a.b" "not.a.version"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "❌ ERROR: Invalid version '$version' should not match"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Invalid version '$version' correctly rejected"
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test custom strict regex
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
|
||||
# Should match
|
||||
for version in "1.0.0" "2.5.10"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "✓ Version '$version' matches strict regex"
|
||||
else
|
||||
echo "❌ ERROR: Version '$version' should match strict regex"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Should not match
|
||||
for version in "1.0" "1.0.0-alpha"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "❌ ERROR: Version '$version' should not match strict regex"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Version '$version' correctly rejected by strict regex"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-outputs:
|
||||
name: Test Output Generation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test valid version outputs (simulation)
|
||||
run: |
|
||||
VERSION="v1.2.3"
|
||||
REGEX='^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
|
||||
# Clean version
|
||||
cleaned=$(echo "$VERSION" | sed -e 's/^[vV]//' | tr -d ' ' | tr -d '\n' | tr -d '\r')
|
||||
|
||||
# Validate
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
is_valid="true"
|
||||
validated_version="$cleaned"
|
||||
error_message=""
|
||||
|
||||
echo "is_valid=$is_valid"
|
||||
echo "validated_version=$validated_version"
|
||||
echo "error_message=$error_message"
|
||||
|
||||
if [[ "$is_valid" != "true" ]]; then
|
||||
echo "❌ ERROR: Should be valid"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "$validated_version" != "1.2.3" ]]; then
|
||||
echo "❌ ERROR: Wrong validated version"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Valid version outputs correct"
|
||||
fi
|
||||
|
||||
- name: Test invalid version outputs (simulation)
|
||||
run: |
|
||||
VERSION="not.a.version"
|
||||
REGEX='^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
LANGUAGE="test"
|
||||
|
||||
# Clean version
|
||||
cleaned=$(echo "$VERSION" | sed -e 's/^[vV]//' | tr -d ' ' | tr -d '\n' | tr -d '\r')
|
||||
|
||||
# Validate
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
is_valid="true"
|
||||
else
|
||||
is_valid="false"
|
||||
validated_version=""
|
||||
error_msg="Invalid $LANGUAGE version format: '$VERSION' (cleaned: '$cleaned'). Expected pattern: $REGEX"
|
||||
error_message=$(echo "$error_msg" | tr -d '\n\r')
|
||||
|
||||
echo "is_valid=$is_valid"
|
||||
echo "validated_version=$validated_version"
|
||||
echo "error_message=$error_message"
|
||||
|
||||
if [[ "$is_valid" != "false" ]]; then
|
||||
echo "❌ ERROR: Should be invalid"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -n "$validated_version" ]]; then
|
||||
echo "❌ ERROR: Validated version should be empty"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "$error_message" ]]; then
|
||||
echo "❌ ERROR: Error message should not be empty"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Invalid version outputs correct"
|
||||
fi
|
||||
|
||||
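  # Editorial aside: the simulated variables above mirror the validator's output
  # contract (is_valid / validated_version / error_message). In the action itself
  # these would typically be published as step outputs, e.g. (sketch, not the
  # committed implementation):
  #   echo "is_valid=$is_valid" >> "$GITHUB_OUTPUT"
  #   echo "validated_version=$validated_version" >> "$GITHUB_OUTPUT"
  #   echo "error_message=$error_message" >> "$GITHUB_OUTPUT"
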
  test-version-validator-sanitization:
    name: Test Output Sanitization
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test error message sanitization
        run: |
          error_msg=$'Error message\nwith newlines'

          sanitized=$(echo "$error_msg" | tr -d '\n\r')

          if [[ "$sanitized" == *$'\n'* ]] || [[ "$sanitized" == *$'\r'* ]]; then
            echo "❌ ERROR: Newlines not removed from error message"
            exit 1
          fi
          echo "✓ Error message sanitization works"

      - name: Test validated version sanitization
        run: |
          VERSION=$'1.2.3\n'
          cleaned=$(echo "$VERSION" | sed -e 's/^[vV]//' | tr -d ' ' | tr -d '\n' | tr -d '\r')

          if [[ "$cleaned" == *$'\n'* ]] || [[ "$cleaned" == *$'\r'* ]]; then
            echo "❌ ERROR: Newlines not removed from validated version"
            exit 1
          fi
          echo "✓ Validated version sanitization works"

  test-version-validator-real-world-scenarios:
    name: Test Real World Scenarios
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test Node.js version validation
        run: |
          REGEX='^[0-9]+(\.[0-9]+(\.[0-9]+)?)?$'

          for version in "20" "20.9" "20.9.0" "18.17.1"; do
            cleaned=$(echo "$version" | sed -e 's/^[vV]//')
            if [[ $cleaned =~ $REGEX ]]; then
              echo "✓ Node.js version '$version' valid"
            else
              echo "❌ ERROR: Node.js version should be valid"
              exit 1
            fi
          done

      - name: Test Python version validation
        run: |
          REGEX='^[0-9]+\.[0-9]+(\.[0-9]+)?$'

          for version in "3.11" "3.11.5" "3.12.0"; do
            cleaned=$(echo "$version" | sed -e 's/^[vV]//')
            if [[ $cleaned =~ $REGEX ]]; then
              echo "✓ Python version '$version' valid"
            else
              echo "❌ ERROR: Python version should be valid"
              exit 1
            fi
          done

      - name: Test CalVer validation
        run: |
          REGEX='^[0-9]{4}\.[0-9]{1,2}(\.[0-9]+)?$'

          for version in "2024.3" "2024.3.15" "2024.10.1"; do
            cleaned=$(echo "$version" | sed -e 's/^[vV]//')
            if [[ $cleaned =~ $REGEX ]]; then
              echo "✓ CalVer version '$version' valid"
            else
              echo "❌ ERROR: CalVer version should be valid"
              exit 1
            fi
          done

      - name: Test Docker tag validation
        run: |
          REGEX='^[a-z0-9][a-z0-9._-]*$'

          for tag in "latest" "v1.2.3" "stable-alpine" "2024.10.15"; do
            cleaned=$(echo "$tag" | sed -e 's/^[vV]//')
            # Note: Docker tags are case-insensitive, so convert to lowercase
            cleaned=$(echo "$cleaned" | tr '[:upper:]' '[:lower:]')
            if [[ $cleaned =~ $REGEX ]]; then
              echo "✓ Docker tag '$tag' valid"
            else
              echo "❌ ERROR: Docker tag should be valid: $tag"
              exit 1
            fi
          done

  integration-test-summary:
    name: Integration Test Summary
    runs-on: ubuntu-latest
    needs:
      - test-version-validator-input-validation
      - test-version-validator-regex-validation
      - test-version-validator-language-validation
      - test-version-validator-version-cleaning
      - test-version-validator-regex-matching
      - test-version-validator-outputs
      - test-version-validator-sanitization
      - test-version-validator-real-world-scenarios
    steps:
      - name: Summary
        run: |
          echo "=========================================="
          echo "Version Validator Integration Tests - PASSED"
          echo "=========================================="
          echo ""
          echo "✓ Input validation tests"
          echo "✓ Regex validation tests"
          echo "✓ Language validation tests"
          echo "✓ Version cleaning tests"
          echo "✓ Regex matching tests"
          echo "✓ Output generation tests"
          echo "✓ Sanitization tests"
          echo "✓ Real world scenario tests"
          echo ""
          echo "All version-validator integration tests completed successfully!"
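For local iteration, this workflow can be exercised without pushing. A minimal sketch, assuming nektos/act is installed and the command is run from the repository root; the job name can be swapped for any job defined above:

```bash
# Run one job of the version-validator integration workflow locally with act.
act workflow_dispatch \
  -W _tests/integration/workflows/version-validator-test.yml \
  -j test-version-validator-regex-matching
```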