diff --git a/.github/workflows/action-security.yml b/.github/workflows/action-security.yml index 8e2f20c..830043e 100644 --- a/.github/workflows/action-security.yml +++ b/.github/workflows/action-security.yml @@ -39,212 +39,30 @@ jobs: with: fetch-depth: 0 - - name: Check Required Configurations - id: check-configs - shell: sh - run: | - # Initialize all flags as false - { - echo "run_gitleaks=false" - echo "run_trivy=true" - } >> "$GITHUB_OUTPUT" - - # Check Gitleaks configuration and license - if [ -f ".gitleaks.toml" ] && [ -n "${{ secrets.GITLEAKS_LICENSE }}" ]; then - echo "Gitleaks config and license found" - printf '%s\n' "run_gitleaks=true" >> "$GITHUB_OUTPUT" - else - echo "::warning::Gitleaks config or license missing - skipping Gitleaks scan" - fi - - - name: Run actionlint - uses: raven-actions/actionlint@3a24062651993d40fed1019b58ac6fbdfbf276cc # v2.0.1 + - name: Run Security Scan + id: security-scan + uses: ./security-scan with: - cache: true - fail-on-error: true - shellcheck: false - - - name: Run Gitleaks - if: steps.check-configs.outputs.run_gitleaks == 'true' - uses: gitleaks/gitleaks-action@ff98106e4c7b2bc287b24eaf42907196329070c7 # v2.3.9 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITLEAKS_LICENSE: ${{ secrets.GITLEAKS_LICENSE }} - with: - config-path: .gitleaks.toml - report-format: sarif - report-path: gitleaks-report.sarif - - - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@a11da62073708815958ea6d84f5650c78a3ef85b # master - with: - scan-type: 'fs' - scanners: 'vuln,config,secret' - format: 'sarif' - output: 'trivy-results.sarif' - severity: 'CRITICAL,HIGH' - timeout: '10m' - - - name: Verify SARIF files - id: verify-sarif - shell: sh - run: | - # Initialize outputs - { - echo "has_trivy=false" - echo "has_gitleaks=false" - } >> "$GITHUB_OUTPUT" - - # Check Trivy results - if [ -f "trivy-results.sarif" ]; then - if jq -e . &1 <"trivy-results.sarif"; then - printf '%s\n' "has_trivy=true" >> "$GITHUB_OUTPUT" - else - echo "::warning::Trivy SARIF file exists but is not valid JSON" - fi - fi - - # Check Gitleaks results if it ran - if [ "${{ steps.check-configs.outputs.run_gitleaks }}" = "true" ]; then - if [ -f "gitleaks-report.sarif" ]; then - if jq -e . 
&1 <"gitleaks-report.sarif"; then - printf '%s\n' "has_gitleaks=true" >> "$GITHUB_OUTPUT" - else - echo "::warning::Gitleaks SARIF file exists but is not valid JSON" - fi - fi - fi - - - name: Upload Trivy results - if: steps.verify-sarif.outputs.has_trivy == 'true' - uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 - with: - sarif_file: 'trivy-results.sarif' - category: 'trivy' - - - name: Upload Gitleaks results - if: steps.verify-sarif.outputs.has_gitleaks == 'true' - uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 - with: - sarif_file: 'gitleaks-report.sarif' - category: 'gitleaks' - - - name: Archive security reports - if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 - with: - name: security-reports-${{ github.run_id }} - path: | - ${{ steps.verify-sarif.outputs.has_trivy == 'true' && 'trivy-results.sarif' || '' }} - ${{ steps.verify-sarif.outputs.has_gitleaks == 'true' && 'gitleaks-report.sarif' || '' }} - retention-days: 30 - - - name: Analyze Results - if: always() - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 - with: - script: | - const fs = require('fs'); - - try { - let totalIssues = 0; - let criticalIssues = 0; - - const analyzeSarif = (file, tool) => { - if (!fs.existsSync(file)) { - console.log(`No results file found for ${tool}`); - return null; - } - - try { - const sarif = JSON.parse(fs.readFileSync(file, 'utf8')); - return sarif.runs.reduce((acc, run) => { - if (!run.results) return acc; - - const critical = run.results.filter(r => - r.level === 'error' || - r.level === 'critical' || - (r.ruleId || '').toLowerCase().includes('critical') - ).length; - - return { - total: acc.total + run.results.length, - critical: acc.critical + critical - }; - }, { total: 0, critical: 0 }); - } catch (error) { - console.log(`Error analyzing ${tool} results: ${error.message}`); - return null; - } - }; - - // Only analyze results from tools that ran successfully - const results = { - trivy: ${{ steps.verify-sarif.outputs.has_trivy }} ? - analyzeSarif('trivy-results.sarif', 'trivy') : null, - gitleaks: ${{ steps.verify-sarif.outputs.has_gitleaks }} ? 
- analyzeSarif('gitleaks-report.sarif', 'gitleaks') : null - }; - - // Aggregate results - Object.entries(results).forEach(([tool, result]) => { - if (result) { - totalIssues += result.total; - criticalIssues += result.critical; - console.log(`${tool}: ${result.total} total, ${result.critical} critical issues`); - } - }); - - // Create summary - const summary = `## Security Scan Summary - - - Total Issues Found: ${totalIssues} - - Critical Issues: ${criticalIssues} - - ### Tool Breakdown - ${Object.entries(results) - .filter(([_, r]) => r) - .map(([tool, r]) => - `- ${tool}: ${r.total} total, ${r.critical} critical` - ).join('\n')} - - ### Tools Run Status - - Trivy: ${{ steps.verify-sarif.outputs.has_trivy }} - - Gitleaks: ${{ steps.check-configs.outputs.run_gitleaks }} - `; - - // Set output - core.setOutput('total_issues', totalIssues); - core.setOutput('critical_issues', criticalIssues); - - // Add job summary - await core.summary - .addRaw(summary) - .write(); - - // Fail if critical issues found - if (criticalIssues > 0) { - core.setFailed(`Found ${criticalIssues} critical security issues`); - } - } catch (error) { - core.setFailed(`Analysis failed: ${error.message}`); - } + gitleaks-license: ${{ secrets.GITLEAKS_LICENSE }} + token: ${{ secrets.GITHUB_TOKEN }} - name: Notify on Critical Issues - if: failure() + if: failure() && steps.security-scan.outputs.critical_issues != '0' uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: |- const { repo, owner } = context.repo; - const critical = core.getInput('critical_issues'); + const critical = '${{ steps.security-scan.outputs.critical_issues }}'; + const total = '${{ steps.security-scan.outputs.total_issues }}'; const body = `๐Ÿšจ Critical security issues found in GitHub Actions - ${critical} critical security issues were found during the security scan. + ${critical} critical security issues (out of ${total} total) were found during the security scan. 
### Scan Results - - Trivy: ${{ steps.verify-sarif.outputs.has_trivy == 'true' && 'Completed' || 'Skipped/Failed' }} - - Gitleaks: ${{ steps.check-configs.outputs.run_gitleaks == 'true' && 'Completed' || 'Skipped' }} + - Actionlint: Completed + - Trivy: ${{ steps.security-scan.outputs.has_trivy_results == 'true' && 'Completed' || 'Skipped/Failed' }} + - Gitleaks: ${{ steps.security-scan.outputs.has_gitleaks_results == 'true' && 'Completed' || 'Skipped' }} [View detailed scan results](https://github.com/${owner}/${repo}/actions/runs/${context.runId}) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml deleted file mode 100644 index a07cc60..0000000 --- a/.github/workflows/codeql.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json -name: 'CodeQL' - -on: - push: - branches: - - 'main' - pull_request: - branches: - - 'main' - schedule: - - cron: '30 1 * * 0' # Run at 1:30 AM UTC every Sunday - merge_group: - -permissions: - actions: read - contents: read - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - security-events: write - - strategy: - fail-fast: false - matrix: - language: - - 'actions' - - 'javascript' - - 'python' - - steps: # Add languages used in your actions - - name: Checkout repository - uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta - - - name: Initialize CodeQL - uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 - with: - languages: ${{ matrix.language }} - queries: security-and-quality - - - name: Autobuild - uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 - with: - category: '/language:${{matrix.language}}' diff --git a/.github/workflows/pr-lint.yml b/.github/workflows/pr-lint.yml index af70d3e..6f2455e 100644 --- a/.github/workflows/pr-lint.yml +++ b/.github/workflows/pr-lint.yml @@ -24,17 +24,9 @@ on: merge_group: env: - # Apply linter fixes configuration - APPLY_FIXES: none - APPLY_FIXES_EVENT: pull_request - APPLY_FIXES_MODE: commit - - # Disable linters that do not work or conflict + # MegaLinter configuration - these override the action's defaults DISABLE_LINTERS: REPOSITORY_DEVSKIM - - # Additional settings VALIDATE_ALL_CODEBASE: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} - GITHUB_TOKEN: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }} # Report configuration REPORT_OUTPUT_FOLDER: megalinter-reports @@ -72,35 +64,13 @@ jobs: token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }} fetch-depth: 0 - - name: MegaLinter - id: ml - uses: oxsecurity/megalinter/flavors/cupcake@62c799d895af9bcbca5eacfebca29d527f125a57 # v9.1.0 - - - name: Check MegaLinter Results - id: check-results - if: always() - shell: sh - run: | - printf '%s\n' "status=success" >> "$GITHUB_OUTPUT" - - if [ -f "${{ env.REPORT_OUTPUT_FOLDER }}/megalinter.log" ]; then - if grep -q "ERROR\|CRITICAL" "${{ env.REPORT_OUTPUT_FOLDER }}/megalinter.log"; then - echo "Linting errors found" - printf '%s\n' "status=failure" >> "$GITHUB_OUTPUT" - fi - else - echo "::warning::MegaLinter log file not found" - fi - - - name: Upload Reports - if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - name: Run MegaLinter + id: pr-lint + uses: ./pr-lint with: - name: MegaLinter reports - path: | - 
megalinter-reports - mega-linter.log - retention-days: 30 + token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }} + username: fiximus + email: github-bot@ivuorinen.net - name: Upload SARIF Report if: always() && hashFiles('megalinter-reports/sarif/*.sarif') @@ -109,74 +79,12 @@ jobs: sarif_file: megalinter-reports/sarif category: megalinter - - name: Prepare Git for Fixes - if: steps.ml.outputs.has_updated_sources == 1 - shell: sh - run: | - sudo chown -Rc $(id -u) .git/ - git config --global user.name "fiximus" - git config --global user.email "github-bot@ivuorinen.net" - - - name: Create Pull Request - if: | - steps.ml.outputs.has_updated_sources == 1 && - (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && - env.APPLY_FIXES_MODE == 'pull_request' && - (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && - !contains(github.event.head_commit.message, 'skip fix') - uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9 - id: cpr - with: - token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }} - commit-message: '[MegaLinter] Apply linters automatic fixes' - title: '[MegaLinter] Apply linters automatic fixes' - labels: bot - branch: megalinter/fixes-${{ github.ref_name }} - branch-suffix: timestamp - delete-branch: true - body: | - ## MegaLinter Fixes - - MegaLinter has identified and fixed code style issues. - - ### ๐Ÿ” Changes Made - - Automated code style fixes - - Formatting improvements - - Lint error corrections - - ### ๐Ÿ“ Notes - - Please review the changes carefully - - Run tests before merging - - Verify formatting matches project standards - - > Generated automatically by MegaLinter - - - name: Commit Fixes - if: | - steps.ml.outputs.has_updated_sources == 1 && - (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && - env.APPLY_FIXES_MODE == 'commit' && - github.ref != 'refs/heads/main' && - (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && - !contains(github.event.head_commit.message, 'skip fix') - uses: stefanzweifel/git-auto-commit-action@28e16e81777b558cc906c8750092100bbb34c5e3 # v7.0.0 - with: - token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }} - branch: ${{ github.event.pull_request.head.ref || github.head_ref || github.ref }} - commit_message: | - style: apply MegaLinter fixes - - [skip ci] - commit_user_name: fiximus - commit_user_email: github-bot@ivuorinen.net - push_options: --force - - - name: Create Status Check + - name: Check Results if: always() uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | - const status = '${{ steps.check-results.outputs.status }}'; + const status = '${{ steps.pr-lint.outputs.validation_status }}'; const conclusion = status === 'success' ? 
'success' : 'failure'; const summary = `## MegaLinter Results diff --git a/.github/workflows/security-suite.yml b/.github/workflows/security-suite.yml index 1be501c..4bacd4b 100644 --- a/.github/workflows/security-suite.yml +++ b/.github/workflows/security-suite.yml @@ -53,7 +53,7 @@ jobs: # Record the base commit for diffing without checking it out # Keep PR head checked out so scanners analyze the new changes BASE_REF="refs/remotes/origin-base/${{ github.event.pull_request.base.ref }}" - echo "BASE_REF=${BASE_REF}" >> $GITHUB_ENV + echo "BASE_REF=${BASE_REF}" >> "$GITHUB_ENV" echo "Base ref: ${BASE_REF}" git log -1 --oneline "${BASE_REF}" diff --git a/.github/workflows/test-actions.yml b/.github/workflows/test-actions.yml index b6dfc55..051d1f6 100644 --- a/.github/workflows/test-actions.yml +++ b/.github/workflows/test-actions.yml @@ -125,10 +125,10 @@ jobs: shell: sh run: | if [ -d "_tests/reports/integration" ] && [ -n "$(find _tests/reports/integration -type f 2>/dev/null)" ]; then - printf '%s\n' "reports-found=true" >> $GITHUB_OUTPUT + printf '%s\n' "reports-found=true" >> "$GITHUB_OUTPUT" echo "Integration test reports found" else - printf '%s\n' "reports-found=false" >> $GITHUB_OUTPUT + printf '%s\n' "reports-found=false" >> "$GITHUB_OUTPUT" echo "No integration test reports found" fi diff --git a/.serena/memories/repository_overview.md b/.serena/memories/repository_overview.md index 26a61c9..d4e564a 100644 --- a/.serena/memories/repository_overview.md +++ b/.serena/memories/repository_overview.md @@ -5,13 +5,14 @@ - **Path**: /Users/ivuorinen/Code/ivuorinen/actions - **Branch**: main - **External Usage**: `ivuorinen/actions/@main` -- **Total Actions**: 43 self-contained actions +- **Total Actions**: 44 self-contained actions +- **Dogfooding**: Workflows use local actions (pr-lint, codeql-analysis, security-scan) ## Structure ```text / -โ”œโ”€โ”€ / # 43 self-contained actions +โ”œโ”€โ”€ / # 44 self-contained actions โ”‚ โ”œโ”€โ”€ action.yml # Action definition โ”‚ โ”œโ”€โ”€ README.md # Auto-generated โ”‚ โ””โ”€โ”€ CustomValidator.py # Optional validator @@ -25,12 +26,14 @@ โ””โ”€โ”€ Makefile # Build automation ``` -## Action Categories (43 total) +## Action Categories (44 total) **Setup (7)**: node-setup, set-git-config, php-version-detect, python-version-detect, python-version-detect-v2, go-version-detect, dotnet-version-detect **Linting (13)**: ansible-lint-fix, biome-check/fix, csharp-lint-check, eslint-check/fix, go-lint, pr-lint, pre-commit, prettier-check/fix, python-lint-fix, terraform-lint-fix +**Security (1)**: security-scan (actionlint, Gitleaks, Trivy scanning) + **Build (3)**: csharp-build, go-build, docker-build **Publishing (5)**: npm-publish, docker-publish, docker-publish-gh, docker-publish-hub, csharp-publish @@ -85,3 +88,28 @@ make test # All tests (pytest + ShellSpec) - โœ… Convention-based validation - โœ… Test generation system - โœ… Full backward compatibility + +## Dogfooding Strategy + +The repository actively dogfoods its own actions in workflows: + +**Fully Dogfooded Workflows**: + +- **pr-lint.yml**: Uses `./pr-lint` (was 204 lines, now 112 lines - 45% reduction) +- **action-security.yml**: Uses `./security-scan` (was 264 lines, now 82 lines - 69% reduction) +- **codeql-new.yml**: Uses `./codeql-analysis` +- **sync-labels.yml**: Uses `./sync-labels` +- **version-maintenance.yml**: Uses `./action-versioning` + +**Intentionally External**: + +- **build-testing-image.yml**: Uses docker/\* actions directly (needs metadata extraction) +- Core GitHub actions 
(checkout, upload-artifact, setup-\*) kept for standardization + +**Benefits**: + +- Early detection of action issues +- Real-world testing of actions +- Reduced workflow duplication +- Improved maintainability +- Better documentation through usage examples diff --git a/README.md b/README.md index 1172ab2..b0b2198 100644 --- a/README.md +++ b/README.md @@ -22,9 +22,9 @@ Each action is fully self-contained and can be used independently in any GitHub ## ๐Ÿ“š Action Catalog -This repository contains **25 reusable GitHub Actions** for CI/CD automation. +This repository contains **26 reusable GitHub Actions** for CI/CD automation. -### Quick Reference (25 Actions) +### Quick Reference (26 Actions) | Icon | Action | Category | Description | Key Features | |:----:|:-----------------------------------------------------|:-----------|:----------------------------------------------------------------|:---------------------------------------------| @@ -34,7 +34,7 @@ This repository contains **25 reusable GitHub Actions** for CI/CD automation. | ๐Ÿ›ก๏ธ | [`codeql-analysis`][codeql-analysis] | Repository | Run CodeQL security analysis for a single language with conf... | Auto-detection, Token auth, Outputs | | ๐Ÿ–ผ๏ธ | [`compress-images`][compress-images] | Repository | Compress images on demand (workflow_dispatch), and at 11pm e... | Token auth, Outputs | | ๐Ÿ“ | [`csharp-build`][csharp-build] | Build | Builds and tests C# projects. | Caching, Auto-detection, Token auth, Outputs | -| ๐Ÿ“ | [`csharp-lint-check`][csharp-lint-check] | Linting | Runs linters like StyleCop or dotnet-format for C# code styl... | Auto-detection, Token auth, Outputs | +| ๐Ÿ“ | [`csharp-lint-check`][csharp-lint-check] | Linting | Runs linters like StyleCop or dotnet-format for C# code styl... | Caching, Auto-detection, Token auth, Outputs | | ๐Ÿ“ฆ | [`csharp-publish`][csharp-publish] | Publishing | Publishes a C# project to GitHub Packages. | Caching, Auto-detection, Token auth, Outputs | | ๐Ÿ“ฆ | [`docker-build`][docker-build] | Build | Builds a Docker image for multiple architectures with enhanc... | Caching, Auto-detection, Token auth, Outputs | | โ˜๏ธ | [`docker-publish`][docker-publish] | Publishing | Simple wrapper to publish Docker images to GitHub Packages a... | Token auth, Outputs | @@ -49,6 +49,7 @@ This repository contains **25 reusable GitHub Actions** for CI/CD automation. | โœ… | [`prettier-lint`][prettier-lint] | Linting | Run Prettier in check or fix mode with advanced configuratio... | Caching, Auto-detection, Token auth, Outputs | | ๐Ÿ“ | [`python-lint-fix`][python-lint-fix] | Linting | Lints and fixes Python files, commits changes, and uploads S... | Caching, Auto-detection, Token auth, Outputs | | ๐Ÿ“ฆ | [`release-monthly`][release-monthly] | Repository | Creates a release for the current month, incrementing patch ... | Token auth, Outputs | +| ๐Ÿ›ก๏ธ | [`security-scan`][security-scan] | Security | Comprehensive security scanning for GitHub Actions including... | Caching, Token auth, Outputs | | ๐Ÿ“ฆ | [`stale`][stale] | Repository | A GitHub Action to close stale issues and pull requests. | Token auth, Outputs | | ๐Ÿท๏ธ | [`sync-labels`][sync-labels] | Repository | Sync labels from a YAML file to a GitHub repository | Token auth, Outputs | | ๐Ÿ–ฅ๏ธ | [`terraform-lint-fix`][terraform-lint-fix] | Linting | Lints and fixes Terraform files with advanced validation and... | Token auth, Outputs | @@ -74,7 +75,7 @@ This repository contains **25 reusable GitHub Actions** for CI/CD automation. 
|:-----------------------------------------------|:------------------------------------------------------|:---------------------------------------------|:---------------------------------------------| | ๐Ÿ“ฆ [`ansible-lint-fix`][ansible-lint-fix] | Lints and fixes Ansible playbooks, commits changes... | Ansible, YAML | Caching, Token auth, Outputs | | โœ… [`biome-lint`][biome-lint] | Run Biome linter in check or fix mode | JavaScript, TypeScript, JSON | Caching, Auto-detection, Token auth, Outputs | -| ๐Ÿ“ [`csharp-lint-check`][csharp-lint-check] | Runs linters like StyleCop or dotnet-format for C#... | C#, .NET | Auto-detection, Token auth, Outputs | +| ๐Ÿ“ [`csharp-lint-check`][csharp-lint-check] | Runs linters like StyleCop or dotnet-format for C#... | C#, .NET | Caching, Auto-detection, Token auth, Outputs | | โœ… [`eslint-lint`][eslint-lint] | Run ESLint in check or fix mode with advanced conf... | JavaScript, TypeScript | Caching, Auto-detection, Token auth, Outputs | | ๐Ÿ“ [`go-lint`][go-lint] | Run golangci-lint with advanced configuration, cac... | Go | Caching, Token auth, Outputs | | โœ… [`pr-lint`][pr-lint] | Runs MegaLinter against pull requests | Conventional Commits | Caching, Auto-detection, Token auth, Outputs | @@ -115,6 +116,12 @@ This repository contains **25 reusable GitHub Actions** for CI/CD automation. | ๐Ÿ“ฆ [`stale`][stale] | A GitHub Action to close stale issues and pull req... | GitHub Actions | Token auth, Outputs | | ๐Ÿท๏ธ [`sync-labels`][sync-labels] | Sync labels from a YAML file to a GitHub repositor... | YAML, GitHub | Token auth, Outputs | +#### ๐Ÿ›ก๏ธ Security (1 action) + +| Action | Description | Languages | Features | +|:-------------------------------------|:------------------------------------------------------|:----------|:-----------------------------| +| ๐Ÿ›ก๏ธ [`security-scan`][security-scan] | Comprehensive security scanning for GitHub Actions... | - | Caching, Token auth, Outputs | + #### โœ… Validation (1 action) | Action | Description | Languages | Features | @@ -131,7 +138,7 @@ This repository contains **25 reusable GitHub Actions** for CI/CD automation. | [`codeql-analysis`][codeql-analysis] | - | โœ… | โœ… | โœ… | | [`compress-images`][compress-images] | - | - | โœ… | โœ… | | [`csharp-build`][csharp-build] | โœ… | โœ… | โœ… | โœ… | -| [`csharp-lint-check`][csharp-lint-check] | - | โœ… | โœ… | โœ… | +| [`csharp-lint-check`][csharp-lint-check] | โœ… | โœ… | โœ… | โœ… | | [`csharp-publish`][csharp-publish] | โœ… | โœ… | โœ… | โœ… | | [`docker-build`][docker-build] | โœ… | โœ… | โœ… | โœ… | | [`docker-publish`][docker-publish] | - | - | โœ… | โœ… | @@ -146,6 +153,7 @@ This repository contains **25 reusable GitHub Actions** for CI/CD automation. 
| [`prettier-lint`][prettier-lint] | โœ… | โœ… | โœ… | โœ… | | [`python-lint-fix`][python-lint-fix] | โœ… | โœ… | โœ… | โœ… | | [`release-monthly`][release-monthly] | - | - | โœ… | โœ… | +| [`security-scan`][security-scan] | โœ… | - | โœ… | โœ… | | [`stale`][stale] | - | - | โœ… | โœ… | | [`sync-labels`][sync-labels] | - | - | โœ… | โœ… | | [`terraform-lint-fix`][terraform-lint-fix] | - | - | โœ… | โœ… | @@ -224,6 +232,7 @@ All actions can be used independently in your workflows: [prettier-lint]: prettier-lint/README.md [python-lint-fix]: python-lint-fix/README.md [release-monthly]: release-monthly/README.md +[security-scan]: security-scan/README.md [stale]: stale/README.md [sync-labels]: sync-labels/README.md [terraform-lint-fix]: terraform-lint-fix/README.md diff --git a/_tests/framework/utils.sh b/_tests/framework/utils.sh index 842076c..9d6e819 100755 --- a/_tests/framework/utils.sh +++ b/_tests/framework/utils.sh @@ -6,8 +6,8 @@ set -euo pipefail # Source setup utilities SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" # shellcheck source=_tests/framework/setup.sh +# shellcheck disable=SC1091 source "${SCRIPT_DIR}/setup.sh" # Action testing utilities @@ -57,6 +57,13 @@ get_action_name() { uv run "$script_dir/../shared/validation_core.py" --name "$action_file" } +get_action_runs_using() { + local action_file="$1" + local script_dir + script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + uv run "$script_dir/../shared/validation_core.py" --runs-using "$action_file" +} + # Check if an input is required in an action.yml file is_input_required() { local action_file="$1" @@ -69,7 +76,7 @@ is_input_required() { required_status=$(uv run "$script_dir/../shared/validation_core.py" --property "$action_file" "$input_name" "required") # Return 0 (success) if input is required, 1 (failure) if optional - [[ $required_status == "required" ]] + [[ "$required_status" == "required" ]] } # Test input validation using Python validation module @@ -363,5 +370,5 @@ run_action_tests() { } # Export all functions -export -f validate_action_yml get_action_inputs get_action_outputs get_action_name is_input_required +export -f validate_action_yml get_action_inputs get_action_outputs get_action_name get_action_runs_using is_input_required export -f test_input_validation test_action_outputs test_external_usage measure_action_time run_action_tests diff --git a/_tests/shared/validation_core.py b/_tests/shared/validation_core.py index 4e974fe..84228a4 100755 --- a/_tests/shared/validation_core.py +++ b/_tests/shared/validation_core.py @@ -521,6 +521,16 @@ class ActionFileParser: except (OSError, ValueError, yaml.YAMLError, AttributeError): return [] + @staticmethod + def get_action_runs_using(action_file: str) -> str: + """Get the runs.using value from an action.yml file.""" + try: + data = ActionFileParser.load_action_file(action_file) + runs = data.get("runs", {}) + return runs.get("using", "unknown") + except (OSError, ValueError, yaml.YAMLError, AttributeError): + return "unknown" + @staticmethod def _get_required_property(input_data: dict, property_name: str) -> str: """Get the required/optional property.""" @@ -787,6 +797,11 @@ Examples: mode_group.add_argument("--inputs", metavar="ACTION_FILE", help="List action inputs") mode_group.add_argument("--outputs", metavar="ACTION_FILE", help="List action outputs") mode_group.add_argument("--name", metavar="ACTION_FILE", help="Get action name") + mode_group.add_argument( + "--runs-using", + 
metavar="ACTION_FILE", + help="Get action runs.using value", + ) mode_group.add_argument( "--validate-yaml", metavar="YAML_FILE", @@ -834,6 +849,12 @@ def _handle_name_command(args): print(name) +def _handle_runs_using_command(args): + """Handle the runs-using command.""" + runs_using = ActionFileParser.get_action_runs_using(args.runs_using) + print(runs_using) + + def _handle_validate_yaml_command(args): """Handle the validate-yaml command.""" try: @@ -853,6 +874,7 @@ def _execute_command(args): "inputs": _handle_inputs_command, "outputs": _handle_outputs_command, "name": _handle_name_command, + "runs_using": _handle_runs_using_command, "validate_yaml": _handle_validate_yaml_command, } diff --git a/_tests/unit/security-scan/validation.spec.sh b/_tests/unit/security-scan/validation.spec.sh new file mode 100755 index 0000000..7dae3b1 --- /dev/null +++ b/_tests/unit/security-scan/validation.spec.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env shellspec +# Unit tests for security-scan action validation and logic +# Framework is automatically loaded via spec_helper.sh + +Describe "security-scan action" +ACTION_DIR="security-scan" +ACTION_FILE="$ACTION_DIR/action.yml" + +Context "when validating token input" + It "accepts valid GitHub token" + When call validate_input_python "security-scan" "token" "ghp_123456789012345678901234567890123456" + The status should be success + End + + It "rejects injection in token" + When call validate_input_python "security-scan" "token" "token; rm -rf /" + The status should be failure + End + + It "accepts empty token (optional)" + When call validate_input_python "security-scan" "token" "" + The status should be success + End +End + +Context "when validating actionlint-enabled input" + It "accepts true value" + When call validate_input_python "security-scan" "actionlint-enabled" "true" + The status should be success + End + + It "accepts false value" + When call validate_input_python "security-scan" "actionlint-enabled" "false" + The status should be success + End + + It "rejects non-boolean value" + When call validate_input_python "security-scan" "actionlint-enabled" "maybe" + The status should be failure + End +End + +Context "when checking action.yml structure" + It "has valid YAML syntax" + When call validate_action_yml_quiet "$ACTION_FILE" + The status should be success + End + + It "has correct action name" + name=$(get_action_name "$ACTION_FILE") + When call echo "$name" + The output should equal "Security Scan" + End + + It "defines all expected inputs" + inputs=$(get_action_inputs "$ACTION_FILE") + When call echo "$inputs" + The output should include "gitleaks-license" + The output should include "gitleaks-config" + The output should include "trivy-severity" + The output should include "trivy-scanners" + The output should include "trivy-timeout" + The output should include "actionlint-enabled" + The output should include "token" + End + + It "defines all expected outputs" + outputs=$(get_action_outputs "$ACTION_FILE") + When call echo "$outputs" + The output should include "has_trivy_results" + The output should include "has_gitleaks_results" + The output should include "total_issues" + The output should include "critical_issues" + End + + It "uses composite run type" + run_type=$(get_action_runs_using "$ACTION_FILE") + When call echo "$run_type" + The output should equal "composite" + End +End + +Context "when validating inputs per conventions" + It "validates token against github_token convention" + When call validate_input_python "security-scan" "token" 
"ghp_123456789012345678901234567890123456" + The status should be success + End + + It "validates actionlint-enabled as boolean" + When call validate_input_python "security-scan" "actionlint-enabled" "true" + The status should be success + End + + It "rejects invalid boolean for actionlint-enabled" + When call validate_input_python "security-scan" "actionlint-enabled" "1" + The status should be failure + End +End + +Context "when testing optional inputs" + It "accepts empty gitleaks-license" + When call validate_input_python "security-scan" "gitleaks-license" "" + The status should be success + End + + It "accepts empty token" + When call validate_input_python "security-scan" "token" "" + The status should be success + End + + It "accepts valid gitleaks-license value" + When call validate_input_python "security-scan" "gitleaks-license" "license-key-123" + The status should be success + End +End +End diff --git a/biome-lint/rules.yml b/biome-lint/rules.yml index c577002..8e3dbd3 100644 --- a/biome-lint/rules.yml +++ b/biome-lint/rules.yml @@ -28,7 +28,8 @@ conventions: mode: mode_enum token: github_token username: username -overrides: {} +overrides: + mode: mode_enum statistics: total_inputs: 6 validated_inputs: 6 diff --git a/codeql-analysis/rules.yml b/codeql-analysis/rules.yml index 86c3490..0e8727b 100644 --- a/codeql-analysis/rules.yml +++ b/codeql-analysis/rules.yml @@ -42,7 +42,7 @@ conventions: packs: codeql_packs queries: codeql_queries ram: numeric_range_256_32768 - skip-queries: codeql_queries + skip-queries: boolean source-root: file_path threads: numeric_range_1_128 token: github_token @@ -51,6 +51,7 @@ overrides: build-mode: codeql_build_mode category: category_format config: codeql_config + language: codeql_language output: file_path packs: codeql_packs queries: codeql_queries diff --git a/compress-images/rules.yml b/compress-images/rules.yml index 2301f29..e259b1a 100644 --- a/compress-images/rules.yml +++ b/compress-images/rules.yml @@ -2,7 +2,7 @@ # Validation rules for compress-images action # Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY # Schema version: 1.0 -# Coverage: 86% (6/7 inputs) +# Coverage: 100% (7/7 inputs) # # This file defines validation rules for the compress-images GitHub Action. # Rules are automatically applied by validate-inputs action when this @@ -24,6 +24,7 @@ optional_inputs: - working-directory conventions: email: email + ignore-paths: path_list image-quality: numeric_range_0_100 png-quality: numeric_range_0_100 token: github_token @@ -32,10 +33,10 @@ conventions: overrides: {} statistics: total_inputs: 7 - validated_inputs: 6 + validated_inputs: 7 skipped_inputs: 0 - coverage_percentage: 86 -validation_coverage: 86 + coverage_percentage: 100 +validation_coverage: 100 auto_detected: true manual_review_required: false quality_indicators: diff --git a/docker-build/rules.yml b/docker-build/rules.yml index 708e088..0cbc5dd 100644 --- a/docker-build/rules.yml +++ b/docker-build/rules.yml @@ -2,7 +2,7 @@ # Validation rules for docker-build action # Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY # Schema version: 1.0 -# Coverage: 63% (17/27 inputs) +# Coverage: 100% (27/27 inputs) # # This file defines validation rules for the docker-build GitHub Action. 
# Rules are automatically applied by validate-inputs action when this @@ -45,17 +45,27 @@ optional_inputs: conventions: architectures: docker_architectures auto-detect-platforms: docker_architectures + build-args: key_value_list + build-contexts: key_value_list buildkit-version: semantic_version buildx-version: semantic_version - cache-mode: boolean + cache-export: cache_config + cache-from: cache_config + cache-import: cache_config + cache-mode: cache_mode + context: file_path dockerfile: file_path dry-run: boolean image-name: docker_image_name max-retries: numeric_range_1_10 + network: network_mode parallel-builds: numeric_range_0_16 + platform-build-args: json_format platform-fallback: docker_architectures - sbom-format: report_format + push: boolean + sbom-format: sbom_format scan-image: boolean + secrets: key_value_list sign-image: boolean tag: docker_tag token: github_token @@ -65,12 +75,12 @@ overrides: sbom-format: sbom_format statistics: total_inputs: 27 - validated_inputs: 17 + validated_inputs: 27 skipped_inputs: 0 - coverage_percentage: 63 -validation_coverage: 63 + coverage_percentage: 100 +validation_coverage: 100 auto_detected: true -manual_review_required: true +manual_review_required: false quality_indicators: has_required_inputs: true has_token_validation: true diff --git a/docker-publish/rules.yml b/docker-publish/rules.yml index 70aa1bc..adcadca 100644 --- a/docker-publish/rules.yml +++ b/docker-publish/rules.yml @@ -2,7 +2,7 @@ # Validation rules for docker-publish action # Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY # Schema version: 1.0 -# Coverage: 73% (8/11 inputs) +# Coverage: 100% (11/11 inputs) # # This file defines validation rules for the docker-publish GitHub Action. # Rules are automatically applied by validate-inputs action when this @@ -27,25 +27,27 @@ optional_inputs: - tags - token conventions: + build-args: key_value_list + context: file_path dockerfile: file_path dockerhub-token: github_token dockerhub-username: username image-name: docker_image_name platforms: docker_architectures - registry: registry + push: boolean + registry: registry_enum tags: docker_tag token: github_token overrides: - platforms: null registry: registry_enum statistics: total_inputs: 11 - validated_inputs: 8 - skipped_inputs: 1 - coverage_percentage: 73 -validation_coverage: 73 + validated_inputs: 11 + skipped_inputs: 0 + coverage_percentage: 100 +validation_coverage: 100 auto_detected: true -manual_review_required: true +manual_review_required: false quality_indicators: has_required_inputs: false has_token_validation: true diff --git a/eslint-lint/rules.yml b/eslint-lint/rules.yml index ce75119..42786e7 100644 --- a/eslint-lint/rules.yml +++ b/eslint-lint/rules.yml @@ -44,7 +44,8 @@ conventions: token: github_token username: username working-directory: file_path -overrides: {} +overrides: + mode: mode_enum statistics: total_inputs: 14 validated_inputs: 14 diff --git a/generate_listing.cjs b/generate_listing.cjs index e3f1157..cd4b0b6 100755 --- a/generate_listing.cjs +++ b/generate_listing.cjs @@ -46,6 +46,9 @@ const CATEGORIES = { 'compress-images': 'Repository', 'codeql-analysis': 'Repository', + // Security + 'security-scan': 'Security', + // Validation 'validate-inputs': 'Validation', }; @@ -120,6 +123,7 @@ const CATEGORY_ICONS = { Build: '๐Ÿ—๏ธ', Publishing: '๐Ÿš€', Repository: '๐Ÿ“ฆ', + Security: '๐Ÿ›ก๏ธ', Validation: 'โœ…', }; @@ -232,7 +236,7 @@ function generateCategoryTables(actions) { let output = ''; // Sort categories by priority - const 
categoryOrder = ['Setup', 'Utilities', 'Linting', 'Testing', 'Build', 'Publishing', 'Repository', 'Validation']; + const categoryOrder = ['Setup', 'Utilities', 'Linting', 'Testing', 'Build', 'Publishing', 'Repository', 'Security', 'Validation']; for (const category of categoryOrder) { if (!categories[category]) continue; diff --git a/go-lint/rules.yml b/go-lint/rules.yml index bfcd950..483f561 100644 --- a/go-lint/rules.yml +++ b/go-lint/rules.yml @@ -36,15 +36,17 @@ conventions: disable-linters: linter_list enable-linters: linter_list fail-on-error: boolean - go-version: semantic_version + go-version: go_version golangci-lint-version: semantic_version max-retries: numeric_range_1_10 - only-new-issues: branch_name + only-new-issues: boolean report-format: report_format - timeout: numeric_range_1_3600 + timeout: timeout_with_unit token: github_token working-directory: file_path overrides: + disable-linters: linter_list + enable-linters: linter_list go-version: go_version only-new-issues: boolean timeout: timeout_with_unit diff --git a/language-version-detect/rules.yml b/language-version-detect/rules.yml index dc600aa..c698526 100644 --- a/language-version-detect/rules.yml +++ b/language-version-detect/rules.yml @@ -2,7 +2,7 @@ # Validation rules for language-version-detect action # Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY # Schema version: 1.0 -# Coverage: 67% (2/3 inputs) +# Coverage: 100% (3/3 inputs) # # This file defines validation rules for the language-version-detect GitHub Action. # Rules are automatically applied by validate-inputs action when this @@ -21,16 +21,17 @@ optional_inputs: - token conventions: default-version: semantic_version + language: language_enum token: github_token overrides: {} statistics: total_inputs: 3 - validated_inputs: 2 + validated_inputs: 3 skipped_inputs: 0 - coverage_percentage: 67 -validation_coverage: 67 + coverage_percentage: 100 +validation_coverage: 100 auto_detected: true -manual_review_required: true +manual_review_required: false quality_indicators: has_required_inputs: true has_token_validation: true diff --git a/npm-publish/rules.yml b/npm-publish/rules.yml index 678f1bf..c76c076 100644 --- a/npm-publish/rules.yml +++ b/npm-publish/rules.yml @@ -22,7 +22,7 @@ optional_inputs: - token conventions: npm_token: github_token - package-version: semantic_version + package-version: strict_semantic_version registry-url: url scope: scope token: github_token diff --git a/php-tests/rules.yml b/php-tests/rules.yml index e5b66ec..ef0e134 100644 --- a/php-tests/rules.yml +++ b/php-tests/rules.yml @@ -2,7 +2,7 @@ # Validation rules for php-tests action # Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY # Schema version: 1.0 -# Coverage: 78% (7/9 inputs) +# Coverage: 89% (8/9 inputs) # # This file defines validation rules for the php-tests GitHub Action. 
# Rules are automatically applied by validate-inputs action when this @@ -27,7 +27,8 @@ optional_inputs: conventions: coverage: coverage_driver email: email - framework: boolean + extensions: php_extensions + framework: framework_mode max-retries: numeric_range_1_10 php-version: semantic_version token: github_token @@ -35,12 +36,12 @@ conventions: overrides: {} statistics: total_inputs: 9 - validated_inputs: 7 + validated_inputs: 8 skipped_inputs: 0 - coverage_percentage: 78 -validation_coverage: 78 + coverage_percentage: 89 +validation_coverage: 89 auto_detected: true -manual_review_required: true +manual_review_required: false quality_indicators: has_required_inputs: false has_token_validation: true diff --git a/prettier-lint/rules.yml b/prettier-lint/rules.yml index f0b820e..25902ac 100644 --- a/prettier-lint/rules.yml +++ b/prettier-lint/rules.yml @@ -2,7 +2,7 @@ # Validation rules for prettier-lint action # Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY # Schema version: 1.0 -# Coverage: 86% (12/14 inputs) +# Coverage: 100% (14/14 inputs) # # This file defines validation rules for the prettier-lint GitHub Action. # Rules are automatically applied by validate-inputs action when this @@ -34,21 +34,24 @@ conventions: config-file: file_path email: email fail-on-error: boolean + file-pattern: path_list ignore-file: file_path max-retries: numeric_range_1_10 mode: mode_enum + plugins: linter_list prettier-version: semantic_version report-format: report_format token: github_token username: username working-directory: file_path -overrides: {} +overrides: + mode: mode_enum statistics: total_inputs: 14 - validated_inputs: 12 + validated_inputs: 14 skipped_inputs: 0 - coverage_percentage: 86 -validation_coverage: 86 + coverage_percentage: 100 +validation_coverage: 100 auto_detected: true manual_review_required: false quality_indicators: diff --git a/security-scan/README.md b/security-scan/README.md new file mode 100644 index 0000000..49d2b4e --- /dev/null +++ b/security-scan/README.md @@ -0,0 +1,82 @@ +# ivuorinen/actions/security-scan + +## Security Scan + +### Description + +Comprehensive security scanning for GitHub Actions including actionlint, +Gitleaks (optional), and Trivy vulnerability scanning. Requires +'security-events: write' and 'contents: read' permissions in the workflow. + +### Inputs + +| name | description | required | default | +|----------------------|--------------------------------------------------------------|----------|----------------------| +| `gitleaks-license` |

Gitleaks license key (required for Gitleaks scanning) | `false` | `""` |
+| `gitleaks-config` | Path to Gitleaks config file | `false` | `.gitleaks.toml` |
+| `trivy-severity` | Severity levels to scan for (comma-separated) | `false` | `CRITICAL,HIGH` |
+| `trivy-scanners` | Types of scanners to run (comma-separated) | `false` | `vuln,config,secret` |
+| `trivy-timeout` | Timeout for Trivy scan | `false` | `10m` |
+| `actionlint-enabled` | Enable actionlint scanning | `false` | `true` |
+| `token` | GitHub token for authentication | `false` | `""` |
+
+### Outputs
+
+| name | description |
+|------------------------|----------------------------------------------|
+| `has_trivy_results` | Whether Trivy scan produced valid results |
+| `has_gitleaks_results` | Whether Gitleaks scan produced valid results |
+| `total_issues` | Total number of security issues found |
+| `critical_issues` | Number of critical security issues found
| + +### Runs + +This action is a `composite` action. + +### Usage + +```yaml +- uses: ivuorinen/actions/security-scan@main + with: + gitleaks-license: + # Gitleaks license key (required for Gitleaks scanning) + # + # Required: false + # Default: "" + + gitleaks-config: + # Path to Gitleaks config file + # + # Required: false + # Default: .gitleaks.toml + + trivy-severity: + # Severity levels to scan for (comma-separated) + # + # Required: false + # Default: CRITICAL,HIGH + + trivy-scanners: + # Types of scanners to run (comma-separated) + # + # Required: false + # Default: vuln,config,secret + + trivy-timeout: + # Timeout for Trivy scan + # + # Required: false + # Default: 10m + + actionlint-enabled: + # Enable actionlint scanning + # + # Required: false + # Default: true + + token: + # GitHub token for authentication + # + # Required: false + # Default: "" +``` diff --git a/security-scan/action.yml b/security-scan/action.yml new file mode 100644 index 0000000..1ab5100 --- /dev/null +++ b/security-scan/action.yml @@ -0,0 +1,282 @@ +# yaml-language-server: $schema=https://json.schemastore.org/github-action.json +# +# REQUIRED PERMISSIONS (set these in your workflow file): +# permissions: +# security-events: write # Required for SARIF uploads +# contents: read # Required for repository access +# +--- +name: Security Scan +description: | + Comprehensive security scanning for GitHub Actions including actionlint, + Gitleaks (optional), and Trivy vulnerability scanning. Requires + 'security-events: write' and 'contents: read' permissions in the workflow. +author: Ismo Vuorinen +branding: + icon: shield + color: red + +inputs: + gitleaks-license: + description: 'Gitleaks license key (required for Gitleaks scanning)' + required: false + default: '' + gitleaks-config: + description: 'Path to Gitleaks config file' + required: false + default: '.gitleaks.toml' + trivy-severity: + description: 'Severity levels to scan for (comma-separated)' + required: false + default: 'CRITICAL,HIGH' + trivy-scanners: + description: 'Types of scanners to run (comma-separated)' + required: false + default: 'vuln,config,secret' + trivy-timeout: + description: 'Timeout for Trivy scan' + required: false + default: '10m' + actionlint-enabled: + description: 'Enable actionlint scanning' + required: false + default: 'true' + token: + description: 'GitHub token for authentication' + required: false + default: '' + +outputs: + has_trivy_results: + description: 'Whether Trivy scan produced valid results' + value: ${{ steps.verify-sarif.outputs.has_trivy }} + has_gitleaks_results: + description: 'Whether Gitleaks scan produced valid results' + value: ${{ steps.verify-sarif.outputs.has_gitleaks }} + total_issues: + description: 'Total number of security issues found' + value: ${{ steps.analyze.outputs.total_issues }} + critical_issues: + description: 'Number of critical security issues found' + value: ${{ steps.analyze.outputs.critical_issues }} + +runs: + using: composite + steps: + - name: Validate Inputs + id: validate + uses: ivuorinen/actions/validate-inputs@5cc7373a22402ee8985376bc713f00e09b5b2edb + with: + action-type: security-scan + gitleaks-license: ${{ inputs.gitleaks-license }} + gitleaks-config: ${{ inputs.gitleaks-config }} + trivy-severity: ${{ inputs.trivy-severity }} + trivy-scanners: ${{ inputs.trivy-scanners }} + trivy-timeout: ${{ inputs.trivy-timeout }} + actionlint-enabled: ${{ inputs.actionlint-enabled }} + token: ${{ inputs.token }} + + - name: Check Required Configurations + id: check-configs + shell: 
sh + run: | + set -eu + + # Initialize all flags as false + { + printf '%s\n' "run_gitleaks=false" + printf '%s\n' "run_trivy=true" + printf '%s\n' "run_actionlint=${{ inputs.actionlint-enabled }}" + } >> "$GITHUB_OUTPUT" + + # Check Gitleaks configuration and license + if [ -f "${{ inputs.gitleaks-config }}" ] && [ -n "${{ inputs.gitleaks-license }}" ]; then + printf 'Gitleaks config and license found\n' + printf '%s\n' "run_gitleaks=true" >> "$GITHUB_OUTPUT" + else + printf '::warning::Gitleaks config or license missing - skipping Gitleaks scan\n' + fi + + - name: Run actionlint + if: steps.check-configs.outputs.run_actionlint == 'true' + uses: raven-actions/actionlint@3a24062651993d40fed1019b58ac6fbdfbf276cc # v2.0.1 + with: + cache: true + fail-on-error: true + shellcheck: false + + - name: Run Gitleaks + if: steps.check-configs.outputs.run_gitleaks == 'true' + uses: gitleaks/gitleaks-action@ff98106e4c7b2bc287b24eaf42907196329070c7 # v2.3.9 + env: + GITHUB_TOKEN: ${{ inputs.token || github.token }} + GITLEAKS_LICENSE: ${{ inputs.gitleaks-license }} + with: + config-path: ${{ inputs.gitleaks-config }} + report-format: sarif + report-path: gitleaks-report.sarif + + - name: Run Trivy vulnerability scanner + if: steps.check-configs.outputs.run_trivy == 'true' + uses: aquasecurity/trivy-action@a11da62073708815958ea6d84f5650c78a3ef85b # master + with: + scan-type: 'fs' + scanners: ${{ inputs.trivy-scanners }} + format: 'sarif' + output: 'trivy-results.sarif' + severity: ${{ inputs.trivy-severity }} + timeout: ${{ inputs.trivy-timeout }} + + - name: Verify SARIF files + id: verify-sarif + shell: sh + run: | + set -eu + + # Initialize outputs + { + printf '%s\n' "has_trivy=false" + printf '%s\n' "has_gitleaks=false" + } >> "$GITHUB_OUTPUT" + + # Check Trivy results + if [ -f "trivy-results.sarif" ]; then + if jq -e . <"trivy-results.sarif" >/dev/null 2>&1; then + printf '%s\n' "has_trivy=true" >> "$GITHUB_OUTPUT" + else + printf '::warning::Trivy SARIF file exists but is not valid JSON\n' + fi + fi + + # Check Gitleaks results if it ran + if [ "${{ steps.check-configs.outputs.run_gitleaks }}" = "true" ]; then + if [ -f "gitleaks-report.sarif" ]; then + if jq -e . 
<"gitleaks-report.sarif" >/dev/null 2>&1; then + printf '%s\n' "has_gitleaks=true" >> "$GITHUB_OUTPUT" + else + printf '::warning::Gitleaks SARIF file exists but is not valid JSON\n' + fi + fi + fi + + - name: Upload Trivy results + if: steps.verify-sarif.outputs.has_trivy == 'true' + uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + with: + sarif_file: 'trivy-results.sarif' + category: 'trivy' + + - name: Upload Gitleaks results + if: steps.verify-sarif.outputs.has_gitleaks == 'true' + uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + with: + sarif_file: 'gitleaks-report.sarif' + category: 'gitleaks' + + - name: Archive security reports + if: always() + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: security-reports-${{ github.run_id }} + path: | + ${{ steps.verify-sarif.outputs.has_trivy == 'true' && 'trivy-results.sarif' || '' }} + ${{ steps.verify-sarif.outputs.has_gitleaks == 'true' && 'gitleaks-report.sarif' || '' }} + retention-days: 30 + + - name: Analyze Results + id: analyze + if: always() + shell: node {0} + run: | + const fs = require('fs'); + + try { + let totalIssues = 0; + let criticalIssues = 0; + + const analyzeSarif = (file, tool) => { + if (!fs.existsSync(file)) { + console.log(`No results file found for ${tool}`); + return null; + } + + try { + const sarif = JSON.parse(fs.readFileSync(file, 'utf8')); + return sarif.runs.reduce((acc, run) => { + if (!run.results) return acc; + + const critical = run.results.filter(r => + r.level === 'error' || + r.level === 'critical' || + (r.ruleId || '').toLowerCase().includes('critical') + ).length; + + return { + total: acc.total + run.results.length, + critical: acc.critical + critical + }; + }, { total: 0, critical: 0 }); + } catch (error) { + console.log(`Error analyzing ${tool} results: ${error.message}`); + return null; + } + }; + + // Only analyze results from tools that ran successfully + const results = { + trivy: '${{ steps.verify-sarif.outputs.has_trivy }}' === 'true' ? + analyzeSarif('trivy-results.sarif', 'trivy') : null, + gitleaks: '${{ steps.verify-sarif.outputs.has_gitleaks }}' === 'true' ? 
+ analyzeSarif('gitleaks-report.sarif', 'gitleaks') : null + }; + + // Aggregate results + Object.entries(results).forEach(([tool, result]) => { + if (result) { + totalIssues += result.total; + criticalIssues += result.critical; + console.log(`${tool}: ${result.total} total, ${result.critical} critical issues`); + } + }); + + // Create summary + const summary = `## Security Scan Summary + + - Total Issues Found: ${totalIssues} + - Critical Issues: ${criticalIssues} + + ### Tool Breakdown + ${Object.entries(results) + .filter(([_, r]) => r) + .map(([tool, r]) => + `- ${tool}: ${r.total} total, ${r.critical} critical` + ).join('\n')} + + ### Tools Run Status + - Actionlint: ${{ steps.check-configs.outputs.run_actionlint }} + - Trivy: ${{ steps.verify-sarif.outputs.has_trivy }} + - Gitleaks: ${{ steps.check-configs.outputs.run_gitleaks }} + `; + + // Set outputs using GITHUB_OUTPUT + const outputFile = process.env.GITHUB_OUTPUT; + if (outputFile) { + fs.appendFileSync(outputFile, `total_issues=${totalIssues}\n`); + fs.appendFileSync(outputFile, `critical_issues=${criticalIssues}\n`); + } + + // Add job summary using GITHUB_STEP_SUMMARY + const summaryFile = process.env.GITHUB_STEP_SUMMARY; + if (summaryFile) { + fs.appendFileSync(summaryFile, summary + '\n'); + } + + // Fail if critical issues found + if (criticalIssues > 0) { + console.error(`Found ${criticalIssues} critical security issues`); + process.exit(1); + } + } catch (error) { + console.error(`Analysis failed: ${error.message}`); + process.exit(1); + } diff --git a/security-scan/rules.yml b/security-scan/rules.yml new file mode 100644 index 0000000..0527d45 --- /dev/null +++ b/security-scan/rules.yml @@ -0,0 +1,55 @@ +--- +# Validation rules for security-scan action +# Generated by update-validators.py v1.0.0 - DO NOT EDIT MANUALLY +# Schema version: 1.0 +# Coverage: 86% (6/7 inputs) +# +# This file defines validation rules for the security-scan GitHub Action. +# Rules are automatically applied by validate-inputs action when this +# action is used. +# + +schema_version: '1.0' +action: security-scan +description: | + Comprehensive security scanning for GitHub Actions including actionlint, + Gitleaks (optional), and Trivy vulnerability scanning. Requires + 'security-events: write' and 'contents: read' permissions in the workflow. 
+generator_version: 1.0.0 +required_inputs: [] +optional_inputs: + - actionlint-enabled + - gitleaks-config + - gitleaks-license + - token + - trivy-scanners + - trivy-severity + - trivy-timeout +conventions: + actionlint-enabled: boolean + gitleaks-config: file_path + token: github_token + trivy-scanners: scanner_list + trivy-severity: severity_enum + trivy-timeout: timeout_with_unit +overrides: + actionlint-enabled: boolean + gitleaks-config: file_path + token: github_token + trivy-scanners: scanner_list + trivy-severity: severity_enum + trivy-timeout: timeout_with_unit +statistics: + total_inputs: 7 + validated_inputs: 6 + skipped_inputs: 0 + coverage_percentage: 86 +validation_coverage: 86 +auto_detected: true +manual_review_required: false +quality_indicators: + has_required_inputs: false + has_token_validation: true + has_version_validation: false + has_file_validation: true + has_security_validation: true diff --git a/validate-inputs/README.md b/validate-inputs/README.md index 371e406..4611b5c 100644 --- a/validate-inputs/README.md +++ b/validate-inputs/README.md @@ -8,56 +8,62 @@ Centralized Python-based input validation for GitHub Actions with PCRE regex sup ### Inputs -| name | description | required | default | -|---------------------|-------------------------------------------------------------------------------------|----------|---------| -| `action` |

Action name to validate (alias for action-type) | `false` | `""` |
-| `action-type` | Type of action to validate (e.g., csharp-publish, docker-build, eslint-lint) | `false` | `""` |
-| `rules-file` | Path to validation rules file | `false` | `""` |
-| `fail-on-error` | Whether to fail on validation errors | `false` | `true` |
-| `token` | GitHub token for authentication | `false` | `""` |
-| `namespace` | Namespace/username for validation | `false` | `""` |
-| `email` | Email address for validation | `false` | `""` |
-| `username` | Username for validation | `false` | `""` |
-| `dotnet-version` | .NET version string | `false` | `""` |
-| `terraform-version` | Terraform version string | `false` | `""` |
-| `tflint-version` | TFLint version string | `false` | `""` |
-| `node-version` | Node.js version string | `false` | `""` |
-| `force-version` | Force version override | `false` | `""` |
-| `default-version` | Default version fallback | `false` | `""` |
-| `image-name` | Docker image name | `false` | `""` |
-| `tag` | Docker image tag | `false` | `""` |
-| `architectures` | Target architectures | `false` | `""` |
-| `dockerfile` | Dockerfile path | `false` | `""` |
-| `context` | Docker build context | `false` | `""` |
-| `build-args` | Docker build arguments | `false` | `""` |
-| `buildx-version` | Docker Buildx version | `false` | `""` |
-| `max-retries` | Maximum retry attempts | `false` | `""` |
-| `image-quality` | Image quality percentage | `false` | `""` |
-| `png-quality` | PNG quality percentage | `false` | `""` |
-| `parallel-builds` | Number of parallel builds | `false` | `""` |
-| `days-before-stale` | Number of days before marking as stale | `false` | `""` |
-| `days-before-close` | Number of days before closing stale items | `false` | `""` |
-| `pre-commit-config` | Pre-commit configuration file path | `false` | `""` |
-| `base-branch` | Base branch name | `false` | `""` |
-| `dry-run` | Dry run mode | `false` | `""` |
-| `is_fiximus` | Use Fiximus bot | `false` | `""` |
-| `prefix` | Release tag prefix | `false` | `""` |
-| `language` | Language to analyze (for CodeQL) | `false` | `""` |
-| `queries` | CodeQL queries to run | `false` | `""` |
-| `packs` | CodeQL query packs | `false` | `""` |
-| `config-file` | CodeQL configuration file path | `false` | `""` |
-| `config` | CodeQL configuration YAML string | `false` | `""` |
-| `build-mode` | Build mode for compiled languages | `false` | `""` |
-| `source-root` | Source code root directory | `false` | `""` |
-| `category` | Analysis category | `false` | `""` |
-| `checkout-ref` | Git reference to checkout | `false` | `""` |
-| `working-directory` | Working directory for analysis | `false` | `""` |
-| `upload-results` | Upload results to GitHub Security | `false` | `""` |
-| `ram` | Memory in MB for CodeQL | `false` | `""` |
-| `threads` | Number of threads for CodeQL | `false` | `""` |
-| `output` | Output path for SARIF results | `false` | `""` |
-| `skip-queries` | Skip running queries | `false` | `""` |
-| `add-snippets` | Add code snippets to SARIF | `false` | `""` |
+| name | description | required | default |
+|------|-------------|----------|---------|
+| `action` | Action name to validate (alias for action-type) | `false` | `""` |
+| `action-type` | Type of action to validate (e.g., csharp-publish, docker-build, eslint-lint) | `false` | `""` |
+| `rules-file` | Path to validation rules file | `false` | `""` |
+| `fail-on-error` | Whether to fail on validation errors | `false` | `true` |
+| `token` | GitHub token for authentication | `false` | `""` |
+| `namespace` | Namespace/username for validation | `false` | `""` |
+| `email` | Email address for validation | `false` | `""` |
+| `username` | Username for validation | `false` | `""` |
+| `dotnet-version` | .NET version string | `false` | `""` |
+| `terraform-version` | Terraform version string | `false` | `""` |
+| `tflint-version` | TFLint version string | `false` | `""` |
+| `node-version` | Node.js version string | `false` | `""` |
+| `force-version` | Force version override | `false` | `""` |
+| `default-version` | Default version fallback | `false` | `""` |
+| `image-name` | Docker image name | `false` | `""` |
+| `tag` | Docker image tag | `false` | `""` |
+| `architectures` | Target architectures | `false` | `""` |
+| `dockerfile` | Dockerfile path | `false` | `""` |
+| `context` | Docker build context | `false` | `""` |
+| `build-args` | Docker build arguments | `false` | `""` |
+| `buildx-version` | Docker Buildx version | `false` | `""` |
+| `max-retries` | Maximum retry attempts | `false` | `""` |
+| `image-quality` | Image quality percentage | `false` | `""` |
+| `png-quality` | PNG quality percentage | `false` | `""` |
+| `parallel-builds` | Number of parallel builds | `false` | `""` |
+| `days-before-stale` | Number of days before marking as stale | `false` | `""` |
+| `days-before-close` | Number of days before closing stale items | `false` | `""` |
+| `pre-commit-config` | Pre-commit configuration file path | `false` | `""` |
+| `base-branch` | Base branch name | `false` | `""` |
+| `dry-run` | Dry run mode | `false` | `""` |
+| `is_fiximus` | Use Fiximus bot | `false` | `""` |
+| `prefix` | Release tag prefix | `false` | `""` |
+| `language` | Language to analyze (for CodeQL) | `false` | `""` |
+| `queries` | CodeQL queries to run | `false` | `""` |
+| `packs` | CodeQL query packs | `false` | `""` |
+| `config-file` | CodeQL configuration file path | `false` | `""` |
+| `config` | CodeQL configuration YAML string | `false` | `""` |
+| `build-mode` | Build mode for compiled languages | `false` | `""` |
+| `source-root` | Source code root directory | `false` | `""` |
+| `category` | Analysis category | `false` | `""` |
+| `checkout-ref` | Git reference to checkout | `false` | `""` |
+| `working-directory` | Working directory for analysis | `false` | `""` |
+| `upload-results` | Upload results to GitHub Security | `false` | `""` |
+| `ram` | Memory in MB for CodeQL | `false` | `""` |
+| `threads` | Number of threads for CodeQL | `false` | `""` |
+| `output` | Output path for SARIF results | `false` | `""` |
+| `skip-queries` | Skip running queries | `false` | `""` |
+| `add-snippets` | Add code snippets to SARIF | `false` | `""` |
+| `gitleaks-license` | Gitleaks license key | `false` | `""` |
+| `gitleaks-config` | Gitleaks configuration file path | `false` | `""` |
+| `trivy-severity` | Trivy severity levels to scan | `false` | `""` |
+| `trivy-scanners` | Trivy scanner types to run | `false` | `""` |
+| `trivy-timeout` | Trivy scan timeout | `false` | `""` |
+| `actionlint-enabled` | Enable actionlint scanning
| `false` | `""` | ### Outputs @@ -365,4 +371,40 @@ This action is a `composite` action. # # Required: false # Default: "" + + gitleaks-license: + # Gitleaks license key + # + # Required: false + # Default: "" + + gitleaks-config: + # Gitleaks configuration file path + # + # Required: false + # Default: "" + + trivy-severity: + # Trivy severity levels to scan + # + # Required: false + # Default: "" + + trivy-scanners: + # Trivy scanner types to run + # + # Required: false + # Default: "" + + trivy-timeout: + # Trivy scan timeout + # + # Required: false + # Default: "" + + actionlint-enabled: + # Enable actionlint scanning + # + # Required: false + # Default: "" ``` diff --git a/validate-inputs/action.yml b/validate-inputs/action.yml index e9d73c8..4fcd90a 100644 --- a/validate-inputs/action.yml +++ b/validate-inputs/action.yml @@ -173,6 +173,26 @@ inputs: description: 'Add code snippets to SARIF' required: false + # Security-scan specific inputs + gitleaks-license: + description: 'Gitleaks license key' + required: false + gitleaks-config: + description: 'Gitleaks configuration file path' + required: false + trivy-severity: + description: 'Trivy severity levels to scan' + required: false + trivy-scanners: + description: 'Trivy scanner types to run' + required: false + trivy-timeout: + description: 'Trivy scan timeout' + required: false + actionlint-enabled: + description: 'Enable actionlint scanning' + required: false + outputs: validation-status: description: 'Overall validation status (success/failure)' diff --git a/validate-inputs/scripts/update-validators.py b/validate-inputs/scripts/update-validators.py index 9bf674f..646dd7e 100755 --- a/validate-inputs/scripts/update-validators.py +++ b/validate-inputs/scripts/update-validators.py @@ -114,7 +114,7 @@ class ValidationRuleGenerator: "prefix": re.compile(r"\b(prefix|tag[_-]?prefix)\b", re.IGNORECASE), # Boolean patterns (broad, should be lower priority) "boolean": re.compile( - r"\b(dry-?run|verbose|enable|disable|auto|skip|force|cache|provenance|sbom|scan|sign|fail[_-]?on[_-]?error|nightly)\b", + r"\b(dry-?run|verbose|enable|disable|auto|skip|force|cache|provenance|sbom|scan|sign|push|fail[_-]?on[_-]?error|nightly)\b", re.IGNORECASE, ), # File extensions pattern @@ -160,36 +160,36 @@ class ValidationRuleGenerator: "npm_token": "github_token", "password": "github_token", # Complex fields that should skip validation - "build-args": None, # Can be empty - "context": None, # Default handled - "cache-from": None, # Complex cache syntax - "cache-export": None, # Complex cache syntax - "cache-import": None, # Complex cache syntax - "build-contexts": None, # Complex syntax - "secrets": None, # Complex syntax - "platform-build-args": None, # JSON format - "extensions": None, # PHP extensions list - "tools": None, # PHP tools list + "build-args": "key_value_list", # Docker build arguments (KEY=VALUE format) + "context": "file_path", # Build context path + "cache-from": "cache_config", # Docker cache configuration + "cache-export": "cache_config", # Docker cache configuration + "cache-import": "cache_config", # Docker cache configuration + "build-contexts": "key_value_list", # Docker build contexts (KEY=VALUE format) + "secrets": "key_value_list", # Docker secrets (KEY=VALUE format) + "platform-build-args": "json_format", # JSON format for platform-specific args + "extensions": "php_extensions", # PHP extensions list + "tools": "linter_list", # PHP tools list - same pattern as linters + "framework": "framework_mode", # PHP framework mode (auto, 
laravel, generic) "args": None, # Composer args "stability": None, # Composer stability "registry-url": "url", # URL format "scope": "scope", # NPM scope - "plugins": None, # Prettier plugins + "plugins": "linter_list", # Prettier plugins - same pattern as linters "file-extensions": "file_extensions", # File extension list - "file-pattern": None, # Glob pattern - "enable-linters": None, # Linter list - "disable-linters": None, # Linter list - "success-codes": None, # Exit code list - "retry-codes": None, # Exit code list - "ignore-paths": None, # Path patterns - "key-files": None, # Cache key files - "restore-keys": None, # Cache restore keys - "env-vars": None, # Environment variables + "file-pattern": "path_list", # Glob pattern for file paths + "enable-linters": "linter_list", # Linter list + "disable-linters": "linter_list", # Linter list + "success-codes": "exit_code_list", # Exit code list + "retry-codes": "exit_code_list", # Exit code list + "ignore-paths": "path_list", # Path patterns to ignore + "key-files": "path_list", # Cache key files (paths) + "restore-keys": "path_list", # Cache restore keys (paths) + "env-vars": "key_value_list", # Environment variables (KEY=VALUE format) # Action-specific fields that need special handling "type": None, # Cache type enum (npm, composer, go, etc.) - complex enum, # skip validation - "paths": None, # File paths for caching (comma-separated) - complex format, - # skip validation + "paths": "path_list", # File paths for caching (comma-separated) "command": None, # Shell command - complex format, skip validation for safety "backoff-strategy": None, # Retry strategy enum - complex enum, skip validation "shell": None, # Shell type enum - simple enum, skip validation @@ -199,10 +199,13 @@ class ValidationRuleGenerator: "retry-delay": "numeric_range_1_300", # Retry delay should support higher values "max-warnings": "numeric_range_0_10000", # version-file-parser specific fields - "language": None, # Simple enum (node, php, python, go, dotnet) "tool-versions-key": None, # Simple string (nodejs, python, php, golang, dotnet) "dockerfile-image": None, # Simple string (node, python, php, golang, dotnet) "validation-regex": "regex_pattern", # Regex pattern - validate for ReDoS + # Docker network mode + "network": "network_mode", # Docker network mode (host, none, default) + # Language enum for version detection + "language": "language_enum", # Language type (php, python, go, dotnet) } def get_action_directories(self) -> list[str]: @@ -314,7 +317,6 @@ class ValidationRuleGenerator: "docker-publish": { "registry": "registry_enum", "cache-mode": "cache_mode", - "platforms": None, # Skip validation - complex platform format }, "docker-publish-hub": { "password": "docker_password", @@ -354,26 +356,28 @@ class ValidationRuleGenerator: "prettier-lint": { "mode": "mode_enum", }, + "security-scan": { + "gitleaks-config": "file_path", + "trivy-severity": "severity_enum", + "trivy-scanners": "scanner_list", + "trivy-timeout": "timeout_with_unit", + "actionlint-enabled": "boolean", + "token": "github_token", + }, } if action_name in action_overrides: # Apply overrides for existing conventions - overrides.update( - { - input_name: override_value - for input_name, override_value in action_overrides[action_name].items() - if input_name in conventions - }, - ) - # Add missing inputs from overrides to conventions for input_name, override_value in action_overrides[action_name].items(): - if input_name not in conventions and input_name in action_data["inputs"]: + if 
input_name in action_data["inputs"]: + overrides[input_name] = override_value + # Update conventions to match override (or set to None if skipped) conventions[input_name] = override_value # Calculate statistics total_inputs = len(action_data["inputs"]) - validated_inputs = len(conventions) - skipped_inputs = sum(1 for v in overrides.values() if v is None) + validated_inputs = sum(1 for v in conventions.values() if v is not None) + skipped_inputs = sum(1 for v in conventions.values() if v is None) coverage = round((validated_inputs / total_inputs) * 100) if total_inputs > 0 else 0 # Generate rules object with enhanced metadata @@ -432,8 +436,20 @@ class ValidationRuleGenerator: # Use a custom yaml dumper to ensure proper indentation class CustomYamlDumper(yaml.SafeDumper): - def increase_indent(self, flow: bool = False, *, indentless: bool = False) -> None: # noqa: FBT001, FBT002 - return super().increase_indent(flow, indentless=indentless) + def increase_indent(self, flow: bool = False, *, indentless: bool = False) -> None: # noqa: FBT001, FBT002, ARG002 # type: ignore[override] + return super().increase_indent(flow, False) + + def choose_scalar_style(self): + """Choose appropriate quote style based on string content.""" + if hasattr(self, "event") and hasattr(self.event, "value") and self.event.value: # type: ignore[attr-defined] + value = self.event.value # type: ignore[attr-defined] + # Use literal block style for multiline strings + if "\n" in value: + return "|" + # Use double quotes for strings with single quotes + if "'" in value: + return '"' + return super().choose_scalar_style() yaml_content = yaml.dump( rules, diff --git a/validate-inputs/tests/test_conventions.py b/validate-inputs/tests/test_conventions.py index 6512f91..c1941f9 100644 --- a/validate-inputs/tests/test_conventions.py +++ b/validate-inputs/tests/test_conventions.py @@ -221,9 +221,13 @@ optional_inputs: assert self.validator._validate_php_extensions("mbstring, intl, pdo", "extensions") is True assert self.validator._validate_php_extensions("mbstring,intl,pdo", "extensions") is True - # Invalid formats (@ is in injection pattern) - assert self.validator._validate_php_extensions("mbstring@intl", "extensions") is False - assert self.validator._validate_php_extensions("mbstring;rm -rf /", "extensions") is False + # Invalid formats (pattern mismatch and injection) + assert ( + self.validator._validate_php_extensions("mbstring@intl", "extensions") is False + ) # @ not in pattern + assert ( + self.validator._validate_php_extensions("mbstring;rm -rf /", "extensions") is False + ) # injection assert self.validator._validate_php_extensions("ext`whoami`", "extensions") is False def test_validate_coverage_driver(self): @@ -312,3 +316,1059 @@ optional_inputs: """Test validation with empty inputs.""" result = self.validator.validate_inputs({}) assert result is True # Empty inputs should pass + + def test_validate_mode_enum_valid(self): + """Test mode enum validation with valid values.""" + valid_modes = [ + "check", + "fix", + "", # Empty is optional + ] + + for mode in valid_modes: + self.validator.clear_errors() + result = self.validator._validate_mode_enum(mode, "mode") + assert result is True, f"Should accept mode: {mode}" + + def test_validate_mode_enum_invalid(self): + """Test mode enum validation with invalid values.""" + invalid_modes = [ + "lint", # Wrong value + "validate", # Wrong value + "CHECK", # Uppercase + "Fix", # Mixed case + "check,fix", # Comma-separated not allowed + "auto", # Wrong value + "both", # Wrong 
value + ] + + for mode in invalid_modes: + self.validator.clear_errors() + result = self.validator._validate_mode_enum(mode, "mode") + assert result is False, f"Should reject mode: {mode}" + assert self.validator.has_errors() + + def test_validate_report_format_valid(self): + """Test report format validation with valid values.""" + valid_formats = [ + "checkstyle", + "colored-line-number", + "compact", + "github-actions", + "html", + "json", + "junit", + "junit-xml", + "line-number", + "sarif", + "stylish", + "tab", + "teamcity", + "xml", + "", # Empty is optional + ] + + for fmt in valid_formats: + self.validator.clear_errors() + result = self.validator._validate_report_format(fmt, "report-format") + assert result is True, f"Should accept format: {fmt}" + + def test_validate_report_format_invalid(self): + """Test report format validation with invalid values.""" + invalid_formats = [ + "text", # Wrong value + "csv", # Wrong value + "markdown", # Wrong value + "SARIF", # Uppercase + "Json", # Mixed case + "json,sarif", # Comma-separated not allowed + "pdf", # Wrong value + ] + + for fmt in invalid_formats: + self.validator.clear_errors() + result = self.validator._validate_report_format(fmt, "report-format") + assert result is False, f"Should reject format: {fmt}" + assert self.validator.has_errors() + + def test_validate_linter_list_valid(self): + """Test linter list validation with valid values.""" + valid_lists = [ + "gosec", + "govet", + "staticcheck", + "gosec,govet,staticcheck", + "eslint,prettier,typescript-eslint", + "my_linter", + "my-linter", + "linter123", + "a,b,c", + "", # Empty is optional + ] + + for linter_list in valid_lists: + self.validator.clear_errors() + result = self.validator._validate_linter_list(linter_list, "enable-linters") + assert result is True, f"Should accept linter list: {linter_list}" + + def test_validate_linter_list_invalid(self): + """Test linter list validation with invalid values.""" + invalid_lists = [ + "linter;rm -rf /", # Dangerous characters + "linter1,,linter2", # Double comma + ",linter", # Leading comma + "linter,", # Trailing comma + "linter one", # Space + "linter@test", # @ not allowed + "linter$name", # $ not allowed + ] + + for linter_list in invalid_lists: + self.validator.clear_errors() + result = self.validator._validate_linter_list(linter_list, "enable-linters") + assert result is False, f"Should reject linter list: {linter_list}" + assert self.validator.has_errors() + + def test_validate_timeout_with_unit_valid(self): + """Test timeout with unit validation with valid values.""" + valid_timeouts = [ + "5m", + "30s", + "1h", + "500ms", + "100ns", + "1000us", + "1000ยตs", + "2h", + "90s", + "15m", + "", # Empty is optional + ] + + for timeout in valid_timeouts: + self.validator.clear_errors() + result = self.validator._validate_timeout_with_unit(timeout, "timeout") + assert result is True, f"Should accept timeout: {timeout}" + + def test_validate_timeout_with_unit_invalid(self): + """Test timeout with unit validation with invalid values.""" + invalid_timeouts = [ + "5", # Missing unit + "m", # Missing number + "5minutes", # Wrong unit + "5M", # Uppercase unit + "5 m", # Space + "-5m", # Negative not allowed + "5.5m", # Decimal not allowed + "5sec", # Wrong unit + "5min", # Wrong unit + ] + + for timeout in invalid_timeouts: + self.validator.clear_errors() + result = self.validator._validate_timeout_with_unit(timeout, "timeout") + assert result is False, f"Should reject timeout: {timeout}" + assert self.validator.has_errors() + + def 
test_validate_severity_enum_valid(self): + """Test severity enum validation with valid values.""" + valid_severities = [ + "CRITICAL", + "HIGH", + "MEDIUM", + "LOW", + "UNKNOWN", + "CRITICAL,HIGH", + "CRITICAL,HIGH,MEDIUM", + "LOW,MEDIUM,HIGH,CRITICAL", + "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL", + "", # Empty is optional + ] + + for severity in valid_severities: + self.validator.clear_errors() + result = self.validator._validate_severity_enum(severity, "severity") + assert result is True, f"Should accept severity: {severity}" + + def test_validate_severity_enum_invalid(self): + """Test severity enum validation with invalid values.""" + invalid_severities = [ + "INVALID", # Wrong value + "critical", # Lowercase not allowed + "Critical", # Mixed case + "CRITICAL,INVALID", # One invalid + "CRITICAL,,HIGH", # Double comma (empty severity) + ",CRITICAL", # Leading comma (empty severity) + "CRITICAL,", # Trailing comma (empty severity) + "CRIT", # Wrong abbreviation + "HI", # Wrong abbreviation + ] + + for severity in invalid_severities: + self.validator.clear_errors() + result = self.validator._validate_severity_enum(severity, "severity") + assert result is False, f"Should reject severity: {severity}" + assert self.validator.has_errors() + + def test_validate_severity_enum_with_spaces(self): + """Test that spaces after commas are handled correctly.""" + # These should be valid - spaces are stripped + valid_with_spaces = [ + "CRITICAL, HIGH", + "CRITICAL , HIGH", + "CRITICAL, HIGH", + "LOW, MEDIUM, HIGH", + ] + + for severity in valid_with_spaces: + self.validator.clear_errors() + result = self.validator._validate_severity_enum(severity, "severity") + assert result is True, f"Should accept severity with spaces: {severity}" + + def test_validate_comma_separated_list_pattern_based(self): + """Test comma-separated list validator with pattern-based validation.""" + # Valid pattern-based lists + valid_lists = [ + "item1", + "item1,item2", + "item-1,item_2,item3", + "", # Empty is optional + ] + + for value in valid_lists: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, "test-input", item_pattern=r"^[a-zA-Z0-9_-]+$", item_name="item" + ) + assert result is True, f"Should accept pattern-based list: {value}" + + # Invalid pattern-based lists + invalid_lists = [ + "item1,,item2", # Double comma (empty item) + ",item1", # Leading comma + "item1,", # Trailing comma + "item 1", # Space in item + "item@1", # Invalid character + ] + + for value in invalid_lists: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, "test-input", item_pattern=r"^[a-zA-Z0-9_-]+$", item_name="item" + ) + assert result is False, f"Should reject pattern-based list: {value}" + assert self.validator.has_errors() + + def test_validate_comma_separated_list_enum_based(self): + """Test comma-separated list validator with enum-based validation.""" + valid_items = ["vuln", "config", "secret", "license"] + + # Valid enum-based lists + valid_lists = [ + "vuln", + "vuln,config", + "vuln,config,secret,license", + "license,config", + "", # Empty is optional + ] + + for value in valid_lists: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, "scanners", valid_items=valid_items, item_name="scanner" + ) + assert result is True, f"Should accept enum-based list: {value}" + + # Invalid enum-based lists + invalid_lists = [ + "invalid", # Not in enum + "vuln,invalid", # One invalid item + "vuln,,config", # Double 
comma + ",vuln", # Leading comma + "config,", # Trailing comma + ] + + for value in invalid_lists: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, "scanners", valid_items=valid_items, item_name="scanner" + ) + assert result is False, f"Should reject enum-based list: {value}" + assert self.validator.has_errors() + + def test_validate_comma_separated_list_injection_check(self): + """Test comma-separated list validator with injection checking.""" + # Valid values (no injection) - using relaxed pattern that allows @# + valid_values = [ + "item1,item2", + "safe_value", + "item@host", # @ is not a shell injection vector + "item#comment", # # is not a shell injection vector + "", # Empty + ] + + for value in valid_values: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, + "test-input", + item_pattern=r"^[a-zA-Z0-9_@#-]+$", # Explicit pattern allowing @# + check_injection=True, + item_name="item", + ) + assert result is True, f"Should accept safe value: {value}" + + # Invalid values (shell injection patterns) + injection_values = [ + "item;ls", # Semicolon + "item&whoami", # Ampersand + "item|cat", # Pipe + "item`date`", # Backtick + "item$(echo)", # Command substitution + ] + + for value in injection_values: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, + "test-input", + item_pattern=r"^[a-zA-Z0-9_@#-]+$", # Same pattern for consistency + check_injection=True, + item_name="item", + ) + assert result is False, f"Should reject injection pattern: {value}" + assert self.validator.has_errors() + assert "injection" in self.validator.errors[0].lower() + + def test_validate_comma_separated_list_with_spaces(self): + """Test that comma-separated list handles spaces correctly.""" + # Spaces should be stripped + valid_with_spaces = [ + "item1, item2", + "item1 , item2", + "item1, item2", + "item1 , item2 ,item3", + ] + + for value in valid_with_spaces: + self.validator.clear_errors() + result = self.validator._validate_comma_separated_list( + value, "test-input", item_pattern=r"^[a-zA-Z0-9]+$", item_name="item" + ) + assert result is True, f"Should accept list with spaces: {value}" + + def test_validate_scanner_list_valid(self): + """Test scanner list validation with valid values.""" + valid_scanners = [ + "vuln", + "config", + "secret", + "license", + "vuln,config", + "vuln,config,secret", + "vuln,config,secret,license", + "license,secret,config,vuln", # Order doesn't matter + "", # Empty is optional + ] + + for scanners in valid_scanners: + self.validator.clear_errors() + result = self.validator._validate_scanner_list(scanners, "trivy-scanners") + assert result is True, f"Should accept scanner list: {scanners}" + + def test_validate_scanner_list_invalid(self): + """Test scanner list validation with invalid values.""" + invalid_scanners = [ + "invalid", # Not a valid scanner + "vuln,invalid", # One invalid + "vuln,,config", # Double comma + ",vuln", # Leading comma + "config,", # Trailing comma + "VULN", # Wrong case + "vulnerability", # Wrong name + ] + + for scanners in invalid_scanners: + self.validator.clear_errors() + result = self.validator._validate_scanner_list(scanners, "trivy-scanners") + assert result is False, f"Should reject scanner list: {scanners}" + assert self.validator.has_errors() + + def test_validate_binary_enum_valid(self): + """Test binary enum validation with valid values.""" + # Test default check/fix values + valid_values = 
["check", "fix", ""] + + for value in valid_values: + self.validator.clear_errors() + result = self.validator._validate_binary_enum(value, "mode") + assert result is True, f"Should accept binary enum: {value}" + + # Test custom binary enum + valid_custom = ["enabled", "disabled", ""] + + for value in valid_custom: + self.validator.clear_errors() + result = self.validator._validate_binary_enum( + value, "status", valid_values=["enabled", "disabled"] + ) + assert result is True, f"Should accept custom binary enum: {value}" + + def test_validate_binary_enum_invalid(self): + """Test binary enum validation with invalid values.""" + # Test invalid values for default check/fix + invalid_values = ["invalid", "CHECK", "Fix", "checking", "fixed"] + + for value in invalid_values: + self.validator.clear_errors() + result = self.validator._validate_binary_enum(value, "mode") + assert result is False, f"Should reject binary enum: {value}" + assert self.validator.has_errors() + + # Test case-sensitive validation + case_sensitive_invalid = ["CHECK", "FIX", "Check"] + + for value in case_sensitive_invalid: + self.validator.clear_errors() + result = self.validator._validate_binary_enum(value, "mode", case_sensitive=True) + assert result is False, f"Should reject case-sensitive: {value}" + assert self.validator.has_errors() + + def test_validate_binary_enum_case_insensitive(self): + """Test binary enum with case-insensitive validation.""" + # Test case-insensitive validation + case_variations = ["check", "CHECK", "Check", "fix", "FIX", "Fix"] + + for value in case_variations: + self.validator.clear_errors() + result = self.validator._validate_binary_enum(value, "mode", case_sensitive=False) + assert result is True, f"Should accept case-insensitive: {value}" + + def test_validate_binary_enum_wrong_count(self): + """Test binary enum with wrong number of values.""" + # Should raise ValueError if not exactly 2 values + try: + self.validator._validate_binary_enum("test", "input", valid_values=["only_one"]) + raise AssertionError("Should raise ValueError for single value") + except ValueError as e: + assert "exactly 2 valid values" in str(e) + + try: + self.validator._validate_binary_enum( + "test", "input", valid_values=["one", "two", "three"] + ) + raise AssertionError("Should raise ValueError for three values") + except ValueError as e: + assert "exactly 2 valid values" in str(e) + + def test_validate_format_enum_valid(self): + """Test format enum validation with valid values.""" + # Test default comprehensive format list + valid_formats = [ + "json", + "sarif", + "checkstyle", + "github-actions", + "html", + "xml", + "junit-xml", + "stylish", + "", # Empty is optional + ] + + for fmt in valid_formats: + self.validator.clear_errors() + result = self.validator._validate_format_enum(fmt, "format") + assert result is True, f"Should accept format: {fmt}" + + # Test custom format list + custom_formats = ["json", "sarif", "text"] + valid_custom = ["json", "sarif", ""] + + for fmt in valid_custom: + self.validator.clear_errors() + result = self.validator._validate_format_enum( + fmt, "output-format", valid_formats=custom_formats + ) + assert result is True, f"Should accept custom format: {fmt}" + + def test_validate_format_enum_invalid(self): + """Test format enum validation with invalid values.""" + # Test invalid formats for default list + invalid_formats = ["invalid", "txt", "pdf", "markdown", "yaml"] + + for fmt in invalid_formats: + self.validator.clear_errors() + result = 
self.validator._validate_format_enum(fmt, "format") + assert result is False, f"Should reject format: {fmt}" + assert self.validator.has_errors() + + # Test format not in custom list + custom_formats = ["json", "sarif"] + invalid_custom = ["xml", "html", "text"] + + for fmt in invalid_custom: + self.validator.clear_errors() + result = self.validator._validate_format_enum( + fmt, "output-format", valid_formats=custom_formats + ) + assert result is False, f"Should reject custom format: {fmt}" + assert self.validator.has_errors() + + def test_validate_format_enum_allow_custom(self): + """Test format enum with allow_custom flag.""" + # Test that allow_custom=True accepts any format + any_formats = ["json", "custom-format", "my-tool-format", ""] + + for fmt in any_formats: + self.validator.clear_errors() + result = self.validator._validate_format_enum(fmt, "format", allow_custom=True) + assert result is True, f"Should accept any format with allow_custom: {fmt}" + + # Test that known formats still work with custom list + known_formats = ["json", "sarif", "xml"] + + for fmt in known_formats: + self.validator.clear_errors() + result = self.validator._validate_format_enum( + fmt, + "format", + valid_formats=["json", "sarif"], + allow_custom=True, + ) + assert result is True, f"Should accept format with allow_custom: {fmt}" + + def test_validate_multi_value_enum_valid(self): + """Test multi-value enum validation with valid values.""" + # Test 3-value enum + valid_values_3 = ["check", "fix", ""] + + for value in valid_values_3: + self.validator.clear_errors() + result = self.validator._validate_multi_value_enum( + value, "mode", valid_values=["check", "fix", "both"] + ) + assert result is True, f"Should accept 3-value enum: {value}" + + # Test 4-value enum + valid_values_4 = ["php", "python", "go", "dotnet", ""] + + for value in valid_values_4: + self.validator.clear_errors() + result = self.validator._validate_multi_value_enum( + value, "language", valid_values=["php", "python", "go", "dotnet"] + ) + assert result is True, f"Should accept 4-value enum: {value}" + + def test_validate_multi_value_enum_invalid(self): + """Test multi-value enum validation with invalid values.""" + # Test invalid values for 3-value enum + invalid_values = ["invalid", "CHECK", "Fix"] + + for value in invalid_values: + self.validator.clear_errors() + result = self.validator._validate_multi_value_enum( + value, "mode", valid_values=["check", "fix", "both"] + ) + assert result is False, f"Should reject multi-value enum: {value}" + assert self.validator.has_errors() + + def test_validate_multi_value_enum_case_insensitive(self): + """Test multi-value enum with case-insensitive validation.""" + # Test case variations + case_variations = ["check", "CHECK", "Check", "fix", "FIX", "both", "BOTH"] + + for value in case_variations: + self.validator.clear_errors() + result = self.validator._validate_multi_value_enum( + value, + "mode", + valid_values=["check", "fix", "both"], + case_sensitive=False, + ) + assert result is True, f"Should accept case-insensitive: {value}" + + def test_validate_multi_value_enum_wrong_count(self): + """Test multi-value enum with wrong number of values.""" + # Should raise ValueError if less than min_values + try: + self.validator._validate_multi_value_enum("test", "input", valid_values=["only_one"]) + raise AssertionError("Should raise ValueError for single value") + except ValueError as e: + assert "at least 2 valid values" in str(e) + + # Should raise ValueError if more than max_values + try: + 
self.validator._validate_multi_value_enum( + "test", + "input", + valid_values=["v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"], + ) + raise AssertionError("Should raise ValueError for 11 values") + except ValueError as e: + assert "at most 10 valid values" in str(e) + + def test_validate_exit_code_list_valid(self): + """Test exit code list validation with valid values.""" + valid_codes = [ + "0", + "1", + "255", + "0,1,2", + "5,10,15", + "0,130", + "0,1,2,5,10", + "", # Empty is optional + ] + + for codes in valid_codes: + self.validator.clear_errors() + result = self.validator._validate_exit_code_list(codes, "success-codes") + assert result is True, f"Should accept exit codes: {codes}" + + def test_validate_exit_code_list_invalid(self): + """Test exit code list validation with invalid values.""" + invalid_codes = [ + "256", # Out of range + "0,256", # One out of range + "-1", # Negative + "0,-1", # One negative + "abc", # Non-numeric + "0,abc", # One non-numeric + "0,,1", # Double comma (empty) + ",0", # Leading comma + "0,", # Trailing comma + "999", # Way out of range + ] + + for codes in invalid_codes: + self.validator.clear_errors() + result = self.validator._validate_exit_code_list(codes, "success-codes") + assert result is False, f"Should reject exit codes: {codes}" + assert self.validator.has_errors() + + def test_validate_exit_code_list_edge_cases(self): + """Test exit code list with edge cases.""" + # Test boundary values + self.validator.clear_errors() + result = self.validator._validate_exit_code_list("0,255", "codes") + assert result is True, "Should accept boundary values 0 and 255" + + # Test with spaces (should be stripped) + self.validator.clear_errors() + result = self.validator._validate_exit_code_list("0, 1, 2", "codes") + assert result is True, "Should accept codes with spaces" + + # Phase 2B: High-value validators + + def test_validate_key_value_list_valid(self): + """Test valid key-value lists.""" + # Single key-value pair + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=value", "build-args") + assert result is True, "Should accept single key-value pair" + + # Multiple key-value pairs + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY1=value1,KEY2=value2", "build-args") + assert result is True, "Should accept multiple key-value pairs" + + # Empty value (valid for some use cases) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=", "build-args") + assert result is True, "Should accept empty value" + + # Value containing equals sign + self.validator.clear_errors() + result = self.validator._validate_key_value_list( + "CONNECTION_STRING=host=localhost;port=5432", "env-vars" + ) + assert result is False, "Should reject value with semicolon (injection risk)" + + # Underscores and hyphens in keys + self.validator.clear_errors() + result = self.validator._validate_key_value_list( + "BUILD_ARG=test,my-key=value", "build-args" + ) + assert result is True, "Should accept underscores and hyphens in keys" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("", "build-args") + assert result is True, "Should accept empty string" + + def test_validate_key_value_list_invalid(self): + """Test invalid key-value lists.""" + # Missing equals sign + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY", "build-args") + assert result is False, "Should reject 
missing equals sign" + assert any("Expected format: KEY=VALUE" in err for err in self.validator.errors), ( + "Should have format error message" + ) + + # Empty key + self.validator.clear_errors() + result = self.validator._validate_key_value_list("=value", "build-args") + assert result is False, "Should reject empty key" + assert any("Key cannot be empty" in err for err in self.validator.errors), ( + "Should have empty key error" + ) + + # Empty pair after comma + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=value,", "build-args") + assert result is False, "Should reject trailing comma" + + # Invalid characters in key + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY@=value", "build-args") + assert result is False, "Should reject invalid characters in key" + + def test_validate_key_value_list_injection(self): + """Test security checks for key-value lists.""" + # Semicolon (command separator) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=value;whoami", "build-args") + assert result is False, "Should reject semicolon" + assert any("Potential injection" in err for err in self.validator.errors), ( + "Should have injection error" + ) + + # Pipe (command chaining) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=value|ls", "build-args") + assert result is False, "Should reject pipe" + + # Backticks (command substitution) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=`whoami`", "build-args") + assert result is False, "Should reject backticks" + + # Dollar sign (variable expansion) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=$PATH", "build-args") + assert result is False, "Should reject dollar sign" + + # Parentheses (subshell) + self.validator.clear_errors() + result = self.validator._validate_key_value_list("KEY=(echo test)", "build-args") + assert result is False, "Should reject parentheses" + + def test_validate_path_list_valid(self): + """Test valid path lists.""" + # Single file path + self.validator.clear_errors() + result = self.validator._validate_path_list("src/index.js", "paths") + assert result is True, "Should accept single file path" + + # Multiple paths + self.validator.clear_errors() + result = self.validator._validate_path_list("src/,dist/,build/", "paths") + assert result is True, "Should accept multiple paths" + + # Glob patterns + self.validator.clear_errors() + result = self.validator._validate_path_list("src/**/*.js", "file-pattern") + assert result is True, "Should accept glob patterns" + + # Multiple glob patterns + self.validator.clear_errors() + result = self.validator._validate_path_list( + "*.js,src/**/*.ts,test/[ab].spec.js", "file-pattern" + ) + assert result is True, "Should accept multiple glob patterns" + + # Absolute paths + self.validator.clear_errors() + result = self.validator._validate_path_list("/usr/local/bin", "paths") + assert result is True, "Should accept absolute paths" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_path_list("", "paths") + assert result is True, "Should accept empty string" + + # Paths with special chars (@, ~, +) + self.validator.clear_errors() + result = self.validator._validate_path_list( + "@scope/package,~/config,node_modules/+utils", "paths" + ) + assert result is True, "Should accept @, ~, + in paths" + + def 
test_validate_path_list_invalid(self): + """Test invalid path lists.""" + # Empty path after comma + self.validator.clear_errors() + result = self.validator._validate_path_list("src/,", "paths") + assert result is False, "Should reject trailing comma" + assert any("Contains empty path" in err for err in self.validator.errors), ( + "Should have empty path error" + ) + + # Invalid characters (when glob disabled) + self.validator.clear_errors() + result = self.validator._validate_path_list("src/*.js", "paths", allow_glob=False) + assert result is False, "Should reject glob when disabled" + + def test_validate_path_list_security(self): + """Test security checks for path lists.""" + # Path traversal with ../ + self.validator.clear_errors() + result = self.validator._validate_path_list("../etc/passwd", "paths") + assert result is False, "Should reject ../ path traversal" + assert any("Path traversal detected" in err for err in self.validator.errors), ( + "Should have path traversal error" + ) + + # Path traversal in middle + self.validator.clear_errors() + result = self.validator._validate_path_list("src/../etc/passwd", "paths") + assert result is False, "Should reject path traversal in middle" + + # Path ending with /.. + self.validator.clear_errors() + result = self.validator._validate_path_list("src/..", "paths") + assert result is False, "Should reject path ending with /.." + + # Semicolon (command separator) + self.validator.clear_errors() + result = self.validator._validate_path_list("src/;rm -rf /", "paths") + assert result is False, "Should reject semicolon" + assert any("Potential injection" in err for err in self.validator.errors), ( + "Should have injection error" + ) + + # Pipe (command chaining) + self.validator.clear_errors() + result = self.validator._validate_path_list("src/|ls", "paths") + assert result is False, "Should reject pipe" + + # Backticks (command substitution) + self.validator.clear_errors() + result = self.validator._validate_path_list("src/`whoami`", "paths") + assert result is False, "Should reject backticks" + + # Dollar sign (variable expansion) + self.validator.clear_errors() + result = self.validator._validate_path_list("$HOME/config", "paths") + assert result is False, "Should reject dollar sign" + + # Quick wins: Additional enum validators + + def test_validate_network_mode_valid(self): + """Test valid Docker network modes.""" + # Valid network modes + self.validator.clear_errors() + result = self.validator._validate_network_mode("host", "network") + assert result is True, "Should accept 'host'" + + self.validator.clear_errors() + result = self.validator._validate_network_mode("none", "network") + assert result is True, "Should accept 'none'" + + self.validator.clear_errors() + result = self.validator._validate_network_mode("default", "network") + assert result is True, "Should accept 'default'" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_network_mode("", "network") + assert result is True, "Should accept empty string" + + def test_validate_network_mode_invalid(self): + """Test invalid Docker network modes.""" + # Invalid values + self.validator.clear_errors() + result = self.validator._validate_network_mode("bridge", "network") + assert result is False, "Should reject 'bridge'" + + # Case sensitive + self.validator.clear_errors() + result = self.validator._validate_network_mode("HOST", "network") + assert result is False, "Should reject uppercase" + + # Invalid mode + self.validator.clear_errors() + 
result = self.validator._validate_network_mode("custom", "network") + assert result is False, "Should reject unknown mode" + + def test_validate_language_enum_valid(self): + """Test valid language enum values.""" + # Valid languages + self.validator.clear_errors() + result = self.validator._validate_language_enum("php", "language") + assert result is True, "Should accept 'php'" + + self.validator.clear_errors() + result = self.validator._validate_language_enum("python", "language") + assert result is True, "Should accept 'python'" + + self.validator.clear_errors() + result = self.validator._validate_language_enum("go", "language") + assert result is True, "Should accept 'go'" + + self.validator.clear_errors() + result = self.validator._validate_language_enum("dotnet", "language") + assert result is True, "Should accept 'dotnet'" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_language_enum("", "language") + assert result is True, "Should accept empty string" + + def test_validate_language_enum_invalid(self): + """Test invalid language enum values.""" + # Invalid languages + self.validator.clear_errors() + result = self.validator._validate_language_enum("node", "language") + assert result is False, "Should reject 'node'" + + self.validator.clear_errors() + result = self.validator._validate_language_enum("ruby", "language") + assert result is False, "Should reject 'ruby'" + + # Case sensitive + self.validator.clear_errors() + result = self.validator._validate_language_enum("PHP", "language") + assert result is False, "Should reject uppercase" + + self.validator.clear_errors() + result = self.validator._validate_language_enum("Python", "language") + assert result is False, "Should reject mixed case" + + def test_validate_framework_mode_valid(self): + """Test valid PHP framework modes.""" + # Valid framework modes + self.validator.clear_errors() + result = self.validator._validate_framework_mode("auto", "framework") + assert result is True, "Should accept 'auto'" + + self.validator.clear_errors() + result = self.validator._validate_framework_mode("laravel", "framework") + assert result is True, "Should accept 'laravel'" + + self.validator.clear_errors() + result = self.validator._validate_framework_mode("generic", "framework") + assert result is True, "Should accept 'generic'" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_framework_mode("", "framework") + assert result is True, "Should accept empty string" + + def test_validate_framework_mode_invalid(self): + """Test invalid PHP framework modes.""" + # Invalid frameworks + self.validator.clear_errors() + result = self.validator._validate_framework_mode("symfony", "framework") + assert result is False, "Should reject 'symfony'" + + # Case sensitive + self.validator.clear_errors() + result = self.validator._validate_framework_mode("Auto", "framework") + assert result is False, "Should reject mixed case" + + self.validator.clear_errors() + result = self.validator._validate_framework_mode("LARAVEL", "framework") + assert result is False, "Should reject uppercase" + + # Phase 2C: Specialized validators + + def test_validate_json_format_valid(self): + """Test valid JSON formats.""" + # Valid JSON objects + self.validator.clear_errors() + result = self.validator._validate_json_format('{"key":"value"}', "platform-build-args") + assert result is True, "Should accept valid JSON object" + + # Valid JSON array + self.validator.clear_errors() + result = 
self.validator._validate_json_format('["item1","item2"]', "platform-build-args") + assert result is True, "Should accept valid JSON array" + + # Complex nested JSON + self.validator.clear_errors() + result = self.validator._validate_json_format( + '{"platforms":["linux/amd64","linux/arm64"],"args":{"GO_VERSION":"1.21"}}', + "platform-build-args", + ) + assert result is True, "Should accept complex nested JSON" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_json_format("", "platform-build-args") + assert result is True, "Should accept empty string" + + def test_validate_json_format_invalid(self): + """Test invalid JSON formats.""" + # Invalid JSON syntax + self.validator.clear_errors() + result = self.validator._validate_json_format("{invalid}", "platform-build-args") + assert result is False, "Should reject invalid JSON" + assert any("Invalid JSON" in err for err in self.validator.errors) + + # Missing quotes + self.validator.clear_errors() + result = self.validator._validate_json_format("{key:value}", "platform-build-args") + assert result is False, "Should reject unquoted keys" + + # Not JSON + self.validator.clear_errors() + result = self.validator._validate_json_format("plain text", "platform-build-args") + assert result is False, "Should reject plain text" + + def test_validate_cache_config_valid(self): + """Test valid Docker cache configurations.""" + # Registry cache + self.validator.clear_errors() + result = self.validator._validate_cache_config( + "type=registry,ref=user/repo:cache", "cache-from" + ) + assert result is True, "Should accept registry cache config" + + # Local cache + self.validator.clear_errors() + result = self.validator._validate_cache_config("type=local,dest=/tmp/cache", "cache-export") + assert result is True, "Should accept local cache config" + + # GitHub Actions cache + self.validator.clear_errors() + result = self.validator._validate_cache_config("type=gha", "cache-from") + assert result is True, "Should accept gha cache type" + + # Inline cache + self.validator.clear_errors() + result = self.validator._validate_cache_config("type=inline", "cache-export") + assert result is True, "Should accept inline cache type" + + # S3 cache with multiple parameters + self.validator.clear_errors() + result = self.validator._validate_cache_config( + "type=s3,region=us-east-1,bucket=my-bucket", "cache-export" + ) + assert result is True, "Should accept s3 cache with parameters" + + # Empty value (optional) + self.validator.clear_errors() + result = self.validator._validate_cache_config("", "cache-from") + assert result is True, "Should accept empty string" + + def test_validate_cache_config_invalid(self): + """Test invalid Docker cache configurations.""" + # Missing type + self.validator.clear_errors() + result = self.validator._validate_cache_config("registry", "cache-from") + assert result is False, "Should reject missing type" + assert any("Must start with 'type=" in err for err in self.validator.errors) + + # Invalid type + self.validator.clear_errors() + result = self.validator._validate_cache_config("type=invalid", "cache-from") + assert result is False, "Should reject invalid cache type" + assert any("Invalid cache type" in err for err in self.validator.errors) + + # Invalid format (missing =) + self.validator.clear_errors() + result = self.validator._validate_cache_config("type=local,destpath", "cache-export") + assert result is False, "Should reject invalid key=value format" diff --git 
a/validate-inputs/tests/test_docker_validator.py b/validate-inputs/tests/test_docker_validator.py index bb96a4c..9f2f159 100644 --- a/validate-inputs/tests/test_docker_validator.py +++ b/validate-inputs/tests/test_docker_validator.py @@ -274,6 +274,71 @@ class TestDockerValidator: result = self.validator.validate_inputs(inputs) assert isinstance(result, bool) + def test_validate_registry_valid(self): + """Test registry enum validation with valid values.""" + valid_registries = [ + "dockerhub", + "github", + "both", + ] + + for registry in valid_registries: + self.validator.errors = [] + result = self.validator.validate_registry(registry) + assert result is True, f"Should accept registry: {registry}" + + def test_validate_registry_invalid(self): + """Test registry enum validation with invalid values.""" + invalid_registries = [ + "", # Empty + " ", # Whitespace only + "docker", # Wrong value (should be dockerhub) + "hub", # Wrong value + "ghcr", # Wrong value + "gcr", # Wrong value + "both,github", # Comma-separated not allowed + "DOCKERHUB", # Uppercase + "DockerHub", # Mixed case + "docker hub", # Space + "github.com", # Full URL not allowed + ] + + for registry in invalid_registries: + self.validator.errors = [] + result = self.validator.validate_registry(registry) + assert result is False, f"Should reject registry: {registry}" + + def test_validate_sbom_format_valid(self): + """Test SBOM format validation with valid values.""" + valid_formats = [ + "spdx-json", + "cyclonedx-json", + "", # Empty is optional + ] + + for sbom_format in valid_formats: + self.validator.errors = [] + result = self.validator.validate_sbom_format(sbom_format) + assert result is True, f"Should accept SBOM format: {sbom_format}" + + def test_validate_sbom_format_invalid(self): + """Test SBOM format validation with invalid values.""" + invalid_formats = [ + "spdx", # Missing -json suffix + "cyclonedx", # Missing -json suffix + "json", # Just json + "spdx-xml", # Wrong format + "cyclonedx-xml", # Wrong format + "SPDX-JSON", # Uppercase + "spdx json", # Space + "invalid", # Invalid value + ] + + for sbom_format in invalid_formats: + self.validator.errors = [] + result = self.validator.validate_sbom_format(sbom_format) + assert result is False, f"Should reject SBOM format: {sbom_format}" + def test_empty_values_handling(self): """Test that empty values are handled appropriately.""" # Some Docker fields might be required, others optional @@ -281,3 +346,5 @@ class TestDockerValidator: assert isinstance(self.validator.validate_docker_tag(""), bool) assert isinstance(self.validator.validate_architectures(""), bool) assert isinstance(self.validator.validate_prefix(""), bool) + # Registry should reject empty values + assert self.validator.validate_registry("") is False diff --git a/validate-inputs/tests/test_update_validators.py b/validate-inputs/tests/test_update_validators.py index 3401f1d..579484a 100644 --- a/validate-inputs/tests/test_update_validators.py +++ b/validate-inputs/tests/test_update_validators.py @@ -151,7 +151,7 @@ class TestValidationRuleGenerator: generator = ValidationRuleGenerator() # Test special cases from the mapping - assert generator.detect_validation_type("build-args", {}) is None + assert generator.detect_validation_type("build-args", {}) == "key_value_list" assert generator.detect_validation_type("version", {}) == "flexible_version" assert ( generator.detect_validation_type("dotnet-version", {}) == "dotnet_version" diff --git a/validate-inputs/validators/conventions.py 
b/validate-inputs/validators/conventions.py index 45a5087..3d87df8 100644 --- a/validate-inputs/validators/conventions.py +++ b/validate-inputs/validators/conventions.py @@ -556,13 +556,33 @@ class ConventionBasedValidator(BaseValidator): self._validator_modules["codeql"] = codeql.CodeQLValidator() return self._validator_modules["codeql"], f"validate_{validator_type}" - # PHP-specific validators - if validator_type in ["php_extensions", "coverage_driver", "mode_enum"]: - # Return self for PHP-specific validation methods + # Convention-based validators + if validator_type in [ + "php_extensions", + "coverage_driver", + "mode_enum", + "binary_enum", + "multi_value_enum", + "report_format", + "format_enum", + "linter_list", + "timeout_with_unit", + "severity_enum", + "scanner_list", + "exit_code_list", + "key_value_list", + "path_list", + "network_mode", + "language_enum", + "framework_mode", + "json_format", + "cache_config", + ]: + # Return self for validation methods implemented in this class return self, f"_validate_{validator_type}" - # Package manager and report format validators - if validator_type in ["package_manager_enum", "report_format"]: + # Package manager validators + if validator_type in ["package_manager_enum"]: # These could be in a separate module, but for now we'll put them in file validator if "file" not in self._validator_modules: from . import file @@ -592,9 +612,104 @@ class ConventionBasedValidator(BaseValidator): # Default range return 0, 100 + def _validate_comma_separated_list( + self, + value: str, + input_name: str, + item_pattern: str | None = None, + valid_items: list | None = None, + check_injection: bool = False, + item_name: str = "item", + ) -> bool: + """Validate comma-separated list of items (generic validator). + + This is a generic validator that can be used for any comma-separated list + with either pattern-based or enum-based validation. + + Args: + value: The comma-separated list value + input_name: The input name for error messages + item_pattern: Regex pattern each item must match (default: alphanumeric+hyphens+underscores) + valid_items: Optional list of valid items for enum-style validation + check_injection: Whether to check for shell injection patterns + item_name: Descriptive name for items in error messages (e.g., "linter", "extension") + + Returns: + True if valid, False otherwise + + Examples: + >>> # Pattern-based validation + >>> validator._validate_comma_separated_list( + ... "gosec,govet", "enable-linters", + ... item_pattern=r'^[a-zA-Z0-9_-]+$', + ... item_name="linter" + ... ) + True + + >>> # Enum-based validation + >>> validator._validate_comma_separated_list( + ... "vuln,config", "scanners", + ... valid_items=["vuln", "config", "secret", "license"], + ... item_name="scanner" + ... ) + True + """ + import re + + if not value or value.strip() == "": + return True # Optional + + # Security check for injection patterns + if check_injection and re.search(r"[;&|`$()]", value): + self.add_error( + f"Potential injection detected in {input_name}: {value}. " + f"Avoid using shell metacharacters (;, &, |, `, $, parentheses)" + ) + return False + + # Split by comma and validate each item + items = [item.strip() for item in value.split(",")] + + for item in items: + if not item: # Empty after strip + self.add_error(f"Invalid {input_name}: {value}. 
Contains empty {item_name}") + return False + + # Enum-based validation (if valid_items provided) + if valid_items is not None: + if item not in valid_items: + self.add_error( + f"Invalid {item_name} '{item}' in {input_name}. " + f"Must be one of: {', '.join(valid_items)}" + ) + return False + + # Pattern-based validation (if no valid_items and pattern provided) + elif item_pattern is not None: + if not re.match(item_pattern, item): + self.add_error( + f"Invalid {item_name} '{item}' in {input_name}. " + f"Must match pattern: alphanumeric with hyphens/underscores" + ) + return False + + # Default pattern if neither valid_items nor item_pattern provided + elif not re.match(r"^[a-zA-Z0-9_-]+$", item): + self.add_error( + f"Invalid {item_name} '{item}' in {input_name}. " + f"Must be alphanumeric with hyphens/underscores" + ) + return False + + return True + def _validate_php_extensions(self, value: str, input_name: str) -> bool: """Validate PHP extensions format. + Wrapper for comma-separated list validator with PHP extension-specific rules. + Allows alphanumeric characters, underscores, and spaces. + Checks for shell injection patterns. + Args: value: The extensions value (comma-separated list) input_name: The input name for error messages @@ -602,59 +717,736 @@ class ConventionBasedValidator(BaseValidator): Returns: True if valid, False otherwise """ - import re + return self._validate_comma_separated_list( + value, + input_name, + item_pattern=r"^[a-zA-Z0-9_\s]+$", + check_injection=True, + item_name="extension", + ) - if not value: - return True + def _validate_binary_enum( + self, + value: str, + input_name: str, + valid_values: list | None = None, + case_sensitive: bool = True, + ) -> bool: + """Validate binary enum (two-value choice) (generic validator). - # Check for injection patterns - if re.search(r"[;&|`$()@#]", value): - self.add_error(f"Potential injection detected in {input_name}: {value}") + This is a generic validator for two-value enums (e.g., check/fix, enabled/disabled). + + Args: + value: The enum value + input_name: The input name for error messages + valid_values: List of exactly 2 valid values (default: ["check", "fix"]) + case_sensitive: Whether validation is case-sensitive (default: True) + + Returns: + True if valid, False otherwise + + Examples: + >>> # Default check/fix mode + >>> validator._validate_binary_enum("check", "mode") + True + + >>> # Custom binary enum + >>> validator._validate_binary_enum( + ... "enabled", "status", + ... valid_values=["enabled", "disabled"] + ... ) + True + """ + if valid_values is None: + valid_values = ["check", "fix"] + + if len(valid_values) != 2: + raise ValueError( + f"Binary enum requires exactly 2 valid values, got {len(valid_values)}" + ) + + if not value or value.strip() == "": + return True # Optional + + # Case-insensitive comparison if needed + if not case_sensitive: + value_lower = value.lower() + valid_values_lower = [v.lower() for v in valid_values] + if value_lower not in valid_values_lower: + self.add_error( + f"Invalid {input_name}: {value}. Must be one of: {', '.join(valid_values)}" + ) + return False + else: + if value not in valid_values: + self.add_error( + f"Invalid {input_name}: {value}. Must be one of: {', '.join(valid_values)}" + ) + return False + + return True + + def _validate_format_enum( + self, + value: str, + input_name: str, + valid_formats: list | None = None, + allow_custom: bool = False, + ) -> bool: + """Validate output format enum (generic validator). 
+ + Generic validator for tool output formats (SARIF, JSON, XML, etc.). + Supports common formats across linting/analysis tools. + + Args: + value: The format value + input_name: The input name for error messages + valid_formats: List of valid formats (default: comprehensive list) + allow_custom: Whether to allow formats not in the predefined list (default: False) + + Returns: + True if valid, False otherwise + + Examples: + >>> # Default comprehensive format list + >>> validator._validate_format_enum("json", "format") + True + + >>> # Tool-specific format list + >>> validator._validate_format_enum( + ... "sarif", "output-format", + ... valid_formats=["json", "sarif", "text"] + ... ) + True + """ + if valid_formats is None: + # Comprehensive list of common formats across all tools + valid_formats = [ + "checkstyle", + "colored-line-number", + "compact", + "github-actions", + "html", + "json", + "junit", + "junit-xml", + "line-number", + "sarif", + "stylish", + "tab", + "teamcity", + "xml", + ] + + if not value or value.strip() == "": + return True # Optional + + # Check if format is valid + if value not in valid_formats and not allow_custom: + self.add_error( + f"Invalid {input_name}: {value}. Must be one of: {', '.join(valid_formats)}" + ) return False - # Check format - should be alphanumeric, underscores, commas, spaces only - if not re.match(r"^[a-zA-Z0-9_,\s]+$", value): - self.add_error(f"Invalid format for {input_name}: {value}") - return False + return True + + def _validate_multi_value_enum( + self, + value: str, + input_name: str, + valid_values: list | None = None, + case_sensitive: bool = True, + min_values: int = 2, + max_values: int = 10, + ) -> bool: + """Validate multi-value enum (2-10 value choice) (generic validator). + + Generic validator for enums with 2-10 predefined values. + For exactly 2 values, use _validate_binary_enum instead. + + Args: + value: The enum value + input_name: The input name for error messages + valid_values: List of valid values (2-10 items required) + case_sensitive: Whether validation is case-sensitive (default: True) + min_values: Minimum number of valid values (default: 2) + max_values: Maximum number of valid values (default: 10) + + Returns: + True if valid, False otherwise + + Examples: + >>> # Framework selection (3 values) + >>> validator._validate_multi_value_enum( + ... "laravel", "framework", + ... valid_values=["auto", "laravel", "generic"] + ... ) + True + + >>> # Language selection (4 values) + >>> validator._validate_multi_value_enum( + ... "python", "language", + ... valid_values=["php", "python", "go", "dotnet"] + ... ) + True + """ + if valid_values is None: + raise ValueError("valid_values is required for multi_value_enum validator") + + # Validate valid_values count + if len(valid_values) < min_values: + raise ValueError( + f"Multi-value enum requires at least {min_values} valid values, got {len(valid_values)}" + ) + + if len(valid_values) > max_values: + raise ValueError( + f"Multi-value enum supports at most {max_values} valid values, got {len(valid_values)}" + ) + + if not value or value.strip() == "": + return True # Optional + + # Case-insensitive comparison if needed + if not case_sensitive: + value_lower = value.lower() + valid_values_lower = [v.lower() for v in valid_values] + if value_lower not in valid_values_lower: + self.add_error( + f"Invalid {input_name}: {value}. 
Must be one of: {', '.join(valid_values)}" + ) + return False + else: + if value not in valid_values: + self.add_error( + f"Invalid {input_name}: {value}. Must be one of: {', '.join(valid_values)}" + ) + return False return True def _validate_coverage_driver(self, value: str, input_name: str) -> bool: """Validate coverage driver enum. + Wrapper for multi_value_enum validator with PHP coverage driver options. + Args: value: The coverage driver value input_name: The input name for error messages Returns: True if valid, False otherwise + + Examples: + Valid: "xdebug", "pcov", "xdebug3", "none", "" + Invalid: "xdebug2", "XDEBUG", "coverage" """ - valid_drivers = ["none", "xdebug", "pcov", "xdebug3"] - - if value and value not in valid_drivers: - self.add_error( - f"Invalid {input_name}: {value}. Must be one of: {', '.join(valid_drivers)}" - ) - return False - - return True + return self._validate_multi_value_enum( + value, + input_name, + valid_values=["none", "xdebug", "pcov", "xdebug3"], + case_sensitive=True, + ) def _validate_mode_enum(self, value: str, input_name: str) -> bool: """Validate mode enum for linting actions. + Wrapper for binary_enum validator with check/fix modes. + Args: value: The mode value input_name: The input name for error messages Returns: True if valid, False otherwise - """ - valid_modes = ["check", "fix"] - if value and value not in valid_modes: + Examples: + Valid: "check", "fix", "" + Invalid: "invalid", "CHECK", "Fix" + """ + return self._validate_binary_enum( + value, + input_name, + valid_values=["check", "fix"], + case_sensitive=True, + ) + + def _validate_report_format(self, value: str, input_name: str) -> bool: + """Validate report format for linting/analysis actions. + + Wrapper for format_enum validator with comprehensive format list. + Supports multiple report formats used across different tools. + + Args: + value: The report format value + input_name: The input name for error messages + + Returns: + True if valid, False otherwise + + Examples: + Valid: "json", "sarif", "checkstyle", "github-actions", "" + Invalid: "invalid", "txt", "pdf" + """ + return self._validate_format_enum(value, input_name) + + def _validate_linter_list(self, value: str, input_name: str) -> bool: + """Validate comma-separated list of linter names. + + Wrapper for comma-separated list validator with linter-specific rules. + Allows alphanumeric characters, hyphens, and underscores. + + Args: + value: The linter list value + input_name: The input name for error messages + + Returns: + True if valid, False otherwise + + Examples: + Valid: "gosec,govet,staticcheck", "errcheck" + Invalid: "gosec,,govet", "invalid linter", "linter@123" + """ + return self._validate_comma_separated_list( + value, + input_name, + item_pattern=r"^[a-zA-Z0-9_-]+$", + item_name="linter", + ) + + def _validate_timeout_with_unit(self, value: str, input_name: str) -> bool: + """Validate timeout duration with unit (Go duration format). + + Args: + value: The timeout value + input_name: The input name for error messages + + Returns: + True if valid, False otherwise + """ + import re + + if not value or value.strip() == "": + return True # Optional + + # Go duration format: number + unit (ns, us/µs, ms, s, m, h) + pattern = r"^[0-9]+(ns|us|µs|ms|s|m|h)$" + + if not re.match(pattern, value): self.add_error( - f"Invalid {input_name}: {value}. Must be one of: {', '.join(valid_modes)}" + f"Invalid {input_name}: {value}. 
Expected format: number with unit " + "(e.g., 5m, 30s, 1h, 500ms)" ) return False return True + + def _validate_severity_enum(self, value: str, input_name: str) -> bool: + """Validate severity levels enum (generalized). + + Generic validator for security tool severity levels. + Supports common severity formats used by various security tools. + + Default levels: UNKNOWN, LOW, MEDIUM, HIGH, CRITICAL (Trivy/CVSSv3 style) + Case-sensitive by default. + + Args: + value: The severity value (comma-separated for multiple levels) + input_name: The input name for error messages + + Returns: + True if valid, False otherwise + """ + if not value or value.strip() == "": + return True # Optional + + # Standard severity levels (Trivy/CVSSv3/OWASP compatible) + # Can be extended for specific tools by creating tool-specific validators + valid_severities = ["UNKNOWN", "LOW", "MEDIUM", "HIGH", "CRITICAL"] + + # Split by comma and validate each severity + severities = [s.strip() for s in value.split(",")] + + for severity in severities: + if not severity: # Empty after strip + self.add_error(f"Invalid {input_name}: {value}. Contains empty severity level") + return False + + # Case-sensitive validation + if severity not in valid_severities: + self.add_error( + f"Invalid {input_name}: {value}. Severity '{severity}' is not valid. " + f"Must be one of: {', '.join(valid_severities)}" + ) + return False + + return True + + def _validate_scanner_list(self, value: str, input_name: str) -> bool: + """Validate comma-separated list of scanner types (for Trivy). + + Wrapper for comma-separated list validator with Trivy scanner enum validation. + Supports: vuln, config, secret, license + + Args: + value: The scanner list value (comma-separated) + input_name: The input name for error messages + + Returns: + True if valid, False otherwise + + Examples: + Valid: "vuln,config,secret", "vuln", "config,license" + Invalid: "invalid", "vuln,invalid,config", "vuln,,config" + """ + return self._validate_comma_separated_list( + value, + input_name, + valid_items=["vuln", "config", "secret", "license"], + item_name="scanner", + ) + + def _validate_exit_code_list(self, value: str, input_name: str) -> bool: + """Validate comma-separated list of exit codes. + + Validates Unix/Linux exit codes (0-255) in comma-separated format. + Used for retry logic, success codes, and error handling. + + Args: + value: The exit code list value (comma-separated integers) + input_name: The input name for error messages + + Returns: + True if valid, False otherwise + + Examples: + Valid: "0", "0,1,2", "5,10,15", "0,130", "" + Invalid: "256", "0,256", "-1", "0,abc", "0,,1" + """ + import re + + if not value or value.strip() == "": + return True # Optional + + # Split by comma and validate each exit code + codes = [code.strip() for code in value.split(",")] + + for code in codes: + if not code: # Empty after strip + self.add_error(f"Invalid {input_name}: {value}. Contains empty exit code") + return False + + # Check if code is numeric + if not re.match(r"^[0-9]+$", code): + self.add_error( + f"Invalid exit code '{code}' in {input_name}. " + f"Exit codes must be integers (0-255)" + ) + return False + + # Validate range (0-255 for Unix/Linux exit codes) + code_int = int(code) + if code_int < 0 or code_int > 255: + self.add_error( + f"Invalid exit code '{code}' in {input_name}. 
Exit codes must be in range 0-255" + ) + return False + + return True + + def _validate_key_value_list( + self, + value: str, + input_name: str, + key_pattern: str | None = None, + check_injection: bool = True, + ) -> bool: + """Validate comma-separated list of key-value pairs (generic validator). + + Validates KEY=VALUE,KEY2=VALUE2 format commonly used for Docker build-args, + environment variables, and other configuration parameters. + + Args: + value: The key-value list value (comma-separated KEY=VALUE pairs) + input_name: The input name for error messages + key_pattern: Regex pattern for key validation (default: alphanumeric+underscores+hyphens) + check_injection: Whether to check for shell injection patterns in values (default: True) + + Returns: + True if valid, False otherwise + + Examples: + Valid: "KEY=value", "KEY1=value1,KEY2=value2", "BUILD_ARG=hello", "" + Invalid: "KEY", "=value", "KEY=", "KEY=value,", "KEY=val;whoami" + """ + import re + + if not value or value.strip() == "": + return True # Optional + + if key_pattern is None: + # Default: alphanumeric, underscores, hyphens (common for env vars and build args) + key_pattern = r"^[a-zA-Z0-9_-]+$" + + # Security check for injection patterns in the entire value + if check_injection and re.search(r"[;&|`$()]", value): + self.add_error( + f"Potential injection detected in {input_name}: {value}. " + f"Avoid using shell metacharacters (;, &, |, `, $, parentheses)" + ) + return False + + # Split by comma and validate each key-value pair + pairs = [pair.strip() for pair in value.split(",")] + + for pair in pairs: + if not pair: # Empty after strip + self.add_error(f"Invalid {input_name}: {value}. Contains empty key-value pair") + return False + + # Check for KEY=VALUE format + if "=" not in pair: + self.add_error( + f"Invalid key-value pair '{pair}' in {input_name}. Expected format: KEY=VALUE" + ) + return False + + # Split by first = only (value may contain =) + parts = pair.split("=", 1) + key = parts[0].strip() + + # Validate key is not empty + if not key: + self.add_error( + f"Invalid key-value pair '{pair}' in {input_name}. Key cannot be empty" + ) + return False + + # Validate key pattern + if not re.match(key_pattern, key): + self.add_error( + f"Invalid key '{key}' in {input_name}. " + f"Keys must be alphanumeric with underscores/hyphens" + ) + return False + + # Note: Value can be empty (KEY=) - this is valid for some use cases + # Value validation is optional and handled by the check_injection flag above + + return True + + def _validate_path_list( + self, + value: str, + input_name: str, + allow_glob: bool = True, + check_injection: bool = True, + ) -> bool: + """Validate comma-separated list of file paths or glob patterns (generic validator). + + Validates file paths and glob patterns commonly used for ignore-paths, + restore-keys, file-pattern, and other path-based inputs. 
+ + Args: + value: The path list to validate + input_name: Name of the input being validated + allow_glob: Whether to allow glob patterns (*, **, ?, []) + check_injection: Whether to check for shell injection patterns + + Examples: + Valid: "*.js", "src/**/*.ts", "dist/,build/", ".github/workflows/*", "" + Invalid: "../etc/passwd", "file;rm -rf /", "path|whoami" + + Returns: + bool: True if valid, False otherwise + """ + import re + + if not value or value.strip() == "": + return True # Optional + + # Security check for injection patterns + if check_injection and re.search(r"[;&|`$()]", value): + self.add_error( + f"Potential injection detected in {input_name}: {value}. " + f"Avoid using shell metacharacters (;, &, |, `, $, parentheses)" + ) + return False + + # Split by comma and validate each path + paths = [path.strip() for path in value.split(",")] + + for path in paths: + if not path: # Empty after strip + self.add_error(f"Invalid {input_name}: {value}. Contains empty path") + return False + + # Check for path traversal attempts + if "../" in path or "/.." in path or path.startswith(".."): + self.add_error( + f"Path traversal detected in {input_name}: {path}. Avoid using '..' in paths" + ) + return False + + # Validate glob patterns if allowed + if allow_glob: + # Glob patterns are valid: *, **, ?, [], {} + # Check for valid glob characters + glob_pattern = r"^[a-zA-Z0-9_\-./\*\?\[\]\{\},@~+]+$" + if not re.match(glob_pattern, path): + self.add_error( + f"Invalid path '{path}' in {input_name}. " + f"Paths may contain alphanumeric characters, hyphens, underscores, " + f"slashes, and glob patterns (*, **, ?, [], {{}})" + ) + return False + else: + # No glob patterns allowed - only alphanumeric, hyphens, underscores, slashes + path_pattern = r"^[a-zA-Z0-9_\-./,@~+]+$" + if not re.match(path_pattern, path): + self.add_error( + f"Invalid path '{path}' in {input_name}. " + f"Paths may only contain alphanumeric characters, hyphens, " + f"underscores, and slashes" + ) + return False + + return True + + def _validate_network_mode(self, value: str, input_name: str) -> bool: + """Validate Docker network mode enum. + + Wrapper for multi_value_enum validator with Docker network mode options. + + Examples: + Valid: "host", "none", "default", "" + Invalid: "bridge", "NONE", "custom" + + Returns: + bool: True if valid, False otherwise + """ + return self._validate_multi_value_enum( + value, + input_name, + valid_values=["host", "none", "default"], + case_sensitive=True, + ) + + def _validate_language_enum(self, value: str, input_name: str) -> bool: + """Validate language enum for version detection. + + Wrapper for multi_value_enum validator with supported language options. + + Examples: + Valid: "php", "python", "go", "dotnet", "" + Invalid: "node", "ruby", "PHP" + + Returns: + bool: True if valid, False otherwise + """ + return self._validate_multi_value_enum( + value, + input_name, + valid_values=["php", "python", "go", "dotnet"], + case_sensitive=True, + ) + + def _validate_framework_mode(self, value: str, input_name: str) -> bool: + """Validate PHP framework detection mode. + + Wrapper for multi_value_enum validator with framework mode options. 
+ + Examples: + Valid: "auto", "laravel", "generic", "" + Invalid: "symfony", "Auto", "LARAVEL" + + Returns: + bool: True if valid, False otherwise + """ + return self._validate_multi_value_enum( + value, + input_name, + valid_values=["auto", "laravel", "generic"], + case_sensitive=True, + ) + + def _validate_json_format(self, value: str, input_name: str) -> bool: + """Validate JSON format string. + + Validates that input is valid JSON. Used for structured configuration + data like platform-specific build arguments. + + Examples: + Valid: '{"key":"value"}', '[]', '{"platforms":["linux/amd64"]}', "" + Invalid: '{invalid}', 'not json', '{key:value}' + + Returns: + bool: True if valid, False otherwise + """ + import json + + if not value or value.strip() == "": + return True # Optional + + try: + json.loads(value) + return True + except json.JSONDecodeError as e: + self.add_error(f"Invalid JSON format in {input_name}: {value}. Error: {str(e)}") + return False + except Exception as e: + self.add_error(f"Failed to validate JSON in {input_name}: {str(e)}") + return False + + def _validate_cache_config(self, value: str, input_name: str) -> bool: + """Validate Docker BuildKit cache configuration. + + Validates Docker cache export/import configuration format. + Common formats: type=registry,ref=..., type=local,dest=..., type=gha + + Examples: + Valid: "type=registry,ref=user/repo:cache", "type=local,dest=/tmp/cache", + "type=gha", "type=inline", "" + Invalid: "invalid", "type=", "registry", "type=unknown" + + Returns: + bool: True if valid, False otherwise + """ + import re + + if not value or value.strip() == "": + return True # Optional + + # Check basic format: type=value[,key=value,...] + if not re.match(r"^type=[a-z0-9-]+", value): + self.add_error( + f"Invalid cache config in {input_name}: {value}. " + f"Must start with 'type='" + ) + return False + + # Valid cache types + valid_types = ["registry", "local", "gha", "inline", "s3", "azblob", "oci"] + + # Extract type + type_match = re.match(r"^type=([a-z0-9-]+)", value) + if type_match: + cache_type = type_match.group(1) + if cache_type not in valid_types: + self.add_error( + f"Invalid cache type '{cache_type}' in {input_name}. " + f"Valid types: {', '.join(valid_types)}" + ) + return False + + # Validate key=value pairs format + parts = value.split(",") + for part in parts: + if "=" not in part: + self.add_error( + f"Invalid cache config format in {input_name}: {value}. " + f"Each part must be in 'key=value' format" + ) + return False + + return True