Mirror of https://github.com/ivuorinen/actions.git (synced 2026-01-26 03:23:59 +00:00)
* feat: use our own actions in our workflows
* fix: add missing inputs to validate-inputs, refactor node
* chore: cr comment fixes
* fix: update-validators formatting
* chore: update validators, add tests, conventions
* feat: validate severity with severity_enum
* feat: add 10 generic validators to improve input validation coverage

  Add comprehensive validation system improvements across multiple phases:

  Phase 2A - Quick Wins:
  - Add multi_value_enum validator for 2-10 value enumerations
  - Add exit_code_list validator for Unix/Linux exit codes (0-255)
  - Refactor coverage_driver to use multi_value_enum

  Phase 2B - High-Value Validators:
  - Add key_value_list validator with shell injection prevention
  - Add path_list validator with path traversal and glob support

  Quick Wins - Additional Enums:
  - Add network_mode validator for Docker network modes
  - Add language_enum validator for language detection
  - Add framework_mode validator for PHP framework modes
  - Update boolean pattern to include 'push'

  Phase 2C - Specialized Validators:
  - Add json_format validator for JSON syntax validation
  - Add cache_config validator for Docker BuildKit cache configs

  Improvements:
  - All validators include comprehensive security checks
  - Pattern-based validation with clear error messages
  - 23 new test methods with edge case coverage
  - Update special case mappings for 20+ inputs
  - Fix build-args mapping test expectation

  Coverage impact: 22 actions now at 100% validation (88% → 92%)
  Test suite: 762 → 785 tests (+23 tests, all passing)

* chore: regenerate rules.yml with improved validator coverage

  Regenerate validation rules for all actions with new validators:
  - compress-images: 86% → 100% (+1 input: ignore-paths)
  - docker-build: 63% → 100% (+4 inputs: cache configs, platform-build-args)
  - docker-publish: 73% → 100% (+1 input: build-args)
  - language-version-detect: 67% → 100% (+1 input: language)
  - php-tests: 89% (fixed framework→framework_mode mapping)
  - prettier-lint: 86% → 100% (+2 inputs: file-pattern, plugins)
  - security-scan: 86% (maintained coverage)

  Overall: 23 of 25 actions now at 100% validation coverage (92%)

* fix: address PR #377 review comments

  - Add | None type annotations to 6 optional parameters (PEP 604)
  - Standardize injection pattern: remove @# from comma_separated_list validator
    (@ and # are not shell injection vectors, allows npm scoped packages)
  - Remove dead code: unused value expression in key_value_list validator
  - Update tests to reflect injection pattern changes
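The commit log names the new validators but shows none of their code. As a rough, hypothetical sketch of the style being described — the function names come from the log above, but the signatures, return convention, and the exact injection pattern are assumptions, not the repository's actual code:

```python
import re

# Hypothetical sketches of the validators named in the commit log above;
# the repository's real signatures and patterns may differ.

def multi_value_enum(value: str, allowed: list[str], name: str) -> str | None:
    """Validate `value` against an enumeration of 2-10 allowed values.

    Returns None on success, or an error message (assumed convention).
    """
    if not 2 <= len(allowed) <= 10:
        return f"{name}: enum must define between 2 and 10 values"
    if value not in allowed:
        return f"{name}: '{value}' must be one of: {', '.join(allowed)}"
    return None


# Assumed metacharacter set; per the commit log, '@' and '#' are deliberately
# excluded so npm scoped packages (e.g. @scope/pkg) pass validation.
_SHELL_META = re.compile(r"[;&|`$<>]")


def key_value_list(value: str, name: str) -> str | None:
    """Validate comma-separated KEY=VALUE pairs, rejecting shell metacharacters."""
    for pair in (p.strip() for p in value.split(",") if p.strip()):
        if "=" not in pair:
            return f"{name}: '{pair}' is not a KEY=VALUE pair"
        if _SHELL_META.search(pair):
            return f"{name}: '{pair}' contains shell metacharacters"
    return None
```

For instance, `key_value_list('FOO=bar,BAZ=qux', 'build-args')` would pass, while a value embedding `;` or a backtick would be rejected as a potential shell injection vector.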
400 lines · 16 KiB · YAML
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: PR Security Analysis

on:
  pull_request:
    paths:
      - '**/package.json'
      - '**/package-lock.json'
      - '**/yarn.lock'
      - '**/pnpm-lock.yaml'
      - '**/requirements.txt'
      - '**/Dockerfile'
      - '**/*.py'
      - '**/*.js'
      - '**/*.ts'
      - '**/*.yml'
      - '**/*.yaml'
      - '.github/workflows/**'

permissions:
  contents: read
  pull-requests: write
  issues: write
  actions: read

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  security-analysis:
    name: Security Analysis
    runs-on: ubuntu-latest

    steps:
      - name: Checkout PR
        uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
        with:
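          # Full history so the analysis step can diff and `git show` against the fetched base ref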
          fetch-depth: 0
          repository: ${{ github.event.pull_request.head.repo.full_name }}
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Fetch PR Base
        run: |
          set -eu
          # Fetch the base ref from the base repository with authentication (works for private repos and forked PRs)
          # Use a ref instead of a SHA because git fetch requires ref names, not raw commit IDs
          # Use an authenticated URL to avoid 403/404 on private repositories
          git fetch --no-tags --depth=1 \
            "https://x-access-token:${{ github.token }}@github.com/${{ github.event.pull_request.base.repo.full_name }}" \
            ${{ github.event.pull_request.base.ref }}:refs/remotes/origin-base/${{ github.event.pull_request.base.ref }}
          # Record the base commit for diffing without checking it out
          # Keep the PR head checked out so scanners analyze the new changes
          BASE_REF="refs/remotes/origin-base/${{ github.event.pull_request.base.ref }}"
          echo "BASE_REF=${BASE_REF}" >> "$GITHUB_ENV"
          echo "Base ref: ${BASE_REF}"
          git log -1 --oneline "${BASE_REF}"

      - name: OWASP Dependency Check
        # Only run on pull_request, not pull_request_target, to avoid executing
        # untrusted third-party actions against the PR head from forks
        if: github.event_name == 'pull_request'
        uses: dependency-check/Dependency-Check_Action@3102a65fd5f36d0000297576acc56a475b0de98d # main
        with:
          project: 'PR Security Analysis'
          path: '.'
          format: 'JSON'
          out: 'reports'
          args: >
            --enableRetired --enableExperimental --failOnCVSS 0
        continue-on-error: true

      - name: Semgrep Static Analysis
        uses: semgrep/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d # v1
        with:
          config: 'auto'
          generateSarif: 'true'
        env:
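          # Defense-in-depth: withhold the token on pull_request_target so forked PRs cannot read it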
          SEMGREP_APP_TOKEN: ${{ github.event_name != 'pull_request_target' && secrets.SEMGREP_APP_TOKEN || '' }}
        continue-on-error: true

      - name: TruffleHog Secret Scan
        uses: trufflesecurity/trufflehog@0f58ae7c5036094a1e3e750d18772af92821b503
        with:
          path: ./
          base: ${{ env.BASE_REF }}
          head: HEAD
          extra_args: --debug --only-verified --json --output /tmp/trufflehog_output.json
        continue-on-error: true

      - name: Analyze Security Results
        id: analyze
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |-
            const fs = require('fs');
            const path = require('path');

            // Unique marker to identify our bot comment
            const SECURITY_COMMENT_MARKER = '<!-- security-analysis-bot-comment -->';

            const findings = {
              permissions: [],
              actions: [],
              secrets: [],
              vulnerabilities: [],
              dependencies: []
            };

            // Analyze GitHub Actions permission changes
            const { execSync } = require('child_process');
            const baseRef = process.env.BASE_REF;
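            // Three-dot diff (base...HEAD) lists only files changed on the PR branch since it diverged from base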
            try {
              const changedWorkflows = execSync(
                `git diff --name-only ${baseRef}...HEAD | grep -E "\\.github/workflows/.*\\.ya?ml$" || true`,
                { encoding: 'utf8' }
              ).trim().split('\n').filter(Boolean);

              for (const workflow of changedWorkflows) {
                if (!workflow) continue;

                try {
                  const oldContent = execSync(`git show ${baseRef}:${workflow}`, { encoding: 'utf8' });
                  const newContent = fs.readFileSync(workflow, 'utf8');

                  // Simple permission extraction (could be enhanced with YAML parsing)
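                  // The regex captures the indented block under 'permissions:' up to the next top-level key or end of file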
                  const oldPerms = oldContent.match(/permissions:\s*\n([\s\S]*?)(?=\n\w|\n$|$)/);
                  const newPerms = newContent.match(/permissions:\s*\n([\s\S]*?)(?=\n\w|\n$|$)/);

                  if (oldPerms?.[1] !== newPerms?.[1]) {
                    findings.permissions.push({
                      file: workflow,
                      old: oldPerms?.[1]?.trim() || 'None',
                      new: newPerms?.[1]?.trim() || 'None'
                    });
                  }

                  // Check for new actions
                  const oldActions = [...oldContent.matchAll(/uses:\s*([^\s\n]+)/g)].map(m => m[1]);
                  const newActions = [...newContent.matchAll(/uses:\s*([^\s\n]+)/g)].map(m => m[1]);
                  const addedActions = newActions.filter(action => !oldActions.includes(action));

                  if (addedActions.length > 0) {
                    findings.actions.push({
                      file: workflow,
                      added: addedActions
                    });
                  }
                } catch (error) {
                  console.log(`Could not analyze ${workflow}: ${error.message}`);
                }
              }
            } catch (error) {
              console.log('No workflow changes detected');
            }

            // Parse OWASP Dependency Check results
            try {
              const owaspResults = JSON.parse(fs.readFileSync('reports/dependency-check-report.json', 'utf8'));
              if (owaspResults.dependencies) {
                owaspResults.dependencies.forEach(dep => {
                  if (dep.vulnerabilities && dep.vulnerabilities.length > 0) {
                    dep.vulnerabilities.forEach(vuln => {
                      findings.dependencies.push({
                        file: dep.fileName || 'Unknown',
                        cve: vuln.name,
                        severity: vuln.severity || 'Unknown',
                        description: vuln.description || 'No description'
                      });
                    });
                  }
                });
              }
            } catch (error) {
              console.log('No OWASP results found');
            }

            // Parse Semgrep SARIF results
            try {
              if (fs.existsSync('semgrep.sarif')) {
                const sarifContent = JSON.parse(fs.readFileSync('semgrep.sarif', 'utf8'));
                if (sarifContent.runs && sarifContent.runs[0] && sarifContent.runs[0].results) {
                  const run = sarifContent.runs[0];
                  const rules = run.tool?.driver?.rules || [];
                  run.results.forEach(result => {
                    const rule = rules.find(r => r.id === result.ruleId);
                    findings.vulnerabilities.push({
                      file: result.locations?.[0]?.physicalLocation?.artifactLocation?.uri || 'Unknown',
                      line: result.locations?.[0]?.physicalLocation?.region?.startLine || 0,
                      rule: result.ruleId,
                      severity: result.level?.toUpperCase() || 'INFO',
                      message: result.message?.text || rule?.shortDescription?.text || 'No description'
                    });
                  });
                }
              }
            } catch (error) {
              console.log('Could not parse Semgrep SARIF:', error.message);
            }

            // Parse TruffleHog results (NDJSON format - one JSON object per line)
            try {
              const truffleOutput = execSync('cat /tmp/trufflehog_output.json || echo ""', { encoding: 'utf8' });
              const truffleLines = truffleOutput.trim().split('\n').filter(line => line.length > 0);

              truffleLines.forEach((line, index) => {
                try {
                  const result = JSON.parse(line);
                  findings.secrets.push({
                    file: result.SourceMetadata?.Data?.Filesystem?.file || 'Unknown',
                    line: result.SourceMetadata?.Data?.Filesystem?.line || 0,
                    detector: result.DetectorName,
                    verified: result.Verified || false
                  });
                } catch (parseError) {
                  // Log only safe metadata to avoid leaking secrets
                  console.log('Failed to parse TruffleHog line at index', index, '- Error:', parseError.message, '(line length:', line.length, 'chars)');
                }
              });

              if (truffleLines.length === 0) {
                console.log('No secrets detected');
              }
            } catch (error) {
              console.log('No TruffleHog output file found');
            }

            // Generate clean comment sections
            const sections = [];

            // GitHub Actions Permissions Changes
            if (findings.permissions.length > 0) {
              const permSection = ['## 🔐 GitHub Actions Permissions Changes'];
              findings.permissions.forEach(change => {
                permSection.push(`\n**${change.file}**:`);

                // Parse permissions into lines
                const oldLines = (change.old === 'None' ? [] : change.old.split('\n').map(l => l.trim()).filter(Boolean));
                const newLines = (change.new === 'None' ? [] : change.new.split('\n').map(l => l.trim()).filter(Boolean));

                // Create sets for comparison
                const oldSet = new Set(oldLines);
                const newSet = new Set(newLines);

                // Find added, removed, and unchanged
                const removed = oldLines.filter(l => !newSet.has(l));
                const added = newLines.filter(l => !oldSet.has(l));
                const unchanged = oldLines.filter(l => newSet.has(l));

                // Only show diff if there are actual changes
                if (removed.length > 0 || added.length > 0) {
                  permSection.push('```diff');

                  // Show removed permissions
                  removed.forEach(line => permSection.push(`- ${line}`));

                  // Show added permissions
                  added.forEach(line => permSection.push(`+ ${line}`));

                  permSection.push('```');

                  // Summary for context
                  if (unchanged.length > 0 && unchanged.length <= 3) {
                    permSection.push(`<details><summary>Unchanged (${unchanged.length})</summary>\n\n${unchanged.map(l => `- ${l}`).join('\n')}\n</details>`);
                  } else if (unchanged.length > 3) {
                    permSection.push(`<sub>*${unchanged.length} permissions unchanged*</sub>`);
                  }
                }
              });
              sections.push(permSection.join('\n'));
            }

            // New/Changed Actions
            if (findings.actions.length > 0) {
              const actionSection = ['## 🎯 New GitHub Actions'];
              findings.actions.forEach(change => {
                actionSection.push(`**${change.file}**:`);
                change.added.forEach(action => {
                  actionSection.push(`- \`${action}\``);
                });
              });
              sections.push(actionSection.join('\n'));
            }

            // Secrets Detected
            if (findings.secrets.length > 0) {
              const secretSection = ['## 🔑 Secrets Detected'];
              findings.secrets.forEach(secret => {
                const verified = secret.verified ? '🚨 **VERIFIED**' : '⚠️ Potential';
                secretSection.push(`- ${verified} ${secret.detector} in \`${secret.file}:${secret.line}\``);
              });
              sections.push(secretSection.join('\n'));
            }

            // Security Vulnerabilities
            if (findings.vulnerabilities.length > 0) {
              const vulnSection = ['## ⚠️ Security Vulnerabilities'];
              const groupedBySeverity = findings.vulnerabilities.reduce((acc, vuln) => {
                const sev = vuln.severity.toUpperCase();
                if (!acc[sev]) acc[sev] = [];
                acc[sev].push(vuln);
                return acc;
              }, {});

              ['ERROR', 'WARNING', 'INFO'].forEach(severity => {
                if (groupedBySeverity[severity]) {
                  vulnSection.push(`\n**${severity} Severity:**`);
                  groupedBySeverity[severity].forEach(vuln => {
                    vulnSection.push(`- \`${vuln.file}:${vuln.line}\` - ${vuln.message}`);
                    vulnSection.push(`  - Rule: \`${vuln.rule}\``);
                  });
                }
              });
              sections.push(vulnSection.join('\n'));
            }

            // Dependency Issues
            if (findings.dependencies.length > 0) {
              const depSection = ['## 📦 Dependency Vulnerabilities'];
              const groupedBySeverity = findings.dependencies.reduce((acc, dep) => {
                const sev = dep.severity.toUpperCase();
                if (!acc[sev]) acc[sev] = [];
                acc[sev].push(dep);
                return acc;
              }, {});

              ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW'].forEach(severity => {
                if (groupedBySeverity[severity]) {
                  depSection.push(`\n**${severity} Severity:**`);
                  groupedBySeverity[severity].forEach(dep => {
                    depSection.push(`- **${dep.cve}** in \`${dep.file}\``);
                    depSection.push(`  - ${dep.description.substring(0, 100)}...`);
                  });
                }
              });
              sections.push(depSection.join('\n'));
            }

            // Count critical issues for output
            const criticalCount =
              findings.secrets.filter(s => s.verified).length +
              (findings.vulnerabilities.filter(v => v.severity.toUpperCase() === 'ERROR').length || 0) +
              (findings.dependencies.filter(d => d.severity.toUpperCase() === 'CRITICAL').length || 0);

            // Export critical count as output
            core.setOutput('critical_issues', criticalCount.toString());

            // Generate final comment with unique marker
            let comment = `${SECURITY_COMMENT_MARKER}\n## ✅ Security Analysis\n\n`;
            if (sections.length === 0) {
              comment += 'No security issues detected in this PR.';
            } else {
              comment += sections.join('\n\n');
            }

            // Find existing security comment using unique marker
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });

            const existingComment = comments.find(c =>
              c.body && c.body.includes(SECURITY_COMMENT_MARKER)
            );

            if (existingComment) {
              // Update existing comment
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: existingComment.id,
                body: comment
              });
            } else {
              // Create new comment
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: comment
              });
            }

      - name: Check Critical Issues
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        env:
          CRITICAL_COUNT: ${{ steps.analyze.outputs.critical_issues || '0' }}
        with:
          script: |-
            const criticalCount = parseInt(process.env.CRITICAL_COUNT || '0', 10);

            if (criticalCount > 0) {
              core.setFailed(`Found ${criticalCount} critical security issue(s). Please review and address them before merging.`);
            } else {
              console.log('No critical security issues found.');
            }