Mirror of https://github.com/ivuorinen/actions.git (synced 2026-01-26 11:34:00 +00:00)

Compare commits: 25.9.15...v2025.12.0

112 Commits
| SHA1 |
|---|
| 44a11e9773 |
| a52399cf74 |
| 803165db8f |
| d69ed9e999 |
| 8eea6f781b |
| 4889586a94 |
| e02ca4d843 |
| 13ef0db9ba |
| c366e99ee3 |
| fbbb487332 |
| abe24f8570 |
| 9aa16a8164 |
| e58465e5d3 |
| 9fe05efeec |
| 449669120c |
| d9098ddead |
| f37d940c72 |
| eea547998d |
| 49159fc895 |
| 89fd0f3627 |
| 83cf08ff76 |
| 90ab7c645c |
| d05e898ea9 |
| 650ebb87b8 |
| 13316bd827 |
| 350fd30043 |
| 587853a9cd |
| 6cde6d088d |
| 5cc7373a22 |
| 8fb52522ab |
| bcf49f55b5 |
| 060afb8871 |
| a0ebb00853 |
| 227cf7f56f |
| e28c56c7cf |
| 504debcb8d |
| ed438428b2 |
| a88bb34369 |
| ab371bdebf |
| 842e6c1878 |
| cea720416b |
| 2b1c797263 |
| 681e0f828a |
| 4e3e2a559e |
| 80f0e018cd |
| d0687ee76e |
| fd3c871d7d |
| 7de94a65a6 |
| 8112d86ab7 |
| 22ca79df3c |
| 953659172d |
| 5c5f1c3d54 |
| 8599e8913f |
| a261fcd118 |
| a1c0435c22 |
| 2f1c73dd8b |
| fd49ff6968 |
| 82edd1dc12 |
| 63a18808a0 |
| 8527166fbb |
| fb5a978260 |
| ca7fc1a5ff |
| 42a40cfaf1 |
| b06748cbef |
| cbbb0c8b8c |
| 1a8997715c |
| f50ab425b8 |
| 41b1778849 |
| bbb05559e6 |
| 7c18e12b06 |
| 88053f4197 |
| ee9a4877e8 |
| c32f2813f0 |
| e416c272b5 |
| 74968d942f |
| e2222afff1 |
| 81f54fda92 |
| a09e59aa7c |
| 2d8ff47548 |
| a3fb0bd8db |
| 42312cdbe4 |
| 222a2fa571 |
| 6ebc5a21d5 |
| 020a8fd26c |
| 7061aafd35 |
| d3c2de1bd1 |
| f48f914224 |
| 5f14fd7ed3 |
| 277d5edf5c |
| 57cbd83dc6 |
| 33631ad911 |
| 78fdad69e5 |
| d3cc8d4790 |
| dc895c40ff |
| 0b6f65379c |
| 0a78a1131a |
| 7314e5ae00 |
| 9df3b0bff7 |
| 0a227e6673 |
| da961c5cf7 |
| 646169c13f |
| e47a7c4077 |
| 8b4edff06b |
| 240334baad |
| db9915d73f |
| 27df3acbcf |
| 1e4637971d |
| 4a3c30cceb |
| 4b6870953c |
| 55bc98d6df |
| cb3ac94b35 |
| 5c468117d8 |
18  .coderabbit.yaml  Normal file
@@ -0,0 +1,18 @@
---
# yaml-language-server: $schema=https://www.coderabbit.ai/integrations/schema.v2.json
remote_config:
  url: 'https://raw.githubusercontent.com/ivuorinen/coderabbit/1985ff756ef62faf7baad0c884719339ffb652bd/coderabbit.yaml'
path_instructions:
  - path: '.serena/**/*'
    instructions: >-
      - These are files for Serena LLM. Do not review them.
  - path: '**/*/README.md'
    instructions: >-
      - README.md files next to action.yml files are autogenerated
        and should not be reviewed.
      - README.md file in the root of the repository should be reviewed.
      - README.md files for actions use `@main` version for the action as an illustration.
        Do not review them.
  - path: '**/*.md'
    instructions: >-
      - The repository uses CalVer for versioning. Do not review version numbers in the documentation.

@@ -10,6 +10,11 @@ max_line_length = 200
tab_width = 2
trim_trailing_whitespace = true

[*.md]
max_line_length = 120
trim_trailing_whitespace = false
[*.py]
indent_size = 4

[Makefile]
indent_style = tab

[{**/*.spec.sh}]
ignore = true

42  .github/CODE_OF_CONDUCT.md  vendored
@@ -33,15 +33,15 @@ fullest extent, we want to know.

The following behaviors are expected and requested of all community members:

* Participate in an authentic and active way. In doing so, you contribute to the
- Participate in an authentic and active way. In doing so, you contribute to the
  health and longevity of this community.
* Exercise consideration and respect in your speech and actions.
* Attempt collaboration before conflict.
* Refrain from demeaning, discriminatory, or harassing behavior and speech.
* Be mindful of your surroundings and of your fellow participants. Alert
- Exercise consideration and respect in your speech and actions.
- Attempt collaboration before conflict.
- Refrain from demeaning, discriminatory, or harassing behavior and speech.
- Be mindful of your surroundings and of your fellow participants. Alert
  community leaders if you notice a dangerous situation, someone in distress, or
  violations of this Code of Conduct, even if they seem inconsequential.
* Remember that community event venues may be shared with members of the public;
- Remember that community event venues may be shared with members of the public;
  please be respectful to all patrons of these locations.

## 4. Unacceptable Behavior
@@ -49,23 +49,23 @@ The following behaviors are expected and requested of all community members:
The following behaviors are considered harassment and are unacceptable within
our community:

* Violence, threats of violence or violent language directed against another
- Violence, threats of violence or violent language directed against another
  person.
* Sexist, racist, homophobic, transphobic, ableist or otherwise discriminatory
- Sexist, racist, homophobic, transphobic, ableist or otherwise discriminatory
  jokes and language.
* Posting or displaying sexually explicit or violent material.
* Posting or threatening to post other people's personally identifying
- Posting or displaying sexually explicit or violent material.
- Posting or threatening to post other people's personally identifying
  information ("doxing").
* Personal insults, particularly those related to gender, sexual orientation,
- Personal insults, particularly those related to gender, sexual orientation,
  race, religion, or disability.
* Inappropriate photography or recording.
* Inappropriate physical contact. You should have someone's consent before
- Inappropriate photography or recording.
- Inappropriate physical contact. You should have someone's consent before
  touching them.
* Unwelcome sexual attention. This includes, sexualized comments or jokes;
- Unwelcome sexual attention. This includes, sexualized comments or jokes;
  inappropriate touching, groping, and unwelcomed sexual advances.
* Deliberate intimidation, stalking or following (online or in person).
* Advocating for, or encouraging, any of the above behavior.
* Sustained disruption of community events, including talks and presentations.
- Deliberate intimidation, stalking or following (online or in person).
- Advocating for, or encouraging, any of the above behavior.
- Sustained disruption of community events, including talks and presentations.

## 5. Weapons Policy

@@ -133,10 +133,10 @@ under a [Creative Commons Attribution-ShareAlike license][cc-by-sa].
Portions of text derived from the [Django Code of Conduct][django] and
the [Geek Feminism Anti-Harassment Policy][geek-feminism].

* _Revision 2.3. Posted 6 March 2017._
* _Revision 2.2. Posted 4 February 2016._
* _Revision 2.1. Posted 23 June 2014._
* _Revision 2.0, adopted by the [Stumptown Syndicate][stumptown] board on 10
- _Revision 2.3. Posted 6 March 2017._
- _Revision 2.2. Posted 4 February 2016._
- _Revision 2.1. Posted 23 June 2014._
- _Revision 2.0, adopted by the [Stumptown Syndicate][stumptown] board on 10
  January 2013. Posted 17 March 2013._

[stumptown]: https://github.com/stumpsyn

1  .github/ISSUE_TEMPLATE/bug_report.md  vendored
@@ -4,7 +4,6 @@ about: Create a report to help us improve
title: ''
labels: bug
assignees: ivuorinen

---

**Describe the bug**

1  .github/ISSUE_TEMPLATE/feature_request.md  vendored
@@ -4,7 +4,6 @@ about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ivuorinen

---

**Is your feature request related to a problem? Please describe.**

76  .github/SECURITY.md  vendored
@@ -3,7 +3,7 @@
## Supported Versions

| Version | Supported |
|---------| ------------------ |
| ------- | ------------------ |
| main | :white_check_mark: |

## Reporting a Vulnerability
@@ -23,15 +23,13 @@ We will respond within 48 hours and work on a fix if validated.
This repository implements:

- CodeQL scanning
- OWASP Dependency Check
- Snyk vulnerability scanning
- Semgrep static analysis
- Gitleaks secret scanning
- Trivy vulnerability scanner
- Dependency Review
- MegaLinter code analysis
- Regular security updates
- Automated fix PRs
- Daily security scans
- Weekly metrics collection
- Continuous security scanning on PRs

## Security Best Practices

@@ -46,39 +44,67 @@ When using these actions:

## Required Secrets

The following secrets should be configured in your repository:
> **Note**: `GITHUB_TOKEN` is automatically provided by GitHub Actions and does
> not require manual repository secret configuration.

| Secret Name | Description | Required |
|-------------|-------------|----------|
| `SNYK_TOKEN` | Token for Snyk vulnerability scanning | Optional |
| `GITLEAKS_LICENSE` | License for Gitleaks scanning | Optional |
| `SLACK_WEBHOOK` | Webhook URL for Slack notifications | Optional |
| `SONAR_TOKEN` | Token for SonarCloud analysis | Optional |
| `FIXIMUS_TOKEN` | Token for automated fixes | Optional |
The following table shows available secrets (auto-provisioned secrets are provided by
GitHub, optional secrets require manual repository configuration):

| Secret Name | Description | Requirement |
| ------------------- | ----------------------------------------------------------------- | ----------- |
| `GITHUB_TOKEN` | GitHub token for workflow authentication (automatically provided) | Auto |
| `GITLEAKS_LICENSE` | License for Gitleaks scanning | Optional |
| `FIXIMUS_TOKEN` | Enhanced token for automated fix PRs | Optional |
| `SEMGREP_APP_TOKEN` | Token for Semgrep static analysis | Optional |

## Security Workflows

This repository includes several security-focused workflows:

1. **Daily Security Checks** (`security.yml`)
   - Runs comprehensive security scans
1. **PR Security Analysis** (`security-suite.yml`)
   - Comprehensive security scanning on pull requests
   - Semgrep static analysis
   - Dependency vulnerability checks
   - Creates automated fix PRs
   - Generates security reports

2. **Action Security** (`action-security.yml`)
   - Validates GitHub Action files
   - Checks for hardcoded credentials
   - Scans for vulnerabilities
   - Gitleaks secret scanning
   - Scans for vulnerabilities in action definitions

3. **CodeQL Analysis** (`codeql.yml`)
3. **CodeQL Analysis** (`codeql.yml` and `codeql-new.yml`)
   - Analyzes code for security issues
   - Runs on multiple languages
   - Weekly scheduled scans
   - Runs on multiple languages (Python, JavaScript/TypeScript)
   - Automated on pushes and pull requests
   - SARIF report generation

4. **Security Metrics** (`security-metrics.yml`)
   - Collects security metrics
   - Generates trend reports
   - Weekly analysis
4. **Dependency Review** (`dependency-review.yml`)
   - Reviews dependency changes in pull requests
   - Checks for known vulnerabilities
   - License compliance validation
   - Fails PRs with critical vulnerabilities (gated by branch protection)

How to enforce gating
- Update .github/workflows/dependency-review.yml: add the `fail-on-severity: critical`
  input to the Dependency Review step. Example:

```yaml
- name: Dependency Review
  uses: github/dependency-review-action@v3
  with:
    fail-on-severity: critical
```

- Require the Dependency Review workflow in branch protection:
  - Go to Repository → Settings → Branches → Branch protection rules → Edit (or create)
    rule for your protected branch.
  - Under "Require status checks to pass before merging", add the exact status check
    name shown in PR checks (e.g., "Dependency Review") and save.
- Verify: open a test PR with a simulated critical vulnerability or run the workflow
  to confirm it fails and the branch protection blocks merging until the check is green.
- Optional: If you manage protections via config or API, add the workflow status
  check name to your protection rule programmatically.

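As a rough illustration of that last point (the repository slug, branch, and check name below are example values, and the branch must already have a protection rule with required status checks enabled), the GitHub CLI can append the check programmatically:

```bash
# Hypothetical sketch: append the "Dependency Review" status check to an
# existing branch protection rule via the GitHub REST API.
gh api \
  --method POST \
  -H "Accept: application/vnd.github+json" \
  "repos/ivuorinen/actions/branches/main/protection/required_status_checks/contexts" \
  -f "contexts[]=Dependency Review"
```
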
## Security Reports

169  .github/actions/setup-test-environment/action.yml  vendored  Normal file
@@ -0,0 +1,169 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-action.json
name: Setup Test Environment
description: Common setup for test jobs (Python, Node, system tools, ShellSpec)

inputs:
  install-act:
    description: Whether to install act for integration tests
    required: false
    default: 'false'
  install-kcov:
    description: Whether to build and install kcov from source for coverage (v42)
    required: false
    default: 'false'

runs:
  using: composite
  steps:
    - name: Install uv
      uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
      with:
        enable-cache: true

    - name: Set up Python
      uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
      with:
        python-version-file: pyproject.toml

    - name: Install Python dependencies
      shell: bash
      run: uv sync --frozen

    - name: Setup Node.js
      uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
      with:
        node-version: '24'
        cache: npm

    - name: Install Node dependencies
      shell: bash
      run: npm ci

    - name: Install system tools
      shell: bash
      run: |
        sudo apt-get update
        sudo apt-get install -y --no-install-recommends jq shellcheck

    - name: Install kcov from source
      if: inputs.install-kcov == 'true'
      shell: bash
      run: |
        echo "Installing kcov build dependencies..."
        sudo apt-get install -y --no-install-recommends \
          cmake \
          libcurl4-openssl-dev \
          libdw-dev \
          libelf-dev \
          libiberty-dev \
          pkg-config \
          zlib1g-dev

        echo "Building kcov from source..."
        cd /tmp
        git clone --depth 1 --branch v42 https://github.com/SimonKagstrom/kcov.git
        cd kcov
        mkdir build
        cd build
        cmake ..
        make
        sudo make install
        cd /
        rm -rf /tmp/kcov

        echo "Verifying kcov installation..."
        kcov --version

    - name: Install ShellSpec
      shell: bash
      run: |
        set -euo pipefail

        # Pin to specific version to avoid supply-chain risks
        SHELLSPEC_VERSION="0.28.1"
        SHELLSPEC_URL="https://github.com/shellspec/shellspec/archive/refs/tags/${SHELLSPEC_VERSION}.tar.gz"
        # Pinned SHA-256 checksum for ShellSpec 0.28.1
        # Source: https://github.com/shellspec/shellspec/archive/refs/tags/0.28.1.tar.gz
        EXPECTED_CHECKSUM="400d835466429a5fe6c77a62775a9173729d61dd43e05dfa893e8cf6cb511783"

        echo "Downloading ShellSpec ${SHELLSPEC_VERSION}..."
        curl -fsSL "${SHELLSPEC_URL}" -o "/tmp/shellspec.tar.gz"

        echo "Verifying checksum..."
        ACTUAL_CHECKSUM="$(sha256sum /tmp/shellspec.tar.gz | awk '{print $1}')"
        if [[ "${ACTUAL_CHECKSUM}" != "${EXPECTED_CHECKSUM}" ]]; then
          echo "Error: Checksum mismatch for ShellSpec ${SHELLSPEC_VERSION}" >&2
          echo "Expected: ${EXPECTED_CHECKSUM}" >&2
          echo "Got: ${ACTUAL_CHECKSUM}" >&2
          rm -f /tmp/shellspec.tar.gz
          exit 1
        fi
        echo "Checksum verified successfully"

        echo "Installing ShellSpec..."
        mkdir -p ~/.local/lib
        tar -xzf /tmp/shellspec.tar.gz -C ~/.local/lib
        mv ~/.local/lib/shellspec-${SHELLSPEC_VERSION} ~/.local/lib/shellspec
        rm /tmp/shellspec.tar.gz

        sudo ln -s ~/.local/lib/shellspec/shellspec /usr/local/bin/shellspec

    - name: Install act
      if: inputs.install-act == 'true'
      shell: bash
      run: |
        set -euo pipefail

        # Pin to specific version to avoid supply-chain risks
        ACT_VERSION="0.2.82"
        ACT_ARCH="Linux_x86_64"
        ACT_TARBALL="act_${ACT_ARCH}.tar.gz"
        ACT_URL="https://github.com/nektos/act/releases/download/v${ACT_VERSION}/${ACT_TARBALL}"
        ACT_CHECKSUM_URL="https://github.com/nektos/act/releases/download/v${ACT_VERSION}/checksums.txt"

        echo "Downloading act v${ACT_VERSION}..."
        curl -fsSL "${ACT_URL}" -o "/tmp/${ACT_TARBALL}"

        echo "Downloading checksums..."
        curl -fsSL "${ACT_CHECKSUM_URL}" -o "/tmp/act-checksums.txt"

        echo "Verifying checksum..."
        # Extract the checksum for our specific file and verify
        # Use cd to match the filename format in checksums.txt
        cd /tmp
        if ! grep "${ACT_TARBALL}" act-checksums.txt | sha256sum -c -; then
          echo "Error: Checksum verification failed for ${ACT_TARBALL}" >&2
          rm -f "${ACT_TARBALL}" act-checksums.txt
          exit 1
        fi
        echo "Checksum verified successfully"

        echo "Installing act..."
        tar -xzf "${ACT_TARBALL}" -C /tmp
        sudo install -m 755 /tmp/act /usr/local/bin/act
        rm -f "${ACT_TARBALL}" /tmp/act act-checksums.txt

        echo "Verifying act installation..."
        act --version

    - name: Setup Docker and act configuration
      if: inputs.install-act == 'true'
      shell: bash
      run: |
        # Ensure Docker is running
        docker ps > /dev/null 2>&1 || (echo "Docker is not running" && exit 1)

        # Pre-pull the act Docker image to avoid interactive prompts
        docker pull catthehacker/ubuntu:act-latest

    - name: Verify tools
      shell: bash
      run: |
        shellspec --version
        jq --version
        uv --version
        if [[ "${{ inputs.install-act }}" == "true" ]]; then
          act --version
          docker --version
        fi

33  .github/codeql/codeql-config.yml  vendored  Normal file
@@ -0,0 +1,33 @@
---
# CodeQL configuration for GitHub Actions repository
name: 'Actions Security Scanning'

# Exclude third-party and generated code from analysis
paths-ignore:
  - node_modules/**
  - '**/node_modules/**'
  - '**/*.min.js'
  - '_tests/reports/**'
  - '_tests/coverage/**'
  - '*.sarif'
  - '**/*.sarif'

# Use security and quality query suite
queries:
  - uses: security-and-quality

# Suppress specific false positives
# These findings have been manually reviewed and determined to be false positives
# with appropriate security controls in place
query-filters:
  # docker-publish: Code injection in validated context
  # False positive: User input is validated and sanitized before use
  # - Only relative paths and trusted git URLs are allowed
  # - Absolute paths and arbitrary URLs are rejected
  # - Path traversal attempts are blocked
  # - Custom contexts require explicit opt-in via use-custom-context: true
  # - Wraps docker/build-push-action (trusted Docker-maintained action)
  # - Action is designed for trusted workflows only (documented in action.yml)
  - exclude:
      id: js/actions/code-injection
      kind: problem

134  .github/copilot-instructions.md  vendored  Normal file
@@ -0,0 +1,134 @@
# GitHub Actions Monorepo - AI Coding Instructions

## Project Architecture

This is a **flat-structure GitHub Actions monorepo** with over 40 self-contained actions. Each action directory contains:

- `action.yml` - Action definition with inputs/outputs/branding
- `README.md` - Auto-generated documentation
- `rules.yml` - Auto-generated validation rules (do not edit manually)
- `CustomValidator.py` - Custom validation logic (for actions requiring it)

**Core principle**: Actions are designed for external consumption with pinned refs like `ivuorinen/actions/action-name@2025-01-15`.

## Essential Workflows

### Development Commands

```bash
make all # Complete workflow: docs + format + lint + test
make dev # Quick dev cycle: format + lint only
make test # Run all tests (ShellSpec + pytest)
make test-action ACTION=node-setup # Test specific action
```

### Documentation Generation

- `make docs` auto-generates all README.md files from action.yml using action-docs
- `npm run update-catalog` rebuilds the main README.md action listing
- **Never manually edit** generated sections marked with `<!--LISTING-->`

### Validation System

- Each action has auto-generated `rules.yml` defining input validation
- `validate-inputs/` contains centralized Python validation framework
- `make test-update-validators` regenerates all rules.yml files
- Custom validators in `CustomValidator.py` handle action-specific logic

## Critical Patterns

### Action Structure

```yaml
# All actions follow this schema pattern:
name: Action Name
description: 'Brief description with key features'
branding:
  icon: server # Choose appropriate icon
  color: green # Choose appropriate color

inputs:
  # Required inputs first, then optional with defaults
  some-input:
    description: 'Clear description'
    required: false
    default: 'sensible-default'

outputs:
  # Always include relevant outputs for chaining
  result:
    description: 'What this output contains'
```

### Testing Framework

- **ShellSpec** for action testing in `_tests/unit/`
- **pytest** for Python validation testing
- Use `_tests/shared/` for common test utilities
- Integration tests use `nektos/act` for local GitHub Actions simulation

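As a rough sketch of how these pieces are typically exercised locally (the action name, spec path, workflow file, and event below are examples, not guarantees about the repository layout):

```bash
# Full suite (ShellSpec + pytest) via make
make test

# ShellSpec unit tests for a single action (example action name)
make test-action ACTION=node-setup
shellspec _tests/unit/node-setup/

# Python validator tests through uv
uv run pytest

# Simulate a workflow locally with nektos/act (example event and workflow file)
act pull_request -W .github/workflows/pr-lint.yml
```
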
### Language Detection Actions

Actions like `node-setup`, `php-version-detect` follow auto-detection patterns:

1. Check project files (package.json, composer.json, go.mod, etc.)
2. Fallback to `default-version` input
3. Support `force-version` override
4. Output detected version for downstream actions

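A minimal shell sketch of that fallback chain for a Node version; the `FORCE_VERSION`/`DEFAULT_VERSION` variables stand in for the `force-version`/`default-version` inputs, and the package.json lookup and the default value are illustrative assumptions, not the action's actual code:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Illustrative priority: force-version > project files > default-version.
FORCE_VERSION="${FORCE_VERSION:-}"
DEFAULT_VERSION="${DEFAULT_VERSION:-22}"

detect_node_version() {
  if [ -n "${FORCE_VERSION}" ]; then
    echo "${FORCE_VERSION}"
  elif [ -f package.json ] && jq -e '.engines.node' package.json > /dev/null; then
    # A project file wins over the default
    jq -r '.engines.node' package.json
  else
    echo "${DEFAULT_VERSION}"
  fi
}

# Expose the detected version for downstream actions
echo "version=$(detect_node_version)" >> "${GITHUB_OUTPUT}"
```
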
### Error Handling

- All actions use structured error messages
- Python validators inherit from `BaseValidator` class
- Shell scripts use `set -euo pipefail` pattern
- Always provide actionable error messages with context

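For the shell side, a tiny illustrative pattern (the `die` helper and its message are made up; only `set -euo pipefail` and the `::error::` workflow command are taken as given):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical helper: fail fast with an actionable, annotated message.
die() {
  echo "::error::${1}" >&2
  exit 1
}

[ -f action.yml ] || die "action.yml not found in $(pwd); run this from an action directory"
```
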
## Development Standards

### Code Quality (Zero Tolerance)

- All linting must pass: markdownlint, yamllint, shellcheck, pylint
- All tests must pass: unit + integration
- No warnings allowed in production
- Use `make all` before committing

### Documentation

- Action descriptions must be concise and feature-focused
- Include examples in README.md (auto-generated from action.yml)
- Update CLAUDE.md for significant architectural changes
- Never edit auto-generated content manually

### Security

- Use `validate-inputs` action for all user-provided input
- Pin action versions in workflows with commit SHAs
- Follow least-privilege token permissions
- Implement proper secret handling patterns

## Key Files to Reference

- `CLAUDE.md` - Current architectural decisions and action inventory
- `Makefile` - Complete build system with all targets
- `validate-inputs/validators/` - Validation logic patterns
- `_tests/shared/` - Testing utilities and patterns
- `_tools/fix-local-action-refs.py` - Reference resolution tooling

## Anti-Patterns to Avoid

- **Don't** manually edit `rules.yml` files (use `make test-update-validators`)
- **Don't** edit README.md between `<!--LISTING-->` markers
- **Don't** create actions without proper input validation
- **Don't** skip the `make all` verification step
- **Don't** use relative paths in action references (use `./action-name`)

## Integration Points

Actions are designed for composition:

1. **Setup actions** (node-setup, php-version-detect) prepare environment
2. **Linting actions** (eslint-check, biome-check) validate code quality
3. **Build actions** (docker-build, go-build) create artifacts
4. **Publishing actions** (npm-publish, docker-publish) deploy results

Use outputs from setup actions as inputs to subsequent actions for proper chaining.

26  .github/renovate.json  vendored
@@ -1,20 +1,34 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": ["github>ivuorinen/renovate-config"],
  "extends": [
    "github>ivuorinen/renovate-config",
    "customManagers:biomeVersions"
  ],
  "packageRules": [
    {
      "matchUpdateTypes": ["minor", "patch"],
      "matchUpdateTypes": [
        "minor",
        "patch"
      ],
      "matchCurrentVersion": "!/^0/",
      "automerge": true
    },
    {
      "matchDepTypes": ["devDependencies"],
      "matchDepTypes": [
        "devDependencies"
      ],
      "automerge": true
    }
  ],
  "schedule": ["before 4am on monday"],
  "schedule": [
    "before 4am on monday"
  ],
  "vulnerabilityAlerts": {
    "labels": ["security"],
    "assignees": ["ivuorinen"]
    "labels": [
      "security"
    ],
    "assignees": [
      "ivuorinen"
    ]
  }
}

1  .github/tag-changelog-config.js  vendored
@@ -1,6 +1,7 @@
module.exports = {
  types: [
    { types: ['feat', 'feature', 'Feat'], label: '🎉 New Features' },
    { types: ['security'], label: '🔐 Security' },
    { types: ['fix', 'bugfix', 'Fix'], label: '🐛 Bugfixes' },
    { types: ['improvements', 'enhancement'], label: '🔨 Improvements' },
    { types: ['perf'], label: '🏎️ Performance Improvements' },

212  .github/workflows/action-security.yml  vendored
@@ -35,216 +35,34 @@ jobs:

    steps:
      - name: Checkout Repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
        with:
          fetch-depth: 0

      - name: Check Required Configurations
        id: check-configs
        shell: bash
        run: |
          # Initialize all flags as false
          {
            echo "run_gitleaks=false"
            echo "run_trivy=true"
          } >> "$GITHUB_OUTPUT"

          # Check Gitleaks configuration and license
          if [ -f ".gitleaks.toml" ] && [ -n "${{ secrets.GITLEAKS_LICENSE }}" ]; then
            echo "Gitleaks config and license found"
            echo "run_gitleaks=true" >> "$GITHUB_OUTPUT"
          else
            echo "::warning::Gitleaks config or license missing - skipping Gitleaks scan"
          fi

      - name: Run actionlint
        uses: raven-actions/actionlint@3a24062651993d40fed1019b58ac6fbdfbf276cc # v2.0.1
      - name: Run Security Scan
        id: security-scan
        uses: ./security-scan
        with:
          cache: true
          fail-on-error: true
          shellcheck: false

      - name: Run Gitleaks
        if: steps.check-configs.outputs.run_gitleaks == 'true'
        uses: gitleaks/gitleaks-action@ff98106e4c7b2bc287b24eaf42907196329070c7 # v2.3.9
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITLEAKS_LICENSE: ${{ secrets.GITLEAKS_LICENSE }}
        with:
          config-path: .gitleaks.toml
          report-format: sarif
          report-path: gitleaks-report.sarif

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@a11da62073708815958ea6d84f5650c78a3ef85b # master
        with:
          scan-type: 'fs'
          scanners: 'vuln,config,secret'
          format: 'sarif'
          output: 'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'
          timeout: '10m'

      - name: Verify SARIF files
        id: verify-sarif
        shell: bash
        run: |
          # Initialize outputs
          {
            echo "has_trivy=false"
            echo "has_gitleaks=false"
          } >> "$GITHUB_OUTPUT"

          # Check Trivy results
          if [ -f "trivy-results.sarif" ]; then
            if jq -e . </dev/null 2>&1 <"trivy-results.sarif"; then
              echo "has_trivy=true" >> "$GITHUB_OUTPUT"
            else
              echo "::warning::Trivy SARIF file exists but is not valid JSON"
            fi
          fi

          # Check Gitleaks results if it ran
          if [ "${{ steps.check-configs.outputs.run_gitleaks }}" = "true" ]; then
            if [ -f "gitleaks-report.sarif" ]; then
              if jq -e . </dev/null 2>&1 <"gitleaks-report.sarif"; then
                echo "has_gitleaks=true" >> "$GITHUB_OUTPUT"
              else
                echo "::warning::Gitleaks SARIF file exists but is not valid JSON"
              fi
            fi
          fi

      - name: Upload Trivy results
        if: steps.verify-sarif.outputs.has_trivy == 'true'
        uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
        with:
          sarif_file: 'trivy-results.sarif'
          category: 'trivy'

      - name: Upload Gitleaks results
        if: steps.verify-sarif.outputs.has_gitleaks == 'true'
        uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
        with:
          sarif_file: 'gitleaks-report.sarif'
          category: 'gitleaks'

      - name: Archive security reports
        if: always()
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: security-reports-${{ github.run_id }}
          path: |
            ${{ steps.verify-sarif.outputs.has_trivy == 'true' && 'trivy-results.sarif' || '' }}
            ${{ steps.verify-sarif.outputs.has_gitleaks == 'true' && 'gitleaks-report.sarif' || '' }}
          retention-days: 30

      - name: Analyze Results
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        with:
          script: |
            const fs = require('fs');

            try {
              let totalIssues = 0;
              let criticalIssues = 0;

              const analyzeSarif = (file, tool) => {
                if (!fs.existsSync(file)) {
                  console.log(`No results file found for ${tool}`);
                  return null;
                }

                try {
                  const sarif = JSON.parse(fs.readFileSync(file, 'utf8'));
                  return sarif.runs.reduce((acc, run) => {
                    if (!run.results) return acc;

                    const critical = run.results.filter(r =>
                      r.level === 'error' ||
                      r.level === 'critical' ||
                      (r.ruleId || '').toLowerCase().includes('critical')
                    ).length;

                    return {
                      total: acc.total + run.results.length,
                      critical: acc.critical + critical
                    };
                  }, { total: 0, critical: 0 });
                } catch (error) {
                  console.log(`Error analyzing ${tool} results: ${error.message}`);
                  return null;
                }
              };

              // Only analyze results from tools that ran successfully
              const results = {
                trivy: ${{ steps.verify-sarif.outputs.has_trivy }} ?
                  analyzeSarif('trivy-results.sarif', 'trivy') : null,
                gitleaks: ${{ steps.verify-sarif.outputs.has_gitleaks }} ?
                  analyzeSarif('gitleaks-report.sarif', 'gitleaks') : null
              };

              // Aggregate results
              Object.entries(results).forEach(([tool, result]) => {
                if (result) {
                  totalIssues += result.total;
                  criticalIssues += result.critical;
                  console.log(`${tool}: ${result.total} total, ${result.critical} critical issues`);
                }
              });

              // Create summary
              const summary = `## Security Scan Summary

              - Total Issues Found: ${totalIssues}
              - Critical Issues: ${criticalIssues}

              ### Tool Breakdown
              ${Object.entries(results)
                .filter(([_, r]) => r)
                .map(([tool, r]) =>
                  `- ${tool}: ${r.total} total, ${r.critical} critical`
                ).join('\n')}

              ### Tools Run Status
              - Trivy: ${{ steps.verify-sarif.outputs.has_trivy }}
              - Gitleaks: ${{ steps.check-configs.outputs.run_gitleaks }}
              `;

              // Set output
              core.setOutput('total_issues', totalIssues);
              core.setOutput('critical_issues', criticalIssues);

              // Add job summary
              await core.summary
                .addRaw(summary)
                .write();

              // Fail if critical issues found
              if (criticalIssues > 0) {
                core.setFailed(`Found ${criticalIssues} critical security issues`);
              }
            } catch (error) {
              core.setFailed(`Analysis failed: ${error.message}`);
            }
          gitleaks-license: ${{ secrets.GITLEAKS_LICENSE }}
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Notify on Critical Issues
        if: failure()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        if: failure() && steps.security-scan.outputs.critical_issues != '0'
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |
          script: |-
            const { repo, owner } = context.repo;
            const critical = core.getInput('critical_issues');
            const critical = '${{ steps.security-scan.outputs.critical_issues }}';
            const total = '${{ steps.security-scan.outputs.total_issues }}';

            const body = `🚨 Critical security issues found in GitHub Actions

            ${critical} critical security issues were found during the security scan.
            ${critical} critical security issues (out of ${total} total) were found during the security scan.

            ### Scan Results
            - Trivy: ${{ steps.verify-sarif.outputs.has_trivy == 'true' && 'Completed' || 'Skipped/Failed' }}
            - Gitleaks: ${{ steps.check-configs.outputs.run_gitleaks == 'true' && 'Completed' || 'Skipped' }}
            - Actionlint: Completed
            - Trivy: ${{ steps.security-scan.outputs.has_trivy_results == 'true' && 'Completed' || 'Skipped/Failed' }}
            - Gitleaks: ${{ steps.security-scan.outputs.has_gitleaks_results == 'true' && 'Completed' || 'Skipped' }}

            [View detailed scan results](https://github.com/${owner}/${repo}/actions/runs/${context.runId})

110  .github/workflows/build-testing-image.yml  vendored  Normal file
@@ -0,0 +1,110 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: Build Testing Docker Image

on:
  push:
    branches:
      - main
    paths:
      - '_tools/docker-testing-tools/**'
      - '.github/workflows/build-testing-image.yml'
  pull_request:
    branches:
      - main
    paths:
      - '_tools/docker-testing-tools/**'
      - '.github/workflows/build-testing-image.yml'
  workflow_dispatch:
    inputs:
      tag:
        description: 'Docker image tag'
        required: false
        default: 'latest'
        type: string

permissions:
  contents: read
  packages: write

jobs:
  build-and-push:
    name: Build and Push Testing Image
    runs-on: ubuntu-latest
    timeout-minutes: 20

    steps:
      - name: Checkout repository
        uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
        with:
          images: ghcr.io/${{ github.repository_owner }}/actions
          tags: |
            type=ref,event=branch,suffix=-testing-tools
            type=ref,event=pr,suffix=-testing-tools
            type=raw,value=testing-tools,enable={{is_default_branch}}
            type=raw,value=${{ github.event.inputs.tag }},enable=${{ github.event.inputs.tag != '' }}

      - name: Build and push Docker image
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: _tools/docker-testing-tools
          file: _tools/docker-testing-tools/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64

      - name: Test image
        if: github.event_name != 'pull_request'
        run: |
          # Test the built image works correctly
          docker run --rm ghcr.io/${{ github.repository_owner }}/actions:testing-tools shellspec --version
          docker run --rm ghcr.io/${{ github.repository_owner }}/actions:testing-tools act --version
          docker run --rm ghcr.io/${{ github.repository_owner }}/actions:testing-tools trivy --version

      - name: Generate image summary
        if: github.event_name != 'pull_request'
        run: |
          {
            echo "## 🐋 Docker Image Built Successfully"
            echo ""
            echo "**Image**: \`ghcr.io/${{ github.repository_owner }}/actions:testing-tools\`"
            echo "**Tags**: ${{ steps.meta.outputs.tags }}"
            echo ""
            echo "### Usage in GitHub Actions"
            echo ""
            echo "\`\`\`yaml"
            echo "jobs:"
            echo " test:"
            echo " runs-on: ubuntu-latest"
            echo " container: ghcr.io/${{ github.repository_owner }}/actions:testing-tools"
            echo " steps:"
            echo " - uses: actions/checkout@v5"
            echo " - run: shellspec _tests/unit/your-action/"
            echo "\`\`\`"
            echo ""
            echo "### Pre-installed Tools"
            echo "- ShellSpec"
            echo "- nektos/act (latest)"
            echo "- Trivy security scanner (latest)"
            echo "- TruffleHog secrets scanner (latest)"
            echo "- actionlint (latest)"
            echo "- shellcheck, jq, kcov, GitHub CLI"
            echo "- Node.js LTS, Python 3, build tools"
          } >> "$GITHUB_STEP_SUMMARY"

46  .github/workflows/codeql-new.yml  vendored  Normal file
@@ -0,0 +1,46 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: 'CodeQL (New Action)'

on:
  push:
    branches:
      - 'main'
  pull_request:
    branches:
      - 'main'
  schedule:
    - cron: '30 1 * * 0' # Run at 1:30 AM UTC every Sunday
  merge_group:

permissions:
  actions: read
  contents: read

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    runs-on: ubuntu-latest
    permissions:
      security-events: write
      contents: read

    strategy:
      fail-fast: false
      matrix:
        language:
          - 'actions'
          - 'javascript'
          - 'python'

    steps:
      - name: Checkout repository
        uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

      - name: Run CodeQL Analysis
        uses: ./codeql-analysis
        with:
          language: ${{ matrix.language }}
          queries: security-and-quality
          config-file: .github/codeql/codeql-config.yml
          token: ${{ github.token }}

46  .github/workflows/codeql.yml  vendored
@@ -1,46 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: 'CodeQL'

on:
  push:
    branches: ['main']
  pull_request:
    branches: ['main']
  schedule:
    - cron: '30 1 * * 0' # Run at 1:30 AM UTC every Sunday
  merge_group:

permissions:
  actions: read
  contents: read

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: ['actions', 'javascript'] # Add languages used in your actions

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Initialize CodeQL
        uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
        with:
          languages: ${{ matrix.language }}
          queries: security-and-quality

      - name: Autobuild
        uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
        with:
          category: '/language:${{matrix.language}}'

7  .github/workflows/dependency-review.yml  vendored
@@ -1,7 +1,8 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: 'Dependency Review'
on: [pull_request]
on:
  - pull_request

permissions:
  contents: read
@@ -11,6 +12,6 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3
        uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2

43  .github/workflows/issue-stats.yml  vendored  Normal file
@@ -0,0 +1,43 @@
---
name: Monthly issue metrics
on:
  workflow_dispatch:
  schedule:
    - cron: '3 2 1 * *'

permissions:
  contents: read

jobs:
  build:
    name: issue metrics
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: read
    steps:
      - name: Get dates for last month
        shell: sh
        run: |
          # Calculate the first day of the previous month
          first_day=$(date -d "last month" +%Y-%m-01)

          # Calculate the last day of the previous month
          last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d)

          #Set an environment variable with the date range
          echo "$first_day..$last_day"
          echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV"

      - name: Run issue-metrics tool
        uses: github/issue-metrics@55bb0b704982057a101ab7515fb72b2293927c8a # v3.25.4
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SEARCH_QUERY: 'repo:ivuorinen/actions is:issue created:${{ env.last_month }} -reason:"not planned"'

      - name: Create issue
        uses: peter-evans/create-issue-from-file@fca9117c27cdc29c6c4db3b86c48e4115a786710 # v6.0.0
        with:
          title: Monthly issue metrics report
          token: ${{ secrets.GITHUB_TOKEN }}
          content-filepath: ./issue_metrics.md

41  .github/workflows/new-release.yml  vendored
@@ -20,27 +20,30 @@ jobs:
      version: ${{ steps.daily-version.outputs.version }}

    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
      - uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

      - name: Create tag if necessary
        uses: fregante/daily-version-action@fb1a60b7c4daf1410cd755e360ebec3901e58588 # v2.1.3
      - name: Create daily release
        id: daily-version
        run: |
          set -eu

      - name: Create changelog text
        if: steps.daily-version.outputs.created
        id: changelog
        uses: loopwerk/tag-changelog@941366edb8920e2071eae0449031830984b9f26e # v1.3.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          config_file: .github/tag-changelog-config.js
          VERSION="v$(date '+%Y.%m.%d')"
          printf '%s\n' "version=$VERSION" >> "$GITHUB_OUTPUT"

      - name: Create release
        if: steps.daily-version.outputs.created
        uses: ncipollo/release-action@b7eabc95ff50cbeeedec83973935c8f306dfcd0b # v1.20.0
          # Check if release already exists
          if gh release view "$VERSION" >/dev/null 2>&1; then
            printf '%s\n' "created=false" >> "$GITHUB_OUTPUT"
            printf '%s\n' "Release $VERSION already exists - skipping"
            exit 0
          fi

          # Create release with auto-generated changelog (also creates tag)
          gh release create "$VERSION" \
            --title "Release $VERSION" \
            --generate-notes \
            --target main

          printf '%s\n' "created=true" >> "$GITHUB_OUTPUT"
          printf '%s\n' "Created release $VERSION"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag: ${{ steps.daily-version.outputs.version }}
          name: Release ${{ steps.daily-version.outputs.version }}
          body: ${{ steps.changelog.outputs.changes }}
          allowUpdates: true
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

124  .github/workflows/pr-lint.yml  vendored
@@ -24,17 +24,9 @@ on:
  merge_group:

env:
  # Apply linter fixes configuration
  APPLY_FIXES: all
  APPLY_FIXES_EVENT: pull_request
  APPLY_FIXES_MODE: commit

  # Disable linters that do not work or conflict
  # MegaLinter configuration - these override the action's defaults
  DISABLE_LINTERS: REPOSITORY_DEVSKIM

  # Additional settings
  VALIDATE_ALL_CODEBASE: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
  GITHUB_TOKEN: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }}

  # Report configuration
  REPORT_OUTPUT_FOLDER: megalinter-reports
@@ -47,6 +39,7 @@ concurrency:

permissions:
  contents: read
  packages: read # Required for private dependencies

jobs:
  megalinter:
@@ -55,122 +48,43 @@ jobs:
    timeout-minutes: 30

    permissions:
      actions: write
      checks: write # Create and update check runs
      contents: write
      issues: write
      packages: read # Access private packages
      pull-requests: write
      security-events: write
      statuses: write

    steps:
      - name: Checkout Code
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
        with:
          token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }}
          fetch-depth: 0

      - name: MegaLinter
        id: ml
        uses: oxsecurity/megalinter/flavors/cupcake@e08c2b05e3dbc40af4c23f41172ef1e068a7d651 # v8.8.0

      - name: Check MegaLinter Results
        id: check-results
        if: always()
        shell: bash
        run: |
          echo "status=success" >> "$GITHUB_OUTPUT"

          if [ -f "${{ env.REPORT_OUTPUT_FOLDER }}/megalinter.log" ]; then
            if grep -q "ERROR\|CRITICAL" "${{ env.REPORT_OUTPUT_FOLDER }}/megalinter.log"; then
              echo "Linting errors found"
              echo "status=failure" >> "$GITHUB_OUTPUT"
            fi
          else
            echo "::warning::MegaLinter log file not found"
          fi

      - name: Upload Reports
        if: always()
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
      - name: Run MegaLinter
        id: pr-lint
        uses: ./pr-lint
        with:
          name: MegaLinter reports
          path: |
            megalinter-reports
            mega-linter.log
          retention-days: 30
          token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }}
          username: fiximus
          email: github-bot@ivuorinen.net

      - name: Upload SARIF Report
        if: always() && hashFiles('megalinter-reports/sarif/*.sarif')
        uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
        uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7
        with:
          sarif_file: megalinter-reports/sarif
          category: megalinter

      - name: Prepare Git for Fixes
        if: steps.ml.outputs.has_updated_sources == 1
        shell: bash
        run: |
          sudo chown -Rc $UID .git/
          git config --global user.name "fiximus"
          git config --global user.email "github-bot@ivuorinen.net"

      - name: Create Pull Request
        if: |
          steps.ml.outputs.has_updated_sources == 1 &&
          (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) &&
          env.APPLY_FIXES_MODE == 'pull_request' &&
          (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) &&
          !contains(github.event.head_commit.message, 'skip fix')
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
        id: cpr
        with:
          token: ${{ secrets.FIXIMUS_TOKEN || secrets.GITHUB_TOKEN }}
          commit-message: '[MegaLinter] Apply linters automatic fixes'
          title: '[MegaLinter] Apply linters automatic fixes'
          labels: bot
          branch: megalinter/fixes-${{ github.ref_name }}
          branch-suffix: timestamp
          delete-branch: true
          body: |
            ## MegaLinter Fixes

            MegaLinter has identified and fixed code style issues.

            ### 🔍 Changes Made
            - Automated code style fixes
            - Formatting improvements
            - Lint error corrections

            ### 📝 Notes
            - Please review the changes carefully
            - Run tests before merging
            - Verify formatting matches project standards

            > Generated automatically by MegaLinter

      - name: Commit Fixes
        if: |
          steps.ml.outputs.has_updated_sources == 1 &&
          (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) &&
          env.APPLY_FIXES_MODE == 'commit' &&
          github.ref != 'refs/heads/main' &&
          (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) &&
          !contains(github.event.head_commit.message, 'skip fix')
        uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # v6.0.1
        with:
          branch: ${{ github.event.pull_request.head.ref || github.head_ref || github.ref }}
          commit_message: |
            style: apply MegaLinter fixes

            [skip ci]
          commit_user_name: fiximus
          commit_user_email: github-bot@ivuorinen.net
          push_options: --force

      - name: Create Status Check
      - name: Check Results
        if: always()
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
        with:
          script: |
            const status = '${{ steps.check-results.outputs.status }}';
            const status = '${{ steps.pr-lint.outputs.validation_status }}';
            const conclusion = status === 'success' ? 'success' : 'failure';

            const summary = `## MegaLinter Results
@@ -190,8 +104,8 @@

      - name: Cleanup
        if: always()
        shell: bash
        run: |
        shell: sh
        run: |-
          # Remove temporary files but keep reports
          find . -type f -name "megalinter.*" ! -name "megalinter-reports" -delete || true
          find . -type d -name ".megalinter" -exec rm -rf {} + || true

4  .github/workflows/release.yml  vendored
@@ -16,7 +16,7 @@ jobs:
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
      - uses: softprops/action-gh-release@6cbd405e2c4e67a21c47fa9e383d020e4e28b836 # v2.3.3
      - uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
      - uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2.5.0
        with:
          generate_release_notes: true

632 .github/workflows/security-suite.yml vendored
@@ -1,11 +1,8 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: Security Suite
name: PR Security Analysis

on:
schedule:
- cron: '55 23 * * 0' # Every Sunday at 23:55
workflow_dispatch:
pull_request:
paths:
- '**/package.json'
@@ -17,339 +14,386 @@ on:
- '**/*.py'
- '**/*.js'
- '**/*.ts'
- '**/workflows/*.yml'
merge_group:
push:
branches: [main]
- '**/*.yml'
- '**/*.yaml'
- '.github/workflows/**'

permissions: read-all
permissions:
contents: read
pull-requests: write
issues: write
actions: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true

jobs:
check-secrets:
name: Check Required Secrets
security-analysis:
name: Security Analysis
runs-on: ubuntu-latest
outputs:
run_snyk: ${{ steps.check.outputs.run_snyk }}
run_slack: ${{ steps.check.outputs.run_slack }}
run_sonarcloud: ${{ steps.check.outputs.run_sonarcloud }}

steps:
- name: Check Required Secrets
id: check
shell: bash
- name: Checkout PR
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
with:
fetch-depth: 0
repository: ${{ github.event.pull_request.head.repo.full_name }}
ref: ${{ github.event.pull_request.head.sha }}

- name: Fetch PR Base
run: |
{
echo "run_snyk=false"
echo "run_slack=false"
echo "run_sonarcloud=false"
} >> "$GITHUB_OUTPUT"
set -eu
# Fetch the base ref from base repository with authentication (works for private repos and forked PRs)
# Using ref instead of SHA because git fetch requires ref names, not raw commit IDs
# Use authenticated URL to avoid 403/404 on private repositories
git fetch --no-tags --depth=1 \
"https://x-access-token:${{ github.token }}@github.com/${{ github.event.pull_request.base.repo.full_name }}" \
${{ github.event.pull_request.base.ref }}:refs/remotes/origin-base/${{ github.event.pull_request.base.ref }}
# Record the base commit for diffing without checking it out
# Keep PR head checked out so scanners analyze the new changes
BASE_REF="refs/remotes/origin-base/${{ github.event.pull_request.base.ref }}"
echo "BASE_REF=${BASE_REF}" >> "$GITHUB_ENV"
echo "Base ref: ${BASE_REF}"
git log -1 --oneline "${BASE_REF}"

if [ -n "${{ secrets.SNYK_TOKEN }}" ]; then
echo "run_snyk=true" >> "$GITHUB_OUTPUT"
else
echo "::warning::SNYK_TOKEN not set - Snyk scans will be skipped"
fi

if [ -n "${{ secrets.SLACK_WEBHOOK }}" ]; then
echo "run_slack=true" >> "$GITHUB_OUTPUT"
else
echo "::warning::SLACK_WEBHOOK not set - Slack notifications will be skipped"
fi

if [ -n "${{ secrets.SONAR_TOKEN }}" ]; then
echo "run_sonarcloud=true" >> "$GITHUB_OUTPUT"
else
echo "::warning::SONAR_TOKEN not set - SonarCloud analysis will be skipped"
fi
owasp:
name: OWASP Dependency Check
runs-on: ubuntu-latest
needs: check-secrets
permissions:
security-events: write

steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Run OWASP Dependency Check
- name: OWASP Dependency Check
# Only run on pull_request, not pull_request_target to prevent executing
# untrusted third-party actions against PR head from forks
if: github.event_name == 'pull_request'
uses: dependency-check/Dependency-Check_Action@3102a65fd5f36d0000297576acc56a475b0de98d # main
with:
project: 'GitHub Actions'
project: 'PR Security Analysis'
path: '.'
format: 'SARIF'
format: 'JSON'
out: 'reports'
args: >
--enableRetired
--enableExperimental
--failOnCVSS 7
- name: Upload OWASP Results
uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
with:
sarif_file: reports/dependency-check-report.sarif
category: owasp-dependency-check
- name: Upload artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: owasp-results
path: reports/dependency-check-report.sarif

snyk:
name: Snyk Security Scan
runs-on: ubuntu-latest
needs: check-secrets
if: needs.check-secrets.outputs.run_snyk == 'true'
permissions:
security-events: write

steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version: 'lts/*'
cache: 'npm'
- name: Run Snyk Scan
uses: snyk/actions/node@cdb760004ba9ea4d525f2e043745dfe85bb9077e # master
--enableRetired --enableExperimental --failOnCVSS 0
continue-on-error: true

- name: Semgrep Static Analysis
uses: semgrep/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d # v1
with:
config: 'auto'
generateSarif: 'true'
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
with:
args: --all-projects --sarif-file-output=snyk-results.sarif
- name: Upload Snyk Results
uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
with:
sarif_file: snyk-results.sarif
category: snyk
- name: Upload artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: snyk-results
path: snyk-results.sarif
SEMGREP_APP_TOKEN: ${{ github.event_name != 'pull_request_target' && secrets.SEMGREP_APP_TOKEN || '' }}
continue-on-error: true

scorecard:
name: OSSF Scorecard
runs-on: ubuntu-latest
needs: check-secrets
permissions:
security-events: write
id-token: write

steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Run Scorecard
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
- name: TruffleHog Secret Scan
uses: trufflesecurity/trufflehog@0f58ae7c5036094a1e3e750d18772af92821b503
with:
results_file: scorecard-results.sarif
results_format: sarif
publish_results: true
- name: Upload Scorecard Results
uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
with:
sarif_file: scorecard-results.sarif
category: scorecard
- name: Upload artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: scorecard-results
path: scorecard-results.sarif
path: ./
base: ${{ env.BASE_REF }}
head: HEAD
extra_args: --debug --only-verified --json --output /tmp/trufflehog_output.json
continue-on-error: true
analyze:
name: Analyze Results
runs-on: ubuntu-latest
needs: [check-secrets, owasp, scorecard, snyk]
if: always()
permissions:
issues: write

steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

- name: Download scan results
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
- name: Analyze Security Results
id: analyze
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
path: ./results

- name: Analyze Results
id: analysis
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with:
script: |
script: |-
const fs = require('fs');
const path = require('path');

async function analyzeResults() {
const metrics = {
timestamp: new Date().toISOString(),
vulnerabilities: { critical: 0, high: 0, medium: 0, low: 0 },
scorecard: null,
trends: {},
tools: {}
};
// Unique marker to identify our bot comment
const SECURITY_COMMENT_MARKER = '<!-- security-analysis-bot-comment -->';

function analyzeSarif(file, tool) {
if (!fs.existsSync(file)) return null;

try {
const data = JSON.parse(fs.readFileSync(file, 'utf8'));
const results = {
total: 0,
bySeverity: { critical: 0, high: 0, medium: 0, low: 0 },
details: []
};

data.runs.forEach(run => {
if (!run.results) return;

run.results.forEach(result => {
results.total++;
const severity = result.level === 'error' ? 'high' :
result.level === 'warning' ? 'medium' : 'low';

results.bySeverity[severity]++;
metrics.vulnerabilities[severity]++;

results.details.push({
title: result.message?.text || 'Unnamed issue',
severity,
location: result.locations?.[0]?.physicalLocation?.artifactLocation?.uri || 'Unknown',
description: result.message?.text || '',
ruleId: result.ruleId || ''
});
});
});

return results;
} catch (error) {
console.error(`Error analyzing ${tool} results:`, error);
return null;
}
}

// Analyze all SARIF files
metrics.tools = {
owasp: analyzeSarif('./results/owasp-results/dependency-check-report.sarif', 'OWASP'),
snyk: ${{ needs.check-secrets.outputs.run_snyk == 'true' }} ?
analyzeSarif('./results/snyk-results/snyk-results.sarif', 'Snyk') : null,
scorecard: analyzeSarif('./results/scorecard-results/scorecard-results.sarif', 'Scorecard')
};

// Save results
fs.writeFileSync('security-results.json', JSON.stringify(metrics, null, 2));

// Set outputs
core.setOutput('total_critical', metrics.vulnerabilities.critical);
core.setOutput('total_high', metrics.vulnerabilities.high);

return metrics;
}

return await analyzeResults();
- name: Generate Reports
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with:
script: |
const fs = require('fs');
const metrics = JSON.parse(fs.readFileSync('security-results.json', 'utf8'));

// Find existing security report issue
const issues = await github.rest.issues.listForRepo({
owner: context.repo.owner,
repo: context.repo.repo,
state: 'open',
labels: ['security-report'],
per_page: 1
});

const severityEmoji = {
critical: '🚨',
high: '⚠️',
medium: '⚡',
low: '📝'
const findings = {
permissions: [],
actions: [],
secrets: [],
vulnerabilities: [],
dependencies: []
};

// Generate report body
const report = `## Security Scan Report ${new Date().toISOString()}
// Analyze GitHub Actions permission changes
const { execSync } = require('child_process');
const baseRef = process.env.BASE_REF;
try {
const changedWorkflows = execSync(
`git diff --name-only ${baseRef}...HEAD | grep -E "\.github/workflows/.*\.ya?ml$" || true`,
{ encoding: 'utf8' }
).trim().split('\n').filter(Boolean);

### Summary
${Object.entries(metrics.vulnerabilities)
.map(([sev, count]) => `${severityEmoji[sev]} ${sev}: ${count}`)
.join('\n')}
for (const workflow of changedWorkflows) {
if (!workflow) continue;

### Tool Results
${Object.entries(metrics.tools)
.filter(([_, results]) => results)
.map(([tool, results]) => `
#### ${tool.toUpperCase()}
- Total issues: ${results.total}
${Object.entries(results.bySeverity)
.filter(([_, count]) => count > 0)
.map(([sev, count]) => `- ${sev}: ${count}`)
.join('\n')}
try {
const oldContent = execSync(`git show ${baseRef}:${workflow}`, { encoding: 'utf8' });
const newContent = fs.readFileSync(workflow, 'utf8');

${results.details
.filter(issue => ['critical', 'high'].includes(issue.severity))
.map(issue => `- ${severityEmoji[issue.severity]} ${issue.title} (${issue.severity})
- Location: \`${issue.location}\`
- Rule: \`${issue.ruleId}\``)
.join('\n')}
`).join('\n')}
// Simple permission extraction (could be enhanced with YAML parsing)
const oldPerms = oldContent.match(/permissions:\s*\n([\s\S]*?)(?=\n\w|\n$|$)/);
const newPerms = newContent.match(/permissions:\s*\n([\s\S]*?)(?=\n\w|\n$|$)/);

### Action Items
${metrics.vulnerabilities.critical + metrics.vulnerabilities.high > 0 ?
`- [ ] Address ${metrics.vulnerabilities.critical} critical and ${metrics.vulnerabilities.high} high severity issues
- [ ] Review automated fix PRs
- [ ] Update dependencies with known vulnerabilities` :
'✅ No critical or high severity issues found'}
if (oldPerms?.[1] !== newPerms?.[1]) {
findings.permissions.push({
file: workflow,
old: oldPerms?.[1]?.trim() || 'None',
new: newPerms?.[1]?.trim() || 'None'
});
}

### Links
- [Workflow Run](${process.env.GITHUB_SERVER_URL}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId})
- [Security Overview](${process.env.GITHUB_SERVER_URL}/${context.repo.owner}/${context.repo.repo}/security)
// Check for new actions
const oldActions = [...oldContent.matchAll(/uses:\s*([^\s\n]+)/g)].map(m => m[1]);
const newActions = [...newContent.matchAll(/uses:\s*([^\s\n]+)/g)].map(m => m[1]);
const addedActions = newActions.filter(action => !oldActions.includes(action));

> Last updated: ${new Date().toISOString()}`;
if (addedActions.length > 0) {
findings.actions.push({
file: workflow,
added: addedActions
});
}
} catch (error) {
console.log(`Could not analyze ${workflow}: ${error.message}`);
}
}
} catch (error) {
console.log('No workflow changes detected');
}
// Update or create issue
if (issues.data.length > 0) {
await github.rest.issues.update({
// Parse OWASP Dependency Check results
try {
const owaspResults = JSON.parse(fs.readFileSync('reports/dependency-check-report.json', 'utf8'));
if (owaspResults.dependencies) {
owaspResults.dependencies.forEach(dep => {
if (dep.vulnerabilities && dep.vulnerabilities.length > 0) {
dep.vulnerabilities.forEach(vuln => {
findings.dependencies.push({
file: dep.fileName || 'Unknown',
cve: vuln.name,
severity: vuln.severity || 'Unknown',
description: vuln.description || 'No description'
});
});
}
});
}
} catch (error) {
console.log('No OWASP results found');
}

// Parse Semgrep SARIF results
try {
if (fs.existsSync('semgrep.sarif')) {
const sarifContent = JSON.parse(fs.readFileSync('semgrep.sarif', 'utf8'));
if (sarifContent.runs && sarifContent.runs[0] && sarifContent.runs[0].results) {
const run = sarifContent.runs[0];
const rules = run.tool?.driver?.rules || [];
run.results.forEach(result => {
const rule = rules.find(r => r.id === result.ruleId);
findings.vulnerabilities.push({
file: result.locations?.[0]?.physicalLocation?.artifactLocation?.uri || 'Unknown',
line: result.locations?.[0]?.physicalLocation?.region?.startLine || 0,
rule: result.ruleId,
severity: result.level?.toUpperCase() || 'INFO',
message: result.message?.text || rule?.shortDescription?.text || 'No description'
});
});
}
}
} catch (error) {
console.log('Semgrep SARIF parsing completed');
}

// Parse TruffleHog results (NDJSON format - one JSON object per line)
try {
const truffleOutput = execSync('cat /tmp/trufflehog_output.json || echo ""', { encoding: 'utf8' });
const truffleLines = truffleOutput.trim().split('\n').filter(line => line.length > 0);

truffleLines.forEach((line, index) => {
try {
const result = JSON.parse(line);
findings.secrets.push({
file: result.SourceMetadata?.Data?.Filesystem?.file || 'Unknown',
line: result.SourceMetadata?.Data?.Filesystem?.line || 0,
detector: result.DetectorName,
verified: result.Verified || false
});
} catch (parseError) {
// Log only safe metadata to avoid leaking secrets
console.log('Failed to parse TruffleHog line at index', index, '- Error:', parseError.message, '(line length:', line.length, 'chars)');
}
});

if (truffleLines.length === 0) {
console.log('No secrets detected');
}
} catch (error) {
console.log('No TruffleHog output file found');
}
// Generate clean comment sections
const sections = [];

// GitHub Actions Permissions Changes
if (findings.permissions.length > 0) {
const permSection = ['## 🔐 GitHub Actions Permissions Changes'];
findings.permissions.forEach(change => {
permSection.push(`\n**${change.file}**:`);

// Parse permissions into lines
const oldLines = (change.old === 'None' ? [] : change.old.split('\n').map(l => l.trim()).filter(Boolean));
const newLines = (change.new === 'None' ? [] : change.new.split('\n').map(l => l.trim()).filter(Boolean));

// Create sets for comparison
const oldSet = new Set(oldLines);
const newSet = new Set(newLines);

// Find added, removed, and unchanged
const removed = oldLines.filter(l => !newSet.has(l));
const added = newLines.filter(l => !oldSet.has(l));
const unchanged = oldLines.filter(l => newSet.has(l));

// Only show diff if there are actual changes
if (removed.length > 0 || added.length > 0) {
permSection.push('```diff');

// Show removed permissions
removed.forEach(line => permSection.push(`- ${line}`));

// Show added permissions
added.forEach(line => permSection.push(`+ ${line}`));

permSection.push('```');

// Summary for context
if (unchanged.length > 0 && unchanged.length <= 3) {
permSection.push(`<details><summary>Unchanged (${unchanged.length})</summary>\n\n${unchanged.map(l => `- ${l}`).join('\n')}\n</details>`);
} else if (unchanged.length > 3) {
permSection.push(`<sub>*${unchanged.length} permissions unchanged*</sub>`);
}
}
});
sections.push(permSection.join('\n'));
}

// New/Changed Actions
if (findings.actions.length > 0) {
const actionSection = ['## 🎯 New GitHub Actions'];
findings.actions.forEach(change => {
actionSection.push(`**${change.file}**:`);
change.added.forEach(action => {
actionSection.push(`- \`${action}\``);
});
});
sections.push(actionSection.join('\n'));
}

// Secrets Detected
if (findings.secrets.length > 0) {
const secretSection = ['## 🔑 Secrets Detected'];
findings.secrets.forEach(secret => {
const verified = secret.verified ? '🚨 **VERIFIED**' : '⚠️ Potential';
secretSection.push(`- ${verified} ${secret.detector} in \`${secret.file}:${secret.line}\``);
});
sections.push(secretSection.join('\n'));
}

// Security Vulnerabilities
if (findings.vulnerabilities.length > 0) {
const vulnSection = ['## ⚠️ Security Vulnerabilities'];
const groupedBySeverity = findings.vulnerabilities.reduce((acc, vuln) => {
const sev = vuln.severity.toUpperCase();
if (!acc[sev]) acc[sev] = [];
acc[sev].push(vuln);
return acc;
}, {});

['ERROR', 'WARNING', 'INFO'].forEach(severity => {
if (groupedBySeverity[severity]) {
vulnSection.push(`\n**${severity} Severity:**`);
groupedBySeverity[severity].forEach(vuln => {
vulnSection.push(`- \`${vuln.file}:${vuln.line}\` - ${vuln.message}`);
vulnSection.push(` - Rule: \`${vuln.rule}\``);
});
}
});
sections.push(vulnSection.join('\n'));
}

// Dependency Issues
if (findings.dependencies.length > 0) {
const depSection = ['## 📦 Dependency Vulnerabilities'];
const groupedBySeverity = findings.dependencies.reduce((acc, dep) => {
const sev = dep.severity.toUpperCase();
if (!acc[sev]) acc[sev] = [];
acc[sev].push(dep);
return acc;
}, {});

['CRITICAL', 'HIGH', 'MEDIUM', 'LOW'].forEach(severity => {
if (groupedBySeverity[severity]) {
depSection.push(`\n**${severity} Severity:**`);
groupedBySeverity[severity].forEach(dep => {
depSection.push(`- **${dep.cve}** in \`${dep.file}\``);
depSection.push(` - ${dep.description.substring(0, 100)}...`);
});
}
});
sections.push(depSection.join('\n'));
}
// Count critical issues for output
const criticalCount =
findings.secrets.filter(s => s.verified).length +
(findings.vulnerabilities.filter(v => v.severity.toUpperCase() === 'ERROR').length || 0) +
(findings.dependencies.filter(d => d.severity.toUpperCase() === 'CRITICAL').length || 0);

// Export critical count as output
core.setOutput('critical_issues', criticalCount.toString());

// Generate final comment with unique marker
let comment = `${SECURITY_COMMENT_MARKER}\n## ✅ Security Analysis\n\n`;
if (sections.length === 0) {
comment += 'No security issues detected in this PR.';
} else {
comment += sections.join('\n\n');
}

// Find existing security comment using unique marker
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});

const existingComment = comments.find(comment =>
comment.body && comment.body.includes(SECURITY_COMMENT_MARKER)
);

if (existingComment) {
// Update existing comment
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issues.data[0].number,
body: report,
state: 'open'
comment_id: existingComment.id,
body: comment
});
} else {
await github.rest.issues.create({
// Create new comment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
title: '🔒 Security Scan Report',
body: report,
labels: ['security-report', 'automated'],
assignees: ['ivuorinen']
issue_number: context.issue.number,
body: comment
});
}

// Add summary to workflow
await core.summary
.addRaw(report)
.write();

- name: Archive Results
- name: Check Critical Issues
if: always()
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
CRITICAL_COUNT: ${{ steps.analyze.outputs.critical_issues || '0' }}
with:
name: security-results
path: |
reports/
*.sarif
security-results.json
retention-days: 30
script: |-
const criticalCount = parseInt(process.env.CRITICAL_COUNT || '0', 10);

- name: Notify on Failure
if: failure() && needs.check-secrets.outputs.run_slack == 'true'
run: |
curl -X POST -H 'Content-type: application/json' \
--data '{"text":"❌ Security checks failed! Check the logs for details."}' \
${{ secrets.SLACK_WEBHOOK }}
if (criticalCount > 0) {
core.setFailed(`Found ${criticalCount} critical security issue(s). Please review and address them before merging.`);
} else {
console.log('No critical security issues found.');
}
2 .github/workflows/stale.yml vendored
@@ -25,7 +25,7 @@ jobs:

steps:
- name: 🚀 Run stale
uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30
2 .github/workflows/sync-labels.yml vendored
@@ -35,6 +35,6 @@ jobs:

steps:
- name: ⤵️ Checkout Repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
- name: ⤵️ Sync Latest Labels Definitions
uses: ./sync-labels
313 .github/workflows/test-actions.yml vendored Normal file
@@ -0,0 +1,313 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: Test GitHub Actions

on:
push:
branches:
- main
paths:
- '*/action.yml'
- '_tests/**'
- 'Makefile'
- '.github/workflows/test-actions.yml'
pull_request:
branches:
- main
paths:
- '*/action.yml'
- '_tests/**'
- 'Makefile'
- '.github/workflows/test-actions.yml'
workflow_dispatch:
inputs:
test-type:
description: 'Type of tests to run'
required: true
default: 'all'
type: choice
options:
- all
- unit
- integration
action-filter:
description: 'Filter tests by action name (optional)'
required: false
type: string

permissions: {}

jobs:
unit-tests:
name: Unit Tests
runs-on: ubuntu-latest
permissions:
contents: read
actions: write
security-events: write
timeout-minutes: 10

steps:
- name: Checkout repository
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

- name: Setup test environment
uses: ./.github/actions/setup-test-environment

- name: Run unit tests
shell: sh
run: |
if [ "${{ github.event.inputs.test-type }}" = "unit" ] || [ "${{ github.event.inputs.test-type }}" = "all" ] || [ -z "${{ github.event.inputs.test-type }}" ]; then
if [ -n "${{ github.event.inputs.action-filter }}" ]; then
make test-action ACTION="${{ github.event.inputs.action-filter }}"
else
make test-unit
fi
else
echo "Skipping unit tests (test-type: ${{ github.event.inputs.test-type }})"
fi

- name: Generate SARIF report
shell: sh
run: ./_tests/run-tests.sh --type unit --format sarif
if: always()

- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7
if: always() && hashFiles('_tests/reports/test-results.sarif') != ''
with:
sarif_file: _tests/reports/test-results.sarif
category: github-actions-tests

- name: Upload unit test results
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
if: always()
with:
name: unit-test-results
path: _tests/reports/unit/
retention-days: 7
if-no-files-found: ignore
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
permissions:
contents: read
actions: write
timeout-minutes: 20
if: github.event.inputs.test-type != 'unit'

steps:
- name: Checkout repository
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

- name: Setup test environment
uses: ./.github/actions/setup-test-environment
with:
install-act: 'true'

- name: Run integration tests
shell: sh
run: |
if [ "${{ github.event.inputs.test-type }}" = "integration" ] || [ "${{ github.event.inputs.test-type }}" = "all" ] || [ -z "${{ github.event.inputs.test-type }}" ]; then
if [ -n "${{ github.event.inputs.action-filter }}" ]; then
./_tests/run-tests.sh --type integration --action "${{ github.event.inputs.action-filter }}"
else
make test-integration
fi
else
echo "Skipping integration tests (test-type: ${{ github.event.inputs.test-type }})"
fi

- name: Check for integration test reports
id: check-integration-reports
if: always()
shell: sh
run: |
if [ -d "_tests/reports/integration" ] && [ -n "$(find _tests/reports/integration -type f 2>/dev/null)" ]; then
printf '%s\n' "reports-found=true" >> "$GITHUB_OUTPUT"
echo "Integration test reports found"
else
printf '%s\n' "reports-found=false" >> "$GITHUB_OUTPUT"
echo "No integration test reports found"
fi

- name: Upload integration test results
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
if: always() && steps.check-integration-reports.outputs.reports-found == 'true'
with:
name: integration-test-results
path: _tests/reports/integration/
retention-days: 7
if-no-files-found: ignore

coverage:
name: Test Coverage
runs-on: ubuntu-latest
permissions:
contents: read
actions: write
issues: write
pull-requests: write
timeout-minutes: 15
needs:
- unit-tests
if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || github.event_name == 'pull_request'

steps:
- name: Checkout repository
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

- name: Setup test environment
uses: ./.github/actions/setup-test-environment
with:
install-kcov: 'true'

- name: Generate coverage report
run: make test-coverage

- name: Upload coverage report
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: coverage-report
path: _tests/coverage/
retention-days: 30
if-no-files-found: warn

- name: Comment coverage summary
if: github.event_name == 'pull_request'
shell: sh
run: |
if [ -f _tests/coverage/summary.json ]; then
coverage=$(jq -r '.coverage_percent' _tests/coverage/summary.json)
tested_actions=$(jq -r '.tested_actions' _tests/coverage/summary.json)
total_actions=$(jq -r '.total_actions' _tests/coverage/summary.json)

cat > coverage_comment.md <<EOF
## 📊 Test Coverage Report

- **Action Coverage**: ${coverage}% (${tested_actions}/${total_actions} actions)
- **Generated**: $(date)

EOF

echo "Coverage: ${coverage}%"
fi

- name: Post coverage comment
if: github.event_name == 'pull_request' && hashFiles('coverage_comment.md') != ''
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const body = fs.readFileSync('coverage_comment.md', 'utf8');
const { owner, repo } = context.repo;
const issue_number = context.issue.number;
// Create or update a sticky comment
const marker = '<!-- coverage-comment -->';
const list = await github.rest.issues.listComments({ owner, repo, issue_number });
const existing = list.data.find(c => c.body && c.body.includes(marker));
const finalBody = `${marker}\n` + body;
if (existing) {
await github.rest.issues.updateComment({ owner, repo, comment_id: existing.id, body: finalBody });
} else {
await github.rest.issues.createComment({ owner, repo, issue_number, body: finalBody });
}
security-scan:
name: Security Scan
runs-on: ubuntu-latest
permissions:
contents: read
timeout-minutes: 10

steps:
- name: Checkout repository
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta

- name: Setup test environment
uses: ./.github/actions/setup-test-environment
with:
install-kcov: 'true'

- name: Scan for secrets
uses: trufflesecurity/trufflehog@0f58ae7c5036094a1e3e750d18772af92821b503
with:
path: ./
base: ${{ github.event_name == 'pull_request' && github.event.repository.default_branch || '' }}
head: ${{ github.event_name == 'pull_request' && 'HEAD' || '' }}
extra_args: --debug --only-verified

- name: Scan shell scripts
shell: sh
run: |
# Scan all shell scripts in _tests/
find _tests/ -name "*.sh" -exec shellcheck -x {} \; || {
echo "❌ Shell script security issues found"
exit 1
}

echo "✅ Shell script security scan passed"

test-summary:
name: Test Summary
runs-on: ubuntu-latest
permissions:
contents: read
actions: read # Required to download artifacts
needs:
- unit-tests
- integration-tests
if: always()

steps:
- name: Download test results
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
pattern: '*-test-results'
merge-multiple: true
path: test-results/

- name: Generate test summary
shell: sh
run: |
{
echo "## 🧪 Test Results Summary"
echo ""

# Unit tests
unit_count=$(find test-results -type f -path "*/unit/*.txt" | wc -l || true)
if [ "${unit_count:-0}" -gt 0 ]; then
echo "- **Unit Tests**: $unit_count action(s) tested"
fi

# Integration tests
integration_count=$(find test-results -type f -path "*/integration/*.txt" | wc -l || true)
if [ "${integration_count:-0}" -gt 0 ]; then
echo "- **Integration Tests**: $integration_count action(s) tested"
fi

echo ""
unit_success="${{ needs.unit-tests.result == 'success' }}"
integration_ok="${{ needs.integration-tests.result == 'success' || needs.integration-tests.result == 'skipped' }}"
if [ "$unit_success" = "true" ] && [ "$integration_ok" = "true" ]; then
status="✅ All tests passed"
else
status="❌ Some tests failed"
fi
echo "**Status**: $status"

# Job status details
echo ""
echo "### Job Details"
echo "- Unit Tests: ${{ needs.unit-tests.result }}"
echo "- Integration Tests: ${{ needs.integration-tests.result }}"
} >> "$GITHUB_STEP_SUMMARY"

- name: Fail if tests failed
if: needs.unit-tests.result == 'failure' || needs.integration-tests.result == 'failure'
shell: sh
run: |-
echo "❌ One or more test jobs failed"
exit 1
123 .github/workflows/version-maintenance.yml vendored Normal file
@@ -0,0 +1,123 @@
---
name: Version Maintenance

on:
schedule:
# Run weekly on Monday at 9 AM UTC
- cron: '0 9 * * 1'
workflow_dispatch:
inputs:
major-version:
description: 'Major version to check (e.g., v2025)'
required: false
type: string

permissions:
contents: write
pull-requests: write
issues: write

jobs:
check-and-update:
name: Check Version References
runs-on: ubuntu-latest

steps:
- name: Checkout Repository
uses: actions/checkout@71cf2267d89c5cb81562390fa70a37fa40b1305e # v6-beta
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}

- name: Determine Major Version
id: version
shell: sh
run: |
if [ -n "${{ inputs.major-version }}" ]; then
printf '%s\n' "major=${{ inputs.major-version }}" >> "$GITHUB_OUTPUT"
else
current_year=$(date +%Y)
printf '%s\n' "major=v$current_year" >> "$GITHUB_OUTPUT"
fi

- name: Run Action Versioning
id: action-versioning
uses: ./action-versioning
with:
major-version: ${{ steps.version.outputs.major }}
token: ${{ secrets.GITHUB_TOKEN }}

- name: Create Pull Request
if: steps.action-versioning.outputs.updated == 'true'
uses: peter-evans/create-pull-request@22a9089034f40e5a961c8808d113e2c98fb63676 # v7.0.11
with:
token: ${{ secrets.GITHUB_TOKEN }}
commit-message: 'chore: update action references to ${{ steps.version.outputs.major }}'
title: 'chore: Update action references to ${{ steps.version.outputs.major }}'
body: |
## Version Maintenance

This PR updates all internal action references to match the latest ${{ steps.version.outputs.major }} tag.

**Updated SHA**: `${{ steps.action-versioning.outputs.commit-sha }}`

### Changes
- Updated all `*/action.yml` files to reference the current ${{ steps.version.outputs.major }} SHA

### Verification
```bash
make check-version-refs
```
branch: automated/version-update-${{ steps.version.outputs.major }}
delete-branch: true
labels: |
automated
dependencies

- name: Check for Annual Bump
if: steps.action-versioning.outputs.needs-annual-bump == 'true'
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
const currentYear = new Date().getFullYear();
const majorVersion = '${{ steps.version.outputs.major }}';

await github.rest.issues.create({
owner: context.repo.owner,
repo: context.repo.repo,
title: `🔄 Annual Version Bump Needed: ${majorVersion} → v${currentYear}`,
body: `## Annual Version Bump Required

It's time to bump the major version from ${majorVersion} to v${currentYear}.

### Steps

1. **Create the new major version tag:**
\`\`\`bash
git tag -a v${currentYear} -m "Major version v${currentYear}"
git push origin v${currentYear}
\`\`\`

2. **Bump all action references:**
\`\`\`bash
make bump-major-version OLD=${majorVersion} NEW=v${currentYear}
\`\`\`

3. **Update documentation:**
\`\`\`bash
make docs
\`\`\`

4. **Commit and push:**
\`\`\`bash
git push origin main
\`\`\`

### Verification

\`\`\`bash
make check-version-refs
\`\`\`
`,
labels: ['maintenance', 'high-priority']
});
198 .gitignore vendored
@@ -1,137 +1,87 @@
.php-cs-fixer.cache
.php-cs-fixer.php
composer.phar
/vendor/
.phpunit.result.cache
.phpunit.cache
/app/phpunit.xml
/phpunit.xml
/build/
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
pids
*.pid
*.seed
*.pid.lock
lib-cov
coverage
*.iws
*.lcov
.nyc_output
.grunt
bower_components
.lock-wscript
build/Release
node_modules/
jspm_packages/
web_modules/
*.tsbuildinfo
.npm
.eslintcache
.stylelintcache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
.node_repl_history
*.log
*.pem
*.pid
*.pid.lock
*.seed
*.tgz
.yarn-integrity
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
*.vim
*~
./update_*
.DS_Store
.cache
.parcel-cache
.next
out
.nuxt
dist
.cache/
.vuepress/dist
.temp
.coverage
.worktrees/
.coverage.*
.docusaurus
.serverless/
.fusebox/
.dynamodb/
.tern-port
.vscode-test
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.env
!.env.example
!.env.sample
.env*.local
.env.development.local
.env.local
.env.production.local
.env.test.local
.eslintcache
.fusebox/
.idea/**/aws.xml
.idea/**/contentModel.xml
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/dataSources/
.idea/**/dbnavigator.xml
.idea/**/dictionaries
.idea/**/dynamic.xml
.idea/**/gradle.xml
.idea/**/libraries
.idea/**/mongoSettings.xml
.idea/**/shelf
.idea/**/sqlDataSources.xml
.idea/**/tasks.xml
.idea/**/uiDesigner.xml
.idea/**/usage.statistics.xml
.idea/**/workspace.xml
.idea/caches/build_file_checksums.ser
.idea/httpRequests
.idea/replstate.xml
.idea/sonarlint/
.idea_modules/
.netrwhist
.next
.node_repl_history
.npm
.nuxt
.parcel-cache
.pnp.*
.pnp.js
.temp
.tern-port
.vercel
.yarn/*
/.next/
/.pnp
/.vagrant
/vendor/
[._]*.s[a-v][a-z]
!*.svg # comment out if you don't need vector files
[._]*.sw[a-p]
[._]*.un~
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]
[._]sw[a-p]
Session.vim
Sessionx.vim
.netrwhist
*~
tags
[._]*.un~
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
.idea/**/aws.xml
.idea/**/contentModel.xml
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
.idea/**/gradle.xml
.idea/**/libraries
cmake-build-*/
.idea/**/mongoSettings.xml
*.iws
out/
.idea_modules/
atlassian-ide-plugin.xml
.idea/replstate.xml
.idea/sonarlint/
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
.idea/httpRequests
.idea/caches/build_file_checksums.ser
npm-debug.log
yarn-error.log
bootstrap/compiled.php
app/storage/
public/storage
public/hot
public_html/storage
public_html/hot
storage/*.key
Homestead.yaml
Homestead.json
/.vagrant
/node_modules
/.pnp
.pnp.js
/coverage
/.next/
/out/
/build
.DS_Store
*.pem
.env*.local
.vercel
next-env.d.ts

__pycache__
_tests/.tmp
_tests/coverage
_tests/reports
megalinter-reports/
./update_*
node_modules/
out/
reports/**/*.xml
tags
tests/reports/**/*.json
!uv.lock
code-scanning-report-*
*.sarif
TODO.md
@@ -1,3 +1,6 @@
[extend]
useDefault = true

[allowlist]
description = "Allowlisted files"
paths = [
@@ -6,16 +9,6 @@ paths = [
'''dist''',
'''yarn.lock''',
'''package-lock.json''',
'''pnpm-lock.yaml'''
'''pnpm-lock.yaml''',
'''_tests'''
]

[rules]
[rules.github-token]
description = "GitHub Token"
regex = '''ghp_[0-9a-zA-Z]{36}'''
tags = ["token", "github"]

[rules.secrets]
description = "Generic Secret Pattern"
regex = '''(?i)(secret|token|key|password|cert)[\s]*[=:]\s*['"][^'"]*['"]'''
tags = ["key", "secret"]
2 .markdownlintignore Normal file
@@ -0,0 +1,2 @@
node_modules/
.worktrees/
@@ -32,4 +32,4 @@ JAVASCRIPT_ES_CONFIG_FILE: .eslintrc.json
TYPESCRIPT_ES_CONFIG_FILE: .eslintrc.json

FILTER_REGEX_EXCLUDE: >
(node_modules|\.automation/test|docs/json-schemas)
(node_modules|\.automation/test|docs/json-schemas|\.worktrees)
@@ -1,12 +1,33 @@
---
# Configure pre-commit to use uv for Python hooks
# Pre-commit 3.6.0+ automatically detects and uses uv when available
default_install_hook_types: [pre-commit, commit-msg]

repos:
- repo: local
hooks:
- id: generate-docs-format-lint
name: Generate docs, format, and lint
entry: bash -c 'make all'
language: system
pass_filenames: false
types: [markdown, python, yaml]
files: ^(docs/.*|README\.md|CONTRIBUTING\.md|CHANGELOG\.md|.*\.py|.*\.ya?ml)$
- repo: https://github.com/astral-sh/uv-pre-commit
rev: 0.9.13
hooks:
- id: uv-lock
- id: uv-sync
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: requirements-txt-fixer
- id: detect-private-key
exclude: ^validate-inputs/validators/security\.py$
- id: destroyed-symlinks
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- id: check-ast
- id: check-case-conflict
- id: check-merge-conflict
- id: check-executables-have-shebangs
@@ -22,42 +43,54 @@ repos:
- id: pretty-format-json
args: [--autofix, --no-sort-keys]

- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.45.0
- repo: https://github.com/DavidAnson/markdownlint-cli2
rev: v0.19.1
hooks:
- id: markdownlint
args: [-c, .markdownlint.json, --fix]
- id: markdownlint-cli2
args: [--fix]

- repo: https://github.com/adrienverge/yamllint
rev: v1.37.1
hooks:
- id: yamllint

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.7
hooks:
# Run the linter with auto-fix
- id: ruff-check
args: [--fix]
# Run the formatter
- id: ruff-format

- repo: https://github.com/scop/pre-commit-shfmt
rev: v3.11.0-1
rev: v3.12.0-2
hooks:
- id: shfmt
args: ['--apply-ignore']
exclude: '^_tests/.*\.sh$'

- repo: https://github.com/koalaman/shellcheck-precommit
rev: v0.11.0
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.11.0.1
hooks:
- id: shellcheck
args: ['--severity=warning']
args: ['-x']
exclude: '^_tests/.*\.sh$'

- repo: https://github.com/rhysd/actionlint
rev: v1.7.7
rev: v1.7.9
hooks:
- id: actionlint
args: ['-shellcheck=']

- repo: https://github.com/renovatebot/pre-commit-hooks
rev: 41.97.10
hooks:
- id: renovate-config-validator

- repo: https://github.com/bridgecrewio/checkov.git
rev: '3.2.471'
rev: '3.2.495'
hooks:
- id: checkov
args:
- '--quiet'

- repo: https://github.com/gitleaks/gitleaks
rev: v8.30.0
hooks:
- id: gitleaks
3 .prettierignore Normal file
@@ -0,0 +1,3 @@
.github/renovate.json
.venv
.worktrees/
@@ -1,5 +1,5 @@
---
printWidth: 120
printWidth: 200
tabWidth: 2
useTabs: false
semi: true

1 .python-version Normal file
@@ -0,0 +1 @@
3.14.0
1 .serena/.gitignore vendored Normal file
@@ -0,0 +1 @@
/cache
384 .serena/memories/code_style_conventions.md Normal file
@@ -0,0 +1,384 @@
# Code Style and Conventions

## Critical Prevention Guidelines

1. **ALWAYS** add `id:` when step outputs will be referenced
   - Missing IDs cause `steps.*.outputs.*` to be undefined at runtime
   - Example: `id: detect-version` required before `steps.detect-version.outputs.version`
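   A minimal sketch of the pattern (the step and output names here are illustrative, not taken from the repository):

   ```yaml
   - name: Detect version
     id: detect-version # required so later steps can read the output
     shell: sh
     run: printf '%s\n' "version=1.2.3" >> "$GITHUB_OUTPUT"

   - name: Use the detected version
     shell: sh
     run: echo "Version is ${{ steps.detect-version.outputs.version }}"
   ```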

2. **ALWAYS** check tool availability before use
   - Not all tools (jq, bc, terraform) are available on all runner types
   - Pattern: `if command -v jq >/dev/null 2>&1; then ... else fallback; fi`
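   A hedged sketch of the fallback pattern (the tool and file names are only examples):

   ```yaml
   - name: Read coverage percent
     shell: sh
     run: |
       if command -v jq >/dev/null 2>&1; then
         coverage=$(jq -r '.coverage_percent' summary.json)
       else
         echo "::warning::jq not available - skipping coverage summary"
         coverage="n/a"
       fi
       printf 'Coverage: %s\n' "$coverage"
   ```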

3. **ALWAYS** sanitize user input before writing to `$GITHUB_OUTPUT`
   - Malicious inputs with newlines can inject additional outputs
   - Use `printf '%s\n' "$value"` or heredoc instead of `echo "$value"`
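   A minimal sketch, assuming an untrusted `title` input; the env-var indirection plus `tr -d '\n'` keeps newlines out of the output file:

   ```yaml
   - name: Export sanitized output
     id: meta
     shell: sh
     env:
       TITLE: ${{ inputs.title }}
     run: |
       # Strip embedded newlines so the value cannot inject extra outputs
       printf 'title=%s\n' "$(printf '%s' "$TITLE" | tr -d '\n')" >> "$GITHUB_OUTPUT"
   ```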

4. **ALWAYS** pin external actions to commit SHAs, not branches
   - `@main` or `@v1` tags can change, breaking reproducibility
   - Use full SHA: `actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683`

5. **ALWAYS** quote shell variables to handle spaces
   - Unquoted variables cause word splitting and globbing
   - Example: `"$variable"` not `$variable`, `basename -- "$path"` not `basename $path`

6. **ALWAYS** use SHA-pinned references for internal actions in action.yml
   - Security: immutable, auditable, portable when used externally
   - Pattern: `uses: ivuorinen/actions/common-cache@7061aafd35a2f21b57653e34f2b634b2a19334a9`
   - Test workflows use local: `uses: ./common-cache` (within repo only)

7. **ALWAYS** test regex patterns against edge cases
   - Include prerelease tags (`1.0.0-rc.1`), build metadata (`1.0.0+build.123`)
   - Version validation should support full semver/calver formats
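   One possible check, sketched with a POSIX ERE that accepts `1.2.3`, `1.0.0-rc.1` and `1.0.0+build.123` (the `VERSION` environment variable is an assumption):

   ```yaml
   - name: Validate version format
     shell: sh
     run: |
       if printf '%s' "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$'; then
         echo "Version format OK"
       else
         echo "::error::Unsupported version format: $VERSION"
         exit 1
       fi
   ```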

8. **ALWAYS** use POSIX shell (`set -eu`) for all scripts
   - Maximum portability: works on Alpine, busybox, all shells
   - Use `#!/bin/sh` not `#!/usr/bin/env bash`
   - Use `set -eu` not `set -euo pipefail` (pipefail not POSIX)

9. **Avoid** nesting `${{ }}` expressions inside quoted strings in specific contexts
   - In `hashFiles()`: `"${{ inputs.value }}"` breaks cache key generation - use unquoted or extract to variable
   - In most other contexts, quoting is required for safety (e.g., shell commands with spaces)
   - General rule: Quote for shell safety, unquote for YAML expressions in functions like hashFiles
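   A sketch of the difference, assuming a composite-action input named `lockfile` (placeholder `@SHA` as in the patterns above):

   ```yaml
   - uses: actions/cache@SHA
     with:
       path: ~/.npm
       # ✅ reference the input directly inside the expression
       key: npm-${{ hashFiles(inputs.lockfile) }}
       # ❌ avoid nesting a quoted "${{ inputs.lockfile }}" inside hashFiles(): it breaks the key
   ```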
|
||||
|
||||
10. **NEVER** assume tools are available across all runner types
|
||||
- macOS/Windows runners may lack Linux tools (jq, bc, specific GNU utils)
|
||||
- Always provide fallbacks or explicit installation steps
|
||||
|
||||
11. **NEVER** use `set-git-config` action - use direct git config or action parameters instead
|
||||
- Git-related actions (`peter-evans/create-pull-request`, `stefanzweifel/git-auto-commit-action`) handle their own auth
|
||||
- For direct git commands, configure git manually when needed: `git config user.name/user.email`
|
||||
- Pattern for actions with git-auto-commit:
|
||||
|
||||
```yaml
|
||||
- uses: stefanzweifel/git-auto-commit-action@SHA
|
||||
with:
|
||||
commit_user_name: ${{ inputs.username }}
|
||||
commit_user_email: ${{ inputs.email }}
|
||||
```
|
||||
|
||||
- Pattern for actions with direct git commands:
|
||||
|
||||
```yaml
|
||||
- shell: bash
|
||||
run: |
|
||||
git config user.name "${{ inputs.username }}"
|
||||
git config user.email "${{ inputs.email }}"
|
||||
git add .
|
||||
git commit -m "message"
|
||||
git push
|
||||
```
|
||||
|
||||
- Rationale: Avoids complexity, matches proven workflow pattern, no credential conflicts
|
||||
|
||||
## EditorConfig Rules (.editorconfig)
|
||||
|
||||
**CRITICAL**: EditorConfig violations are blocking errors and must be fixed always.
|
||||
|
||||
- **Charset**: UTF-8
|
||||
- **Line Endings**: LF (Unix style)
|
||||
- **Indentation**: 2 spaces globally
|
||||
- **Python override**: 4 spaces (`indent_size=4` for `*.py`)
|
||||
- **Makefile override**: Tabs (`indent_style=tab` for `Makefile`)
|
||||
- **Final Newline**: Required
|
||||
- **Max Line Length**: 200 characters (120 for Markdown)
|
||||
- **Trailing Whitespace**: Trimmed
|
||||
- **Tab Width**: 2 spaces
|
||||
|
||||
## Python Style (Ruff Configuration)
|
||||
|
||||
- **Target Version**: Python 3.8+
|
||||
- **Line Length**: 100 characters
|
||||
- **Indent Width**: 4 spaces
|
||||
- **Quote Style**: Double quotes
|
||||
- **Import Style**: isort with forced sorting within sections
|
||||
- **Docstring Convention**: Google style
|
||||
|
||||
### Enabled Rule Sets
|
||||
|
||||
Comprehensive linting with 30+ rule categories including:
|
||||
|
||||
- pycodestyle, Pyflakes, isort, pep8-naming
|
||||
- Security (bandit), bugbear, comprehensions
|
||||
- Performance optimizations, refactoring suggestions
|
||||
- Type checking, logging best practices
|
||||
|
||||
### Relaxed Rules for GitHub Actions Scripts
|
||||
|
||||
**Scope**: These relaxed rules apply ONLY to Python scripts running as GitHub Actions steps (composite action scripts). They override specific zero-tolerance rules for those files.
|
||||
|
||||
**Precedence**: For GitHub Actions scripts, allowed ignores take precedence over repository zero-tolerance rules; all other rules remain enforced.
|
||||
|
||||
**Allowed Ignore Codes**:
|
||||
|
||||
- `T201` - Allow print statements (GitHub Actions logging)
|
||||
- `S603`, `S607` - Allow subprocess calls (required for shell integration)
|
||||
- `S101` - Allow assert statements (validation assertions)
|
||||
- `BLE001` - Allow broad exception catches (workflow error handling)
|
||||
- `D103`, `D100` - Relaxed docstring requirements for simple scripts
|
||||
- `PLR0913` - Allow many function arguments (GitHub Actions input patterns)
|
||||
|
||||
**Example**: `# ruff: noqa: T201, S603` for action step scripts only
|
||||
|
||||
## Shell Script Standards (POSIX)
|
||||
|
||||
**ALL scripts use POSIX shell** (`#!/bin/sh`) for maximum portability.
|
||||
|
||||
### Required POSIX Compliance Checklist
|
||||
|
||||
- ✅ **Shebang**: `#!/bin/sh` (POSIX-compliant, not bash)
|
||||
- ✅ **Error Handling**: `set -eu` at script start (no pipefail - not POSIX)
|
||||
- ✅ **Defensive Expansion**: Use `${var:-default}` or `${var:?message}` patterns
|
||||
- ✅ **Quote Everything**: Always quote expansions: `"$var"`, `basename -- "$path"`
|
||||
- ✅ **Tool Availability**: `command -v tool >/dev/null 2>&1 || { echo "Missing tool"; exit 1; }`
|
||||
- ✅ **Portable Output**: Use `printf` instead of `echo -e`
|
||||
- ✅ **Portable Sourcing**: Use `. file` instead of `source file`
|
||||
- ✅ **POSIX Tests**: Use `[ ]` instead of `[[ ]]`
|
||||
- ✅ **Parsing**: Use `cut`, `grep`, pipes instead of here-strings `<<<`
|
||||
- ✅ **No Associative Arrays**: Use temp files or line-based processing
|
||||
|
||||
### Key POSIX Differences from Bash
|
||||
|
||||
| Bash Feature | POSIX Replacement |
|
||||
| --------------------- | --------------------------------- |
|
||||
| `#!/usr/bin/env bash` | `#!/bin/sh` |
|
||||
| `set -euo pipefail` | `set -eu` |
|
||||
| `[[ condition ]]` | `[ condition ]` |
|
||||
| `[[ $var =~ regex ]]` | `echo "$var" \| grep -qE 'regex'` |
|
||||
| `<<<` here-strings | `echo \| cut` or pipes |
|
||||
| `source file` | `. file` |
|
||||
| `$BASH_SOURCE` | `$0` |
|
||||
| `((var++))` | `var=$((var + 1))` |
|
||||
| `((var < 10))` | `[ "$var" -lt 10 ]` |
|
||||
| `echo -e` | `printf '%b'` |
|
||||
| `declare -A map` | temp files + sort/uniq |
|
||||
| Process substitution | pipes or temp files |
|
||||
|
||||
### Examples
|
||||
|
||||
```sh
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
# Defensive parameter expansion
|
||||
config_file="${CONFIG_FILE:-config.yml}" # Use default if unset
|
||||
required_param="${REQUIRED_PARAM:?Missing value}" # Error if unset
|
||||
|
||||
# Always quote expansions
|
||||
printf 'Processing: %s\n' "$config_file"
|
||||
result=$(basename -- "$file_path")
|
||||
|
||||
# POSIX test conditions
|
||||
if [ -f "$config_file" ]; then
|
||||
printf 'Found config\n'
|
||||
fi
|
||||
|
||||
# Portable output
|
||||
printf '%b' "Color: ${GREEN}text${NC}\n"
|
||||
```
|
||||
|
||||
### Why POSIX Shell
|
||||
|
||||
- **Portability**: Works on Alpine Linux, busybox, minimal containers, all POSIX shells
|
||||
- **Performance**: POSIX shells are lighter and faster than bash
|
||||
- **CI-Friendly**: Minimal dependencies, works everywhere
|
||||
- **Standards**: Follows POSIX best practices
|
||||
- **Compatibility**: Works with sh, dash, ash, bash, zsh
|
||||
|
||||
### Additional Requirements
|
||||
|
||||
- **Security**: All external actions SHA-pinned
|
||||
- **Token Authentication**: `${{ github.token }}` fallback pattern (sketched after this list)
|
||||
- **Validation**: shellcheck compliance required
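
A minimal sketch of the `${{ github.token }}` fallback pattern referenced above, as an `action.yml` input (the input name and description are illustrative):

```yaml
inputs:
  token:
    description: GitHub token for API calls (defaults to the workflow token)
    required: false
    default: ${{ github.token }}
```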
|
||||
|
||||
## YAML/GitHub Actions Style
|
||||
|
||||
- **Indentation**: 2 spaces consistent with EditorConfig
|
||||
- **Token Security**: Proper GitHub expression syntax (unquoted when needed)
|
||||
- **Validation**: actionlint and yaml-lint compliance
|
||||
- **Documentation**: Auto-generated README.md via action-docs
|
||||
- **Expression Safety**: Never nest `${{ }}` inside quoted strings
|
||||
|
||||
### Least-Privilege Permissions
|
||||
|
||||
Always scope permissions to minimum required. Set at workflow, workflow_call, or job level:
|
||||
|
||||
```yaml
|
||||
permissions:
|
||||
contents: read # Default for most workflows
|
||||
packages: write # Only if publishing packages
|
||||
pull-requests: write # Only if commenting on PRs
|
||||
# Omit unused permissions
|
||||
```
|
||||
|
||||
**Use GitHub-provided token**: `${{ github.token }}` over PATs when possible
|
||||
|
||||
**Scoped secrets**: `${{ secrets.MY_SECRET }}` never hardcoded
|
||||
|
||||
### Expression Context Examples
|
||||
|
||||
```yaml
|
||||
# Secrets context (always quote in run steps)
|
||||
run: echo "${{ secrets.MY_SECRET }}" | tool
|
||||
|
||||
# Matrix context (quote when used as value)
|
||||
run: echo "Testing ${{ matrix.version }}"
|
||||
|
||||
# Needs context (access outputs from dependent jobs)
|
||||
run: echo "${{ needs.build.outputs.artifact-id }}"
|
||||
|
||||
# Steps context (access outputs from previous steps)
|
||||
uses: action@v1
|
||||
with:
|
||||
value: ${{ steps.build.outputs.version }} # No quotes in 'with'
|
||||
|
||||
# Conditional expressions (no quotes)
|
||||
if: github.event_name == 'push'
|
||||
|
||||
# NEVER interpolate untrusted input into expressions
|
||||
# ❌ WRONG: run: echo "${{ github.event.issue.title }}" # Injection risk
|
||||
# ✅ RIGHT: Use env var: env: TITLE: ${{ github.event.issue.title }}
|
||||
```
|
||||
|
||||
**Quoting Rules**:
|
||||
|
||||
- Quote in `run:` steps when embedding in shell strings
|
||||
- Don't quote in `with:`, `env:`, `if:` - GitHub evaluates these
|
||||
- Never nest expressions: `"${{ inputs.value }}"` inside hashFiles breaks caching
|
||||
|
||||
### Internal Action References (SHA-Pinned)
|
||||
|
||||
**CRITICAL**: Action files (`*/action.yml`) use SHA-pinned references for security:
|
||||
|
||||
- ✅ **CORRECT**: `uses: ivuorinen/actions/action-name@7061aafd35a2f21b57653e34f2b634b2a19334a9`
|
||||
- ❌ **INCORRECT**: `uses: ./action-name` (security risk, not portable when used externally)
|
||||
- ❌ **INCORRECT**: `uses: ivuorinen/actions/action-name@main` (floating reference)
|
||||
|
||||
**Rationale**:
|
||||
|
||||
- **Security**: Immutable, auditable references
|
||||
- **Reproducibility**: Exact version control
|
||||
- **Portability**: Works when actions used externally (e.g., `ivuorinen/f2b` using `ivuorinen/actions/pr-lint`)
|
||||
- **Prevention**: No accidental version drift
|
||||
|
||||
**Test Workflows Exception**:
|
||||
|
||||
Test workflows in `_tests/` use local references since they run within the repo:
|
||||
|
||||
```yaml
|
||||
# ✅ Test workflows only
|
||||
uses: ./validate-inputs
|
||||
```
|
||||
|
||||
### External Action References (SHA-Pinned)
|
||||
|
||||
```yaml
|
||||
# ✅ Correct - SHA-pinned
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
|
||||
# ❌ Incorrect - floating reference
|
||||
uses: actions/checkout@main
|
||||
uses: actions/checkout@v4
|
||||
```
|
||||
|
||||
### Step Output References
|
||||
|
||||
**CRITICAL**: Steps must have `id:` to reference their outputs:
|
||||
|
||||
```yaml
|
||||
# ❌ INCORRECT - missing id
|
||||
- name: Detect Version
|
||||
uses: ivuorinen/actions/version-detect@<SHA>
|
||||
|
||||
- name: Setup
|
||||
with:
|
||||
version: ${{ steps.detect-version.outputs.version }} # UNDEFINED!
|
||||
|
||||
# ✅ CORRECT - id present
|
||||
- name: Detect Version
|
||||
id: detect-version # Required for output reference
|
||||
uses: ivuorinen/actions/version-detect@<SHA>
|
||||
|
||||
- name: Setup
|
||||
with:
|
||||
version: ${{ steps.detect-version.outputs.version }} # Works
|
||||
```
|
||||
|
||||
## Security Standards
|
||||
|
||||
- **No Secrets**: Never commit secrets or keys to repository
|
||||
- **No Logging**: Never expose or log secrets/keys in code
|
||||
- **SHA Pinning**: All action references (internal + external) use SHA commits, not tags
|
||||
- **Input Validation**: All actions import from shared validation library (`validate-inputs/`) - stateless validation functions, no inter-action dependencies
|
||||
- **Output Sanitization**: Use `printf` or heredoc for `$GITHUB_OUTPUT` writes (see the sketch after this list)
|
||||
- **Injection Prevention**: Validate inputs for command injection patterns (`;`, `&&`, `|`, backticks)
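
A minimal sketch of the output-sanitization and injection-prevention patterns above (the variable names are illustrative):

```sh
#!/bin/sh
set -eu

# Reject obvious command-injection metacharacters before using the input
case "$INPUT_TAG" in
  *';'* | *'&&'* | *'|'* | *'`'*)
    printf '::error::Invalid characters in tag input\n'
    exit 1
    ;;
esac

# Single-line output via printf, multiline output via the heredoc delimiter syntax
printf 'tag=%s\n' "$INPUT_TAG" >> "$GITHUB_OUTPUT"
{
  printf 'notes<<EOF\n'
  printf '%s\n' "$RELEASE_NOTES"
  printf 'EOF\n'
} >> "$GITHUB_OUTPUT"
```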
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
- **Actions**: kebab-case directory names (e.g., `node-setup`, `docker-build`)
|
||||
- **Files**: kebab-case for action files, snake_case for Python modules
|
||||
- **Variables**: snake_case in Python, kebab-case in YAML
|
||||
- **Functions**: snake_case in Python, descriptive names in shell
|
||||
|
||||
## Quality Gates
|
||||
|
||||
- **Linting**: Zero tolerance - all linting errors are blocking
|
||||
- **Testing**: Comprehensive test coverage required
|
||||
- **Documentation**: Auto-generated and maintained
|
||||
- **Validation**: All inputs validated via shared utility library imports (actions remain self-contained)
|
||||
|
||||
## Development Patterns
|
||||
|
||||
- **Self-Contained Actions**: No cross-dependencies between actions
|
||||
- **Modular Composition**: Actions achieve functionality through composition
|
||||
- **Convention-Based**: Automatic rule generation based on input naming patterns
|
||||
- **Error Handling**: Comprehensive error messages and proper exit codes
|
||||
- **Defensive Programming**: Check tool availability, validate inputs, handle edge cases
|
||||
- **POSIX Compliance**: All scripts portable across POSIX shells
|
||||
|
||||
## Pre-commit and Security Configuration
|
||||
|
||||
### Pre-commit Hooks (.pre-commit-config.yaml)
|
||||
|
||||
Comprehensive tooling with 11 different integrations:
|
||||
|
||||
**Local Integration**:
|
||||
|
||||
- `generate-docs-format-lint`: Runs `make all` for comprehensive project maintenance
|
||||
|
||||
**Core Quality Checks** (pre-commit-hooks v6.0.0):
|
||||
|
||||
- File integrity: trailing whitespace, end-of-file-fixer, mixed line endings
|
||||
- Syntax validation: check-ast, check-yaml (multiple documents), check-toml, check-xml
|
||||
- Security: detect-private-key, executable shebangs
|
||||
- JSON formatting: pretty-format-json with autofix
|
||||
|
||||
**Language-Specific Linting**:
|
||||
|
||||
- **Markdown**: markdownlint v0.45.0 with auto-fix
|
||||
- **YAML**: yamllint v1.37.1 for validation
|
||||
- **Python**: ruff v0.13.0 for linting (with fix) and formatting
|
||||
- **Shell**: shfmt v3.12.0-2 and shellcheck v0.11.0 (exclude `_tests/`)
|
||||
|
||||
**Infrastructure Tools**:
|
||||
|
||||
- **GitHub Actions**: actionlint v1.7.7 for workflow validation
|
||||
- **Renovate**: renovate-config-validator v41.113.3
|
||||
- **Security**: checkov v3.2.471 (quiet mode), gitleaks v8.28.0
|
||||
|
||||
### Gitleaks Configuration (.gitleaks.toml)
|
||||
|
||||
**Secret Detection**:
|
||||
|
||||
- Uses default gitleaks rules with smart exclusions
|
||||
- Allowlisted paths: `node_modules`, `.git`, `dist`, lock files, `_tests`
|
||||
- Dual-layer security with both pre-commit-hooks and gitleaks
|
||||
- Test exclusion prevents false positives from test fixtures
|
||||
|
||||
### Test Compatibility
|
||||
|
||||
**ShellSpec Integration**:
|
||||
|
||||
- Shell linting tools (shfmt, shellcheck) exclude `_tests/` directory
|
||||
- Prevents conflicts with ShellSpec test framework syntax
|
||||
- Maintains code quality while preserving test functionality
|
||||
`.serena/memories/development_standards.md` (new file, 201 lines)
|
||||
# Development Standards & Workflows
|
||||
|
||||
## Quality Standards (ZERO TOLERANCE)
|
||||
|
||||
### Production Ready Criteria
|
||||
|
||||
- ALL tests pass (100% success rate)
|
||||
- ALL linting passes (zero issues)
|
||||
- ALL validation checks pass
|
||||
- NO warnings or errors
|
||||
|
||||
### Communication
|
||||
|
||||
- Direct, factual only
|
||||
- Never claim "production ready" until literally everything passes
|
||||
- No hype, buzzwords, or excessive enthusiasm
|
||||
|
||||
## Required Commands
|
||||
|
||||
### Development Cycle
|
||||
|
||||
```bash
|
||||
make all # Complete: docs, format, lint, test
|
||||
make dev # Format + lint (development)
|
||||
make lint # All linters (MUST pass 100%)
|
||||
make test # All tests (MUST pass 100%)
|
||||
make format # Auto-fix formatting
|
||||
```
|
||||
|
||||
### Task Completion Checklist
|
||||
|
||||
After ANY coding task:
|
||||
|
||||
- [ ] `make lint` - Fix all issues (blocking)
|
||||
- [ ] `make test` - Ensure 100% pass
|
||||
- [ ] EditorConfig compliance verified
|
||||
|
||||
### Validation System
|
||||
|
||||
```bash
|
||||
make update-validators # Generate validation rules
|
||||
make update-validators-dry # Preview changes
|
||||
make generate-tests # Create missing tests
|
||||
make generate-tests-dry # Preview test generation
|
||||
```
|
||||
|
||||
### Version Management
|
||||
|
||||
```bash
|
||||
make release [VERSION=vYYYY.MM.DD] # Create new release (auto-generates version from date if omitted)
|
||||
make update-version-refs MAJOR=vYYYY # Update refs to version
|
||||
make bump-major-version OLD=vYYYY NEW=vYYYY # Annual bump
|
||||
make check-version-refs # Verify current refs
|
||||
```
|
||||
|
||||
See `versioning_system` memory for complete details.
|
||||
|
||||
## Code Style
|
||||
|
||||
### EditorConfig (BLOCKING ERRORS)
|
||||
|
||||
- **Indent**: 2 spaces (4 for Python, tabs for Makefile)
|
||||
- **Charset**: UTF-8
|
||||
- **Line Endings**: LF
|
||||
- **Max Line**: 200 chars (120 for Markdown)
|
||||
- **Final Newline**: Required
|
||||
- **Trailing Whitespace**: Trimmed
|
||||
|
||||
### Shell Scripts (POSIX REQUIRED)
|
||||
|
||||
**ALL scripts use POSIX shell** (`#!/bin/sh`) for maximum portability:
|
||||
|
||||
```sh
|
||||
#!/bin/sh
|
||||
set -eu # MANDATORY (no pipefail - not POSIX)
|
||||
# Quote everything: "$variable", basename -- "$path"
|
||||
# Check tools: command -v jq >/dev/null 2>&1
|
||||
# Use printf instead of echo -e for portability
|
||||
```
|
||||
|
||||
**Why POSIX:**
|
||||
|
||||
- Works on Alpine Linux, busybox, minimal containers
|
||||
- Faster than bash
|
||||
- Maximum compatibility (sh, dash, ash, bash, zsh)
|
||||
- CI-friendly, minimal dependencies
|
||||
|
||||
**Key Differences from Bash:**
|
||||
|
||||
- Use `#!/bin/sh` not `#!/usr/bin/env bash`
|
||||
- Use `set -eu` not `set -euo pipefail` (pipefail not POSIX)
|
||||
- Use `[ ]` not `[[ ]]`
|
||||
- Use `printf` not `echo -e`
|
||||
- Use `. file` not `source file`
|
||||
- Use `cut`/`grep` for parsing, not here-strings `<<<`
|
||||
- Use temp files instead of associative arrays
|
||||
- Use `$0` not `$BASH_SOURCE`
|
||||
|
||||
### Python (Ruff)
|
||||
|
||||
- **Line Length**: 100 chars
|
||||
- **Indent**: 4 spaces
|
||||
- **Quotes**: Double
|
||||
- **Docstrings**: Google style
|
||||
- **Type Hints**: Required
|
||||
|
||||
### YAML/Actions
|
||||
|
||||
- **Indent**: 2 spaces
|
||||
- **Internal Actions (action.yml)**: `ivuorinen/actions/action-name@<SHA>` (SHA-pinned, security)
|
||||
- **Test Workflows**: `./action-name` (local reference, runs within repo)
|
||||
- **Internal Workflows**: `./action-name` (local reference for sync-labels.yml etc)
|
||||
- **External Actions**: SHA-pinned (not `@main`/`@v1`)
|
||||
- **Step IDs**: Required when outputs referenced
|
||||
- **Permissions**: Minimal scope (contents: read default)
|
||||
- **Output Sanitization**: Use `printf`, never `echo` for `$GITHUB_OUTPUT`
|
||||
|
||||
## Versioning System
|
||||
|
||||
### Internal References (SHA-Pinned)
|
||||
|
||||
All `*/action.yml` files use SHA-pinned references for security and reproducibility:
|
||||
|
||||
```yaml
|
||||
uses: ivuorinen/actions/validate-inputs@7061aafd35a2f21b57653e34f2b634b2a19334a9
|
||||
```
|
||||
|
||||
**Why SHA-pinned internally:**
|
||||
|
||||
- Security: immutable, auditable references
|
||||
- Reproducibility: exact version control
|
||||
- Portability: works when actions used externally
|
||||
- Prevention: no accidental version drift
|
||||
|
||||
### Test Workflows (Local References)
|
||||
|
||||
Test workflows in `_tests/` use local references:
|
||||
|
||||
```yaml
|
||||
uses: ./validate-inputs
|
||||
```
|
||||
|
||||
**Why local in tests:** Tests run within the repo, faster, simpler
|
||||
|
||||
### External User References
|
||||
|
||||
Users reference with version tags:
|
||||
|
||||
```yaml
|
||||
uses: ivuorinen/actions/validate-inputs@v2025
|
||||
```
|
||||
|
||||
### Version Format (CalVer)
|
||||
|
||||
- Major: `v2025` (year)
|
||||
- Minor: `v2025.10` (year.month)
|
||||
- Patch: `v2025.10.18` (year.month.day)
|
||||
|
||||
All three tags point to the same commit SHA.
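
A hedged sketch of what aligning the three tags on one commit can look like; the actual logic lives in the Makefile `release` target, and the dates are illustrative:

```bash
sha=$(git rev-parse HEAD)
git tag -f v2025.10.18 "$sha"   # patch
git tag -f v2025.10 "$sha"      # minor
git tag -f v2025 "$sha"         # major
git push origin main --tags --force-with-lease
```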
|
||||
|
||||
### Creating Releases
|
||||
|
||||
```bash
|
||||
make release # Auto-generates vYYYY.MM.DD from today's date
|
||||
make release VERSION=v2025.10.18 # Specific version
|
||||
git push origin main --tags --force-with-lease
|
||||
```
|
||||
|
||||
## Security Requirements
|
||||
|
||||
1. **SHA Pinning**: All action references use commit SHAs (not moving tags)
|
||||
2. **Token Safety**: `${{ github.token }}`, never hardcoded
|
||||
3. **Input Validation**: All inputs validated via centralized system
|
||||
4. **Output Sanitization**: `printf '%s\n' "$value" >> "$GITHUB_OUTPUT"`
|
||||
5. **Injection Prevention**: Validate for `;`, `&&`, `|`, backticks
|
||||
6. **Tool Availability**: `command -v tool` checks before use
|
||||
7. **Variable Quoting**: Always `"$var"` in shell
|
||||
8. **No Secrets**: Never commit credentials/keys
|
||||
|
||||
## Never Do
|
||||
|
||||
- Never `git commit` (manual commits not allowed)
|
||||
- Never use `--no-verify` flags
|
||||
- Never modify linting config to make tests pass
|
||||
- Never assume linting issues are acceptable
|
||||
- Never skip testing after changes
|
||||
- Never create files unless absolutely necessary
|
||||
- Never nest `${{ }}` in quoted YAML strings (breaks hashFiles)
|
||||
- Never use `@main` for internal action references (use SHA-pinned)
|
||||
- Never use bash-specific features (scripts must be POSIX sh)
|
||||
|
||||
## Preferred Patterns
|
||||
|
||||
- POSIX shell for all scripts (not bash)
|
||||
- SHA-pinned internal action references (security)
|
||||
- Edit existing files over creating new ones
|
||||
- Use centralized validation for all input handling
|
||||
- Follow existing conventions in codebase
|
||||
- Actions use composition, not dependencies
|
||||
- Custom validators in action directories
|
||||
- Convention-based automatic detection
|
||||
`.serena/memories/documentation_guide.md` (new file, 101 lines)
|
||||
# Documentation Guide
|
||||
|
||||
## Documentation Locations
|
||||
|
||||
### Validation System Docs (`validate-inputs/docs/`)
|
||||
|
||||
Read when working with validators or validation logic:
|
||||
|
||||
**API.md** - Complete API reference
|
||||
|
||||
- BaseValidator methods and properties
|
||||
- Core validators (Boolean, Version, Token, Numeric, Docker, File, Network, Security, CodeQL)
|
||||
- Registry system usage
|
||||
- Custom validator patterns
|
||||
- Convention system
|
||||
|
||||
**DEVELOPER_GUIDE.md** - Creating new validators
|
||||
|
||||
- Quick start guide
|
||||
- Creating core validators (in validators/ directory)
|
||||
- Creating custom validators (in action directories)
|
||||
- Adding convention patterns
|
||||
- Writing tests, debugging, common patterns
|
||||
|
||||
**ACTION_MAINTAINER.md** - Using validation in actions
|
||||
|
||||
- How validation works (automatic integration)
|
||||
- Validation flow (input collection, validator selection, execution, error reporting)
|
||||
- Using automatic validation via conventions
|
||||
- Custom validation for complex scenarios
|
||||
- Testing validation, common scenarios, troubleshooting
|
||||
|
||||
**README_ARCHITECTURE.md** - System architecture
|
||||
|
||||
- Feature overview
|
||||
- Quick start examples
|
||||
- Architecture details
|
||||
- Modular validator structure
|
||||
- Convention-based detection
|
||||
- Custom validator support
|
||||
|
||||
### Testing Framework (`_tests/README.md`)
|
||||
|
||||
Read when writing or debugging tests:
|
||||
|
||||
- ShellSpec framework overview
|
||||
- Multi-level testing strategy (unit, integration, external usage)
|
||||
- Directory structure explanation
|
||||
- Test writing patterns
|
||||
- Running tests (`make test`, `make test-unit`, `make test-action ACTION=name`)
|
||||
- Coverage reporting
|
||||
- Mocking and fixtures
|
||||
- CI integration
|
||||
|
||||
### Docker Testing Tools (`_tools/docker-testing-tools/README.md`)
|
||||
|
||||
Read when working with CI or testing infrastructure:
|
||||
|
||||
- Pre-built Docker image with all testing tools
|
||||
- Pre-installed tools (ShellSpec, nektos/act, TruffleHog, actionlint, etc.)
|
||||
- Building locally (build.sh, test.sh)
|
||||
- Performance benefits (saves ~3 minutes per run)
|
||||
- Multi-stage build process
|
||||
- Usage in workflows
|
||||
|
||||
### Top-Level Documentation
|
||||
|
||||
**README.md** - Main project readme (auto-generated)
|
||||
|
||||
- Actions catalog
|
||||
- Usage examples
|
||||
- Quick reference
|
||||
|
||||
**SECURITY.md** - Security policy
|
||||
|
||||
- Reporting vulnerabilities
|
||||
- Security practices
|
||||
|
||||
**LICENSE.md** - MIT license
|
||||
|
||||
**CLAUDE.md** - Project instructions (covered in development_standards memory)
|
||||
|
||||
## When to Read What
|
||||
|
||||
**Starting new validator work**: Read `DEVELOPER_GUIDE.md`, then `API.md` for reference
|
||||
|
||||
**Using validation in action**: Read `ACTION_MAINTAINER.md`
|
||||
|
||||
**Understanding architecture**: Read `README_ARCHITECTURE.md`
|
||||
|
||||
**Writing tests**: Read `_tests/README.md`
|
||||
|
||||
**Setting up CI/testing**: Read `_tools/docker-testing-tools/README.md`
|
||||
|
||||
**API reference lookup**: Read `API.md` (has method tables, validator details)
|
||||
|
||||
## Documentation is Auto-Generated
|
||||
|
||||
- Action READMEs generated via `action-docs` (don't edit manually)
|
||||
- Validation system README auto-generated
|
||||
- Keep CLAUDE.md and docs/ files updated manually
|
||||
`.serena/memories/github-workflow-commands.md` (new file, 318 lines)
|
||||
# GitHub Actions Workflow Commands
|
||||
|
||||
Comprehensive reference for GitHub Actions workflow commands in bash.
|
||||
|
||||
## Basic Syntax
|
||||
|
||||
```bash
|
||||
::workflow-command parameter1={data},parameter2={data}::{command value}
|
||||
```
|
||||
|
||||
- Commands are case-insensitive
|
||||
- Works in Bash and PowerShell
|
||||
- Use UTF-8 encoding
|
||||
- Environment variables are case-sensitive
|
||||
|
||||
## Setting Outputs
|
||||
|
||||
**Syntax:**
|
||||
|
||||
```bash
|
||||
echo "{name}={value}" >> "$GITHUB_OUTPUT"
|
||||
```
|
||||
|
||||
**Multiline values:**
|
||||
|
||||
```bash
|
||||
{
|
||||
echo 'JSON_RESPONSE<<EOF'
|
||||
echo "$response"
|
||||
echo EOF
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "action_fruit=strawberry" >> "$GITHUB_OUTPUT"
|
||||
```
|
||||
|
||||
## Setting Environment Variables
|
||||
|
||||
**Syntax:**
|
||||
|
||||
```bash
|
||||
echo "{name}={value}" >> "$GITHUB_ENV"
|
||||
```
|
||||
|
||||
**Multiline values:**
|
||||
|
||||
```bash
|
||||
{
|
||||
echo 'MY_VAR<<EOF'
|
||||
echo "line 1"
|
||||
echo "line 2"
|
||||
echo EOF
|
||||
} >> "$GITHUB_ENV"
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "BUILD_DATE=$(date +%Y-%m-%d)" >> "$GITHUB_ENV"
|
||||
```
|
||||
|
||||
## Adding to System PATH
|
||||
|
||||
**Syntax:**
|
||||
|
||||
```bash
|
||||
echo "{path}" >> "$GITHUB_PATH"
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "$HOME/.local/bin" >> "$GITHUB_PATH"
|
||||
```
|
||||
|
||||
## Logging Commands
|
||||
|
||||
### Debug Message
|
||||
|
||||
```bash
|
||||
::debug::{message}
|
||||
```
|
||||
|
||||
Only visible when debug logging is enabled.
|
||||
|
||||
### Notice Message
|
||||
|
||||
```bash
|
||||
::notice file={name},line={line},col={col},endColumn={endColumn},title={title}::{message}
|
||||
```
|
||||
|
||||
Parameters (all optional):
|
||||
|
||||
- `file`: Filename
|
||||
- `line`: Line number
|
||||
- `col`: Column number
|
||||
- `endColumn`: End column number
|
||||
- `title`: Custom title
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "::notice file=app.js,line=42,col=5,endColumn=7::Variable 'x' is deprecated"
|
||||
```
|
||||
|
||||
### Warning Message
|
||||
|
||||
```bash
|
||||
::warning file={name},line={line},col={col},endColumn={endColumn},title={title}::{message}
|
||||
```
|
||||
|
||||
Same parameters as notice.
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "::warning::Missing semicolon"
|
||||
echo "::warning file=config.yml,line=10::Using deprecated syntax"
|
||||
```
|
||||
|
||||
### Error Message
|
||||
|
||||
```bash
|
||||
::error file={name},line={line},col={col},endColumn={endColumn},title={title}::{message}
|
||||
```
|
||||
|
||||
Same parameters as notice/warning.
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "::error::Build failed"
|
||||
echo "::error file=test.sh,line=15::Syntax error detected"
|
||||
```
|
||||
|
||||
## Grouping Log Lines
|
||||
|
||||
Collapsible log sections in the GitHub Actions UI.
|
||||
|
||||
**Syntax:**
|
||||
|
||||
```bash
|
||||
::group::{title}
|
||||
# commands here
|
||||
::endgroup::
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "::group::Installing dependencies"
|
||||
npm install
|
||||
echo "::endgroup::"
|
||||
```
|
||||
|
||||
## Masking Secrets
|
||||
|
||||
Prevents values from appearing in logs.
|
||||
|
||||
**Syntax:**
|
||||
|
||||
```bash
|
||||
::add-mask::{value}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
SECRET_TOKEN="abc123xyz"
|
||||
echo "::add-mask::$SECRET_TOKEN"
|
||||
echo "Token is: $SECRET_TOKEN" # Will show: Token is: ***
|
||||
```
|
||||
|
||||
## Stopping and Resuming Commands
|
||||
|
||||
Temporarily disable workflow command processing.
|
||||
|
||||
**Stop:**
|
||||
|
||||
```bash
|
||||
::stop-commands::{endtoken}
|
||||
```
|
||||
|
||||
**Resume:**
|
||||
|
||||
```bash
|
||||
::{endtoken}::
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
STOP_TOKEN=$(uuidgen)
|
||||
echo "::stop-commands::$STOP_TOKEN"
|
||||
echo "::warning::This won't be processed"
|
||||
echo "::$STOP_TOKEN::"
|
||||
echo "::notice::Commands resumed"
|
||||
```
|
||||
|
||||
## Echoing Command Output
|
||||
|
||||
Control whether action commands are echoed to the log.
|
||||
|
||||
**Enable:**
|
||||
|
||||
```bash
|
||||
::echo::on
|
||||
```
|
||||
|
||||
**Disable:**
|
||||
|
||||
```bash
|
||||
::echo::off
|
||||
```
|
||||
|
||||
## Job Summaries
|
||||
|
||||
Create Markdown summaries visible in the Actions UI.
|
||||
|
||||
**Syntax:**
|
||||
|
||||
```bash
|
||||
echo "{markdown content}" >> "$GITHUB_STEP_SUMMARY"
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
echo "### Test Results :rocket:" >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "- Tests passed: 42" >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "- Tests failed: 0" >> "$GITHUB_STEP_SUMMARY"
|
||||
```
|
||||
|
||||
**Multiline:**
|
||||
|
||||
```bash
|
||||
cat << 'EOF' >> "$GITHUB_STEP_SUMMARY"
|
||||
## Deployment Summary
|
||||
|
||||
| Environment | Status |
|
||||
|-------------|--------|
|
||||
| Staging | ✅ |
|
||||
| Production | ✅ |
|
||||
EOF
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Set multiple outputs
|
||||
|
||||
```bash
|
||||
{
|
||||
echo "version=$(cat version.txt)"
|
||||
echo "build_date=$(date -u +%Y-%m-%dT%H:%M:%SZ)"
|
||||
echo "commit_sha=$GITHUB_SHA"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
```
|
||||
|
||||
### Conditional error with file annotation
|
||||
|
||||
```bash
|
||||
if ! npm test; then
|
||||
echo "::error file=tests/unit.test.js,line=23::Test suite failed"
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
### Grouped logging with error handling
|
||||
|
||||
```bash
|
||||
echo "::group::Build application"
|
||||
if make build; then
|
||||
echo "::notice::Build completed successfully"
|
||||
else
|
||||
echo "::error::Build failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "::endgroup::"
|
||||
```
|
||||
|
||||
### Mask and use secret
|
||||
|
||||
```bash
|
||||
API_KEY=$(cat api-key.txt)
|
||||
echo "::add-mask::$API_KEY"
|
||||
echo "API_KEY=$API_KEY" >> "$GITHUB_ENV"
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always mask secrets** before using them
|
||||
2. **Use groups** for long output sections
|
||||
3. **Add file/line annotations** for code-related errors/warnings
|
||||
4. **Use multiline syntax** for complex values
|
||||
5. **Set outputs early** in the step
|
||||
6. **Use GITHUB_ENV** for values needed in subsequent steps
|
||||
7. **Use GITHUB_OUTPUT** for values consumed by other jobs/steps
|
||||
8. **Validate paths** before adding to GITHUB_PATH
|
||||
9. **Use unique tokens** for stop-commands
|
||||
10. **Add summaries** for important results
|
||||
|
||||
## Environment Files Reference
|
||||
|
||||
- `$GITHUB_ENV` - Set environment variables
|
||||
- `$GITHUB_OUTPUT` - Set step outputs
|
||||
- `$GITHUB_PATH` - Add to system PATH
|
||||
- `$GITHUB_STEP_SUMMARY` - Add Markdown summaries
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Never echo secrets without masking
|
||||
- Validate all user input before using in commands
|
||||
- Use `::add-mask::` immediately after reading secrets
|
||||
- Be aware that environment variables persist across steps
|
||||
- Outputs can be accessed by other jobs
|
||||
`.serena/memories/github-workflow-expressions.md` (new file, 329 lines)
|
||||
# GitHub Actions: Expressions and Contexts Reference
|
||||
|
||||
## Expression Syntax
|
||||
|
||||
GitHub Actions expressions are written using `${{ <expression> }}` syntax.
|
||||
|
||||
### Literals
|
||||
|
||||
**Supported Types:**
|
||||
|
||||
- Boolean: `true`, `false`
|
||||
- Null: `null`
|
||||
- Number: Integer or floating-point
|
||||
- String: Single or double quotes
|
||||
|
||||
**Falsy Values:**
|
||||
|
||||
- `false`, `0`, `-0`, `""`, `''`, `null`
|
||||
|
||||
**Truthy Values:**
|
||||
|
||||
- `true` and all non-falsy values
|
||||
|
||||
## Operators
|
||||
|
||||
### Logical Operators
|
||||
|
||||
- `( )` - Grouping
|
||||
- `!` - NOT
|
||||
- `&&` - AND
|
||||
- `||` - OR
|
||||
|
||||
### Comparison Operators
|
||||
|
||||
- `==` - Equal (case-insensitive for strings)
|
||||
- `!=` - Not equal
|
||||
- `<` - Less than
|
||||
- `<=` - Less than or equal
|
||||
- `>` - Greater than
|
||||
- `>=` - Greater than or equal
|
||||
|
||||
## Built-in Functions
|
||||
|
||||
### String Functions
|
||||
|
||||
```yaml
|
||||
contains(search, item) # Check if item exists in search string/array
|
||||
startsWith(searchString, searchValue) # Check prefix
|
||||
endsWith(searchString, searchValue) # Check suffix
|
||||
format(string, replaceValue0, replaceValue1, ...) # String formatting
|
||||
join(array, optionalSeparator) # Join array elements
|
||||
```
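An illustrative step combining `contains()`, `format()`, and `join()` (the step name and env variable names are assumptions):

```yaml
- name: Summarize PR labels
  if: contains(github.event.pull_request.labels.*.name, 'release')
  env:
    GREETING: ${{ format('Hello {0} from {1}', github.actor, github.repository) }}
    LABELS: ${{ join(github.event.pull_request.labels.*.name, ', ') }}
  run: printf '%s\nLabels: %s\n' "$GREETING" "$LABELS"
```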
|
||||
|
||||
### Conversion Functions
|
||||
|
||||
```yaml
|
||||
toJSON(value) # Convert to JSON string
|
||||
fromJSON(value) # Parse JSON string to object/type
|
||||
```
|
||||
|
||||
### Status Check Functions
|
||||
|
||||
```yaml
|
||||
success() # True if no previous step failed
|
||||
always() # Always returns true, step always runs
|
||||
cancelled() # True if workflow cancelled
|
||||
failure() # True if any previous step failed
|
||||
```
|
||||
|
||||
### Hash Functions
|
||||
|
||||
```yaml
|
||||
hashFiles(path) # Generate SHA-256 hash of files matching pattern
|
||||
```
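A typical `hashFiles()` use is a cache key that changes only when lockfiles change (the cache action reference is a placeholder; pin to a full commit SHA in practice):

```yaml
- uses: actions/cache@<SHA>
  with:
    path: ~/.npm
    key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
```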
|
||||
|
||||
## Type Casting Rules
|
||||
|
||||
GitHub Actions performs **loose equality comparisons**:
|
||||
|
||||
- Numbers compared as floating-point
|
||||
- Strings are case-insensitive when compared
|
||||
- Type mismatches coerced to numbers:
|
||||
- Null → `0`
|
||||
- Boolean → `1` (true) or `0` (false)
|
||||
- String → Parsed as number, or `NaN` if invalid
|
||||
- Array/Object → `NaN`
|
||||
- Objects/arrays only equal if same instance reference
|
||||
|
||||
**Best Practice:** Use `fromJSON()` for precise numerical comparisons
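
For example, forcing a numeric comparison instead of string coercion (the step id and output name are illustrative):

```yaml
- name: Fail when too many warnings
  if: fromJSON(steps.lint.outputs.warning_count) > 10
  run: exit 1
```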
|
||||
|
||||
## Contexts
|
||||
|
||||
### `github` Context
|
||||
|
||||
Workflow run and event information:
|
||||
|
||||
```yaml
|
||||
${{ github.event }} # Full webhook payload
|
||||
${{ github.actor }} # User who triggered workflow
|
||||
${{ github.ref }} # Branch/tag reference (e.g., refs/heads/main)
|
||||
${{ github.repository }} # owner/repo format
|
||||
${{ github.sha }} # Commit SHA
|
||||
${{ github.token }} # Automatic GITHUB_TOKEN
|
||||
${{ github.event_name }} # Event that triggered workflow
|
||||
${{ github.run_id }} # Unique workflow run ID
|
||||
${{ github.run_number }} # Run number for this workflow
|
||||
${{ github.job }} # Job ID
|
||||
${{ github.workflow }} # Workflow name
|
||||
```
|
||||
|
||||
### `env` Context
|
||||
|
||||
Environment variables (workflow → job → step scope):
|
||||
|
||||
```yaml
|
||||
${{ env.MY_VARIABLE }}
|
||||
```
|
||||
|
||||
### `vars` Context
|
||||
|
||||
Configuration variables (organization/repo/environment level):
|
||||
|
||||
```yaml
|
||||
${{ vars.MY_CONFIG_VAR }}
|
||||
```
|
||||
|
||||
### `secrets` Context
|
||||
|
||||
Secret values (never printed to logs):
|
||||
|
||||
```yaml
|
||||
${{ secrets.MY_SECRET }}
|
||||
${{ secrets.GITHUB_TOKEN }} # Automatic token
|
||||
```
|
||||
|
||||
### `inputs` Context
|
||||
|
||||
Inputs for reusable workflows or workflow_dispatch:
|
||||
|
||||
```yaml
|
||||
${{ inputs.deploy_target }}
|
||||
${{ inputs.environment }}
|
||||
```
|
||||
|
||||
### `steps` Context
|
||||
|
||||
Information from previous steps in same job:
|
||||
|
||||
```yaml
|
||||
${{ steps.step_id.outputs.output_name }}
|
||||
${{ steps.step_id.outcome }} # success, failure, cancelled, skipped
|
||||
${{ steps.step_id.conclusion }} # success, failure, cancelled, skipped
|
||||
```
|
||||
|
||||
### `job` Context
|
||||
|
||||
Current job information:
|
||||
|
||||
```yaml
|
||||
${{ job.status }} # success, failure, cancelled
|
||||
${{ job.container.id }} # Container ID if running in container
|
||||
${{ job.services }} # Service containers
|
||||
```
|
||||
|
||||
### `runner` Context
|
||||
|
||||
Runner environment details:
|
||||
|
||||
```yaml
|
||||
${{ runner.os }} # Linux, Windows, macOS
|
||||
${{ runner.arch }} # X86, X64, ARM, ARM64
|
||||
${{ runner.temp }} # Temporary directory path
|
||||
${{ runner.tool_cache }} # Tool cache directory
|
||||
```
|
||||
|
||||
### `needs` Context
|
||||
|
||||
Outputs from jobs that current job depends on:
|
||||
|
||||
```yaml
|
||||
${{ needs.job_id.outputs.output_name }}
|
||||
${{ needs.job_id.result }} # success, failure, cancelled, skipped
|
||||
```
|
||||
|
||||
### `matrix` Context
|
||||
|
||||
Matrix strategy values:
|
||||
|
||||
```yaml
|
||||
${{ matrix.os }}
|
||||
${{ matrix.version }}
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Conditional Execution
|
||||
|
||||
```yaml
|
||||
if: github.ref == 'refs/heads/main'
|
||||
if: success()
|
||||
if: failure() && steps.test.outcome == 'failure'
|
||||
if: always()
|
||||
```
|
||||
|
||||
### Ternary-like Logic
|
||||
|
||||
```yaml
|
||||
env:
|
||||
DEPLOY_ENV: ${{ github.ref == 'refs/heads/main' && 'production' || 'staging' }}
|
||||
```
|
||||
|
||||
### String Manipulation
|
||||
|
||||
```yaml
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
if: contains(github.event.head_commit.message, '[skip ci]')
|
||||
if: endsWith(github.repository, '-prod')
|
||||
```
|
||||
|
||||
### Array/Object Access
|
||||
|
||||
```yaml
|
||||
${{ github.event.pull_request.title }}
|
||||
${{ fromJSON(steps.output.outputs.json_data).key }}
|
||||
```
|
||||
|
||||
### Combining Conditions
|
||||
|
||||
```yaml
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
if: (github.event_name == 'pull_request' || github.event_name == 'push') && !cancelled()
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Environment Variables for Shell Scripts:**
|
||||
- ✅ Use `env:` block to pass inputs to shell scripts
|
||||
- ❌ Avoid direct `${{ inputs.* }}` in shell commands (script injection risk)
|
||||
|
||||
2. **Secret Masking:**
|
||||
|
||||
```yaml
|
||||
- run: echo "::add-mask::${{ secrets.MY_SECRET }}"
|
||||
```
|
||||
|
||||
3. **Input Validation:**
|
||||
- Always validate user inputs before use
|
||||
- Use dedicated validation steps
|
||||
- Check for command injection patterns
|
||||
|
||||
4. **Type Safety:**
|
||||
- Use `fromJSON()` for structured data
|
||||
- Cast to expected types explicitly
|
||||
- Validate ranges and formats
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
1. **String Comparison Case Sensitivity:**
|
||||
- GitHub Actions comparisons are case-insensitive
|
||||
- Be careful with exact matches
|
||||
|
||||
2. **Type Coercion:**
|
||||
- Empty string `""` is falsy, not truthy
|
||||
- Number `0` is falsy
|
||||
- Use `fromJSON()` for precise comparisons
|
||||
|
||||
3. **Object/Array Equality:**
|
||||
- Objects/arrays compared by reference, not value
|
||||
- Use `toJSON()` to compare by value
|
||||
|
||||
4. **Status Functions:**
|
||||
- `success()` checks ALL previous steps
|
||||
- Use `steps.id.outcome` for specific step status
|
||||
|
||||
5. **Context Availability:**
|
||||
- Not all contexts available in all places
|
||||
- `env` context not available in `if:` at workflow/job level
|
||||
- `secrets` should never be used in `if:` conditions (may leak)
|
||||
|
||||
## Examples from Project
|
||||
|
||||
### Input Validation Pattern
|
||||
|
||||
```yaml
|
||||
- name: Validate Inputs
|
||||
env:
|
||||
VERSION: ${{ inputs.version }}
|
||||
EMAIL: ${{ inputs.email }}
|
||||
run: |
|
||||
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+(\.[0-9]+)?$ ]]; then
|
||||
echo "::error::Invalid version: $VERSION"
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
### Conditional Steps
|
||||
|
||||
```yaml
|
||||
- name: Deploy Production
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
run: ./deploy.sh production
|
||||
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: ./cleanup.sh
|
||||
```
|
||||
|
||||
### Dynamic Outputs
|
||||
|
||||
```yaml
|
||||
- name: Set Environment
|
||||
id: env
|
||||
run: |
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
echo "environment=production" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "environment=staging" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Deploy
|
||||
run: ./deploy.sh ${{ steps.env.outputs.environment }}
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- [GitHub Actions Expressions](https://docs.github.com/en/actions/reference/workflows-and-actions/expressions)
|
||||
- [GitHub Actions Contexts](https://docs.github.com/en/actions/learn-github-actions/contexts)
|
||||
- Project validation patterns in `validate-inputs/` directory
|
||||
- Security patterns documented in `CLAUDE.md`
|
||||
`.serena/memories/github-workflow-secure-use.md` (new file, 482 lines)
|
||||
# GitHub Actions Security Best Practices
|
||||
|
||||
Comprehensive guide for secure use of GitHub Actions workflows.
|
||||
|
||||
## Core Security Principles
|
||||
|
||||
1. **Principle of Least Privilege** - Grant minimum necessary permissions
|
||||
2. **Defense in Depth** - Layer multiple security controls
|
||||
3. **Zero Trust** - Verify explicitly, never assume trust
|
||||
4. **Audit and Monitor** - Track and review all security-relevant events
|
||||
|
||||
## Secrets Management
|
||||
|
||||
### Storing Secrets
|
||||
|
||||
✅ **DO:**
|
||||
|
||||
- Store sensitive data in GitHub Secrets
|
||||
- Use organization-level secrets for shared values
|
||||
- Use environment-specific secrets
|
||||
- Register all secrets used in workflows
|
||||
|
||||
❌ **DON'T:**
|
||||
|
||||
- Hard-code secrets in workflow files
|
||||
- Echo secrets to logs
|
||||
- Store secrets in environment variables without masking
|
||||
|
||||
⚠️ **USE WITH CAUTION:**
|
||||
|
||||
- **Structured secrets (JSON, YAML, multi-line keys)**: While sometimes necessary (e.g., service account keys, certificate bundles), they carry additional risks:
|
||||
- **Risks**: Parsing errors can expose content, accidental logging during manipulation, partial leaks when extracting fields
|
||||
- **Mitigations**:
|
||||
- Treat secrets as opaque blobs whenever possible (pass entire secret to tools without parsing)
|
||||
- Never print, echo, or log secrets during parsing/extraction
|
||||
- Use `::add-mask::` before any manipulation
|
||||
- Prefer base64-encoded single-line format for transport
|
||||
- Consider secrets managers (Vault, AWS Secrets Manager) for complex credentials
|
||||
- Write secrets to temporary files with restricted permissions rather than parsing in shell
|
||||
- Limit secret scope and access (repository-level, not organization-wide)
|
||||
- Parse/validate only in secure, well-audited code paths with proper error handling
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
- name: Use secret
|
||||
env:
|
||||
API_KEY: ${{ secrets.API_KEY }}
|
||||
run: |
|
||||
echo "::add-mask::$API_KEY"
|
||||
curl -H "Authorization: Bearer $API_KEY" https://api.example.com
|
||||
```
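A hedged sketch of the temp-file mitigation for a structured secret (the secret name and consuming CLI are assumptions):

```yaml
- name: Use service account key via restricted temp file
  env:
    SERVICE_ACCOUNT_JSON: ${{ secrets.SERVICE_ACCOUNT_JSON }}
  run: |
    umask 077                      # temp file created with owner-only permissions
    key_file=$(mktemp)
    printf '%s' "$SERVICE_ACCOUNT_JSON" > "$key_file"
    some-cloud-cli auth --key-file "$key_file"   # hypothetical consumer
    rm -f "$key_file"
```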
|
||||
|
||||
### Masking Sensitive Data
|
||||
|
||||
Always mask secrets before using them:
|
||||
|
||||
```bash
|
||||
# Mask the secret
|
||||
echo "::add-mask::$SECRET_VALUE"
|
||||
|
||||
# Use in commands; avoid printing it even when masked
|
||||
curl -H "Authorization: Bearer $SECRET_VALUE" https://api.example.com
|
||||
```
|
||||
|
||||
### Secret Rotation
|
||||
|
||||
1. **Immediately rotate** exposed secrets
|
||||
2. **Delete** compromised secrets from GitHub
|
||||
3. **Audit** workflow runs that used the secret
|
||||
4. **Review** access logs
|
||||
5. **Update** all systems using the secret
|
||||
|
||||
## Script Injection Prevention
|
||||
|
||||
### The Problem
|
||||
|
||||
User input can inject malicious code:
|
||||
|
||||
```yaml
|
||||
# VULNERABLE
|
||||
- name: Greet user
|
||||
run: echo "Hello ${{ github.event.issue.title }}"
|
||||
```
|
||||
|
||||
If issue title is: `"; rm -rf / #`, the command becomes:
|
||||
|
||||
```bash
|
||||
echo "Hello "; rm -rf / #"
|
||||
```
|
||||
|
||||
### Solution 1: Use Intermediate Environment Variables
|
||||
|
||||
```yaml
|
||||
# SAFE
|
||||
- name: Greet user
|
||||
env:
|
||||
TITLE: ${{ github.event.issue.title }}
|
||||
run: echo "Hello $TITLE"
|
||||
```
|
||||
|
||||
### Solution 2: Use Actions Instead of Scripts
|
||||
|
||||
```yaml
|
||||
# SAFE - Use action instead of inline script
|
||||
- name: Comment on PR
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
github.rest.issues.createComment({
|
||||
issue_number: context.issue.number,
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
body: `Hello ${context.payload.issue.title}`
|
||||
})
|
||||
```
|
||||
|
||||
### Solution 3: Proper Quoting
|
||||
|
||||
Always use double quotes for variables:
|
||||
|
||||
```bash
|
||||
# VULNERABLE
|
||||
echo Hello $USER_INPUT
|
||||
|
||||
# SAFE
|
||||
echo "Hello $USER_INPUT"
|
||||
```
|
||||
|
||||
### High-Risk Inputs
|
||||
|
||||
Be especially careful with:
|
||||
|
||||
- `github.event.issue.title`
|
||||
- `github.event.issue.body`
|
||||
- `github.event.pull_request.title`
|
||||
- `github.event.pull_request.body`
|
||||
- `github.event.comment.body`
|
||||
- `github.event.review.body`
|
||||
- `github.event.head_commit.message`
|
||||
- Any user-provided input
|
||||
|
||||
## Third-Party Actions Security
|
||||
|
||||
### Pinning Actions
|
||||
|
||||
✅ **BEST: Pin to full commit SHA**
|
||||
|
||||
```yaml
|
||||
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
|
||||
```
|
||||
|
||||
⚠️ **ACCEPTABLE: Pin to tag (for verified creators only)**
|
||||
|
||||
```yaml
|
||||
- uses: actions/checkout@v3.5.2
|
||||
```
|
||||
|
||||
❌ **DANGEROUS: Use branch or mutable tag**
|
||||
|
||||
```yaml
|
||||
- uses: actions/checkout@main # DON'T DO THIS
|
||||
```
|
||||
|
||||
### Auditing Actions
|
||||
|
||||
Before using third-party actions:
|
||||
|
||||
1. **Review source code** - Check the action's repository
|
||||
2. **Check maintainer** - Look for "Verified creator" badge
|
||||
3. **Read reviews** - Check community feedback
|
||||
4. **Verify permissions** - Understand what the action accesses
|
||||
5. **Check dependencies** - Review what the action installs
|
||||
|
||||
### Verified Creators
|
||||
|
||||
Actions from these sources are generally safer:
|
||||
|
||||
- GitHub Official (`actions/*`)
|
||||
- Major cloud providers (AWS, Azure, Google)
|
||||
- Well-known organizations with verified badges
|
||||
|
||||
## Token and Permission Management
|
||||
|
||||
### GITHUB_TOKEN Permissions
|
||||
|
||||
Set restrictive defaults:
|
||||
|
||||
```yaml
|
||||
permissions:
|
||||
contents: read # Default to read-only
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # Only elevate what's needed
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
```
|
||||
|
||||
### Available Permissions
|
||||
|
||||
- `actions`: read|write
|
||||
- `checks`: read|write
|
||||
- `contents`: read|write
|
||||
- `deployments`: read|write
|
||||
- `issues`: read|write
|
||||
- `packages`: read|write
|
||||
- `pages`: read|write
|
||||
- `pull-requests`: read|write
|
||||
- `repository-projects`: read|write
|
||||
- `security-events`: read|write
|
||||
- `statuses`: read|write
|
||||
|
||||
### Principle of Least Privilege
|
||||
|
||||
```yaml
|
||||
# GOOD - Minimal permissions
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write # Only what's needed
|
||||
|
||||
# BAD - Overly permissive
|
||||
permissions: write-all
|
||||
```
|
||||
|
||||
## Runner Security
|
||||
|
||||
### GitHub-Hosted Runners (Recommended)
|
||||
|
||||
✅ **Advantages:**
|
||||
|
||||
- Isolated, ephemeral environments
|
||||
- Automatic patching and updates
|
||||
- No infrastructure management
|
||||
- Better security by default
|
||||
|
||||
### Self-Hosted Runners
|
||||
|
||||
⚠️ **Use with extreme caution:**
|
||||
|
||||
**Risks:**
|
||||
|
||||
- Persistent environments can retain secrets
|
||||
- Accessible to all workflows in repository (public repos)
|
||||
- Requires security hardening
|
||||
- Manual patching and updates
|
||||
|
||||
**If you must use self-hosted:**
|
||||
|
||||
1. **Use JIT (Just-In-Time) runners**
|
||||
- Ephemeral, created on-demand
|
||||
- Automatically destroyed after use
|
||||
|
||||
2. **Never use self-hosted runners for public repositories**
|
||||
|
||||
3. **Organize into groups with restricted access**
|
||||
|
||||
4. **Implement network isolation**
|
||||
|
||||
5. **Use minimal, hardened OS images**
|
||||
|
||||
6. **Rotate regularly**
|
||||
|
||||
### Runner Groups
|
||||
|
||||
```yaml
|
||||
# Restrict workflow to specific runner group
|
||||
runs-on:
|
||||
group: private-runners
|
||||
labels: ubuntu-latest
|
||||
```
|
||||
|
||||
## Code Scanning and Vulnerability Detection
|
||||
|
||||
### Enable CodeQL
|
||||
|
||||
```yaml
|
||||
name: 'Code Scanning'
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: github/codeql-action/init@v2
|
||||
- uses: github/codeql-action/autobuild@v2
|
||||
- uses: github/codeql-action/analyze@v2
|
||||
```
|
||||
|
||||
### Dependabot for Actions
|
||||
|
||||
```yaml
|
||||
# .github/dependabot.yml
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: 'github-actions'
|
||||
directory: '/'
|
||||
schedule:
|
||||
interval: 'weekly'
|
||||
```
|
||||
|
||||
## OpenID Connect (OIDC)
|
||||
|
||||
Use OIDC for cloud authentication (no long-lived credentials):
|
||||
|
||||
```yaml
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write # Required for OIDC
|
||||
contents: read
|
||||
steps:
|
||||
- uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::123456789012:role/MyRole
|
||||
aws-region: us-east-1
|
||||
```
|
||||
|
||||
## Environment Protection Rules
|
||||
|
||||
Use environments for sensitive deployments:
|
||||
|
||||
```yaml
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: production
|
||||
url: https://example.com
|
||||
steps:
|
||||
- name: Deploy
|
||||
run: ./deploy.sh
|
||||
```
|
||||
|
||||
**Configure in repository settings:**
|
||||
|
||||
- Required reviewers
|
||||
- Wait timer
|
||||
- Deployment branches
|
||||
- Environment secrets
|
||||
|
||||
## Security Checklist
|
||||
|
||||
### For Every Workflow
|
||||
|
||||
- [ ] Pin all third-party actions to commit SHAs
|
||||
- [ ] Set minimal `permissions` at workflow/job level
|
||||
- [ ] Use intermediate environment variables for user input
|
||||
- [ ] Mask all secrets with `::add-mask::`
|
||||
- [ ] Never echo secrets to logs
|
||||
- [ ] Use double quotes for shell variables
|
||||
- [ ] Prefer actions over inline scripts
|
||||
- [ ] Use GitHub-hosted runners when possible
|
||||
- [ ] Enable code scanning (CodeQL)
|
||||
- [ ] Configure Dependabot for actions
|
||||
|
||||
### For Self-Hosted Runners
|
||||
|
||||
- [ ] Never use for public repositories
|
||||
- [ ] Use JIT runners when possible
|
||||
- [ ] Implement network isolation
|
||||
- [ ] Use minimal, hardened OS images
|
||||
- [ ] Rotate runners regularly
|
||||
- [ ] Organize into restricted groups
|
||||
- [ ] Monitor and audit runner activity
|
||||
- [ ] Implement resource limits
|
||||
|
||||
### For Secrets
|
||||
|
||||
- [ ] Use GitHub Secrets (not environment variables)
|
||||
- [ ] Rotate secrets regularly
|
||||
- [ ] Delete exposed secrets immediately
|
||||
- [ ] Audit secret usage
|
||||
- [ ] Use environment-specific secrets
|
||||
- [ ] Avoid structured data in secrets; apply the mitigations above when unavoidable
|
||||
- [ ] Implement secret scanning
|
||||
|
||||
## Common Vulnerabilities
|
||||
|
||||
### Command Injection
|
||||
|
||||
```yaml
|
||||
# VULNERABLE
|
||||
run: echo "${{ github.event.comment.body }}"
|
||||
|
||||
# SAFE
|
||||
env:
|
||||
COMMENT: ${{ github.event.comment.body }}
|
||||
run: echo "$COMMENT"
|
||||
```
|
||||
|
||||
### Secret Exposure
|
||||
|
||||
```yaml
|
||||
# VULNERABLE
|
||||
run: |
|
||||
echo "API Key: ${{ secrets.API_KEY }}"
|
||||
|
||||
# SAFE
|
||||
run: |
|
||||
echo "::add-mask::${{ secrets.API_KEY }}"
|
||||
curl -H "Authorization: Bearer ${{ secrets.API_KEY }}" https://api.example.com
|
||||
```
|
||||
|
||||
### Privilege Escalation
|
||||
|
||||
```yaml
|
||||
# VULNERABLE - Too permissive
|
||||
permissions: write-all
|
||||
|
||||
# SAFE - Minimal permissions
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
```
|
||||
|
||||
## Supply Chain Security
|
||||
|
||||
### OpenSSF Scorecard
|
||||
|
||||
Monitor your security posture:
|
||||
|
||||
```yaml
|
||||
name: Scorecard
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: ossf/scorecard-action@v2
|
||||
- uses: github/codeql-action/upload-sarif@v2
|
||||
```
|
||||
|
||||
### Software Bill of Materials (SBOM)
|
||||
|
||||
Track dependencies:
|
||||
|
||||
```yaml
|
||||
- name: Generate SBOM
|
||||
uses: anchore/sbom-action@v0
|
||||
with:
|
||||
path: ./
|
||||
format: spdx-json
|
||||
```
|
||||
|
||||
## Incident Response
|
||||
|
||||
If a security incident occurs:
|
||||
|
||||
1. **Immediately rotate** all potentially compromised secrets
|
||||
2. **Disable** affected workflows
|
||||
3. **Review** workflow run logs
|
||||
4. **Audit** repository access
|
||||
5. **Check** for unauthorized changes
|
||||
6. **Investigate** all workflow runs during incident window
|
||||
7. **Document** findings and remediation
|
||||
8. **Update** security controls to prevent recurrence
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [GitHub Security Advisories](https://github.com/advisories)
|
||||
- [Actions Security Hardening](https://docs.github.com/actions/security-guides)
|
||||
- [OIDC with Cloud Providers](https://docs.github.com/actions/deployment/security-hardening-your-deployments)
|
||||
- [Self-Hosted Runner Security](https://docs.github.com/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security)
|
||||
`.serena/memories/repository_overview.md` (new file, 115 lines)
|
||||
# GitHub Actions Monorepo - Overview
|
||||
|
||||
## Repository Info
|
||||
|
||||
- **Path**: /Users/ivuorinen/Code/ivuorinen/actions
|
||||
- **Branch**: main
|
||||
- **External Usage**: `ivuorinen/actions/<action-name>@main`
|
||||
- **Total Actions**: 44 self-contained actions
|
||||
- **Dogfooding**: Workflows use local actions (pr-lint, codeql-analysis, security-scan)
|
||||
|
||||
## Structure
|
||||
|
||||
```text
|
||||
/
|
||||
├── <action-dirs>/ # 44 self-contained actions
|
||||
│ ├── action.yml # Action definition
|
||||
│ ├── README.md # Auto-generated
|
||||
│ └── CustomValidator.py # Optional validator
|
||||
├── validate-inputs/ # Centralized validation
|
||||
│ ├── validators/ # 9 specialized modules
|
||||
│ ├── scripts/ # Rule/test generators
|
||||
│ └── tests/ # 769 pytest tests
|
||||
├── _tests/ # ShellSpec framework
|
||||
├── _tools/ # Development utilities
|
||||
├── .github/workflows/ # CI/CD workflows
|
||||
└── Makefile # Build automation
|
||||
```
|
||||
|
||||
## Action Categories (44 total)
|
||||
|
||||
**Setup (7)**: node-setup, set-git-config, php-version-detect, python-version-detect, python-version-detect-v2, go-version-detect, dotnet-version-detect
|
||||
|
||||
**Linting (13)**: ansible-lint-fix, biome-check/fix, csharp-lint-check, eslint-check/fix, go-lint, pr-lint, pre-commit, prettier-check/fix, python-lint-fix, terraform-lint-fix
|
||||
|
||||
**Security (1)**: security-scan (actionlint, Gitleaks, Trivy scanning)
|
||||
|
||||
**Build (3)**: csharp-build, go-build, docker-build
|
||||
|
||||
**Publishing (5)**: npm-publish, docker-publish, docker-publish-gh, docker-publish-hub, csharp-publish
|
||||
|
||||
**Testing (3)**: php-tests, php-laravel-phpunit, php-composer
|
||||
|
||||
**Repository (9)**: github-release, release-monthly, sync-labels, stale, compress-images, common-cache, common-file-check, common-retry, codeql-analysis
|
||||
|
||||
**Utilities (3)**: version-file-parser, version-validator, validate-inputs
|
||||
|
||||
## Key Principles
|
||||
|
||||
### Self-Contained Design
|
||||
|
||||
- No dependencies between actions
|
||||
- Externally usable via GitHub Actions marketplace
|
||||
- Custom validators colocated with actions
|
||||
|
||||
### Quality Standards
|
||||
|
||||
- **Zero Tolerance**: No failing tests, no linting issues
|
||||
- **Production Ready**: Only when ALL checks pass
|
||||
- **EditorConfig**: 2-space indent, LF, UTF-8, max 200 chars (120 for MD)
|
||||
|
||||
### Security Model
|
||||
|
||||
- SHA-pinned external actions (55 SHA-pinned, 0 unpinned)
|
||||
- Token validation, injection detection
|
||||
- Path traversal protection
|
||||
- `set -eu` in all POSIX shell scripts (`pipefail` is not POSIX)
|
||||
|
||||
## Development Workflow
|
||||
|
||||
```bash
|
||||
make all # Full pipeline: docs, format, lint, test
|
||||
make dev # Format + lint
|
||||
make lint # All linters (markdownlint, yaml-lint, shellcheck, ruff)
|
||||
make test # All tests (pytest + ShellSpec)
|
||||
```
|
||||
|
||||
## Testing Framework
|
||||
|
||||
- **ShellSpec**: GitHub Actions and shell scripts
|
||||
- **pytest**: Python validators (769 tests, 100% pass rate)
|
||||
- **Test Generator**: Automatic scaffolding for new actions
|
||||
|
||||
## Current Status
|
||||
|
||||
- ✅ All tests passing (769/769)
|
||||
- ✅ Zero linting issues
|
||||
- ✅ Modular validator architecture
|
||||
- ✅ Convention-based validation
|
||||
- ✅ Test generation system
|
||||
- ✅ Full backward compatibility
|
||||
|
||||
## Dogfooding Strategy
|
||||
|
||||
The repository actively dogfoods its own actions in workflows:
|
||||
|
||||
**Fully Dogfooded Workflows**:
|
||||
|
||||
- **pr-lint.yml**: Uses `./pr-lint` (was 204 lines, now 112 lines - 45% reduction)
|
||||
- **action-security.yml**: Uses `./security-scan` (was 264 lines, now 82 lines - 69% reduction)
|
||||
- **codeql-new.yml**: Uses `./codeql-analysis`
|
||||
- **sync-labels.yml**: Uses `./sync-labels`
|
||||
- **version-maintenance.yml**: Uses `./action-versioning`
|
||||
|
||||
**Intentionally External**:
|
||||
|
||||
- **build-testing-image.yml**: Uses docker/\* actions directly (needs metadata extraction)
|
||||
- Core GitHub actions (checkout, upload-artifact, setup-\*) kept for standardization
|
||||
|
||||
**Benefits**:
|
||||
|
||||
- Early detection of action issues
|
||||
- Real-world testing of actions
|
||||
- Reduced workflow duplication
|
||||
- Improved maintainability
|
||||
- Better documentation through usage examples
|
||||
`.serena/memories/suggested_commands.md` (new file, 157 lines)
|
||||
# Essential Development Commands
|
||||
|
||||
## Primary Development Workflow
|
||||
|
||||
### Complete Development Cycle
|
||||
|
||||
```bash
|
||||
make all # Generate docs, format, lint, test everything
|
||||
make dev # Format then lint (good for development)
|
||||
make ci # CI workflow - check, docs, lint (no formatting)
|
||||
```
|
||||
|
||||
### Individual Operations
|
||||
|
||||
```bash
|
||||
make docs # Generate documentation for all actions
|
||||
make format # Format all files (markdown, YAML, JSON, Python)
|
||||
make lint # Run all linters
|
||||
make check # Quick syntax and tool checks
|
||||
make clean # Clean up temporary files and caches
|
||||
```
|
||||
|
||||
## Testing Commands
|
||||
|
||||
### All Tests
|
||||
|
||||
```bash
|
||||
make test # Run all tests (Python + GitHub Actions)
|
||||
make test-coverage # Run tests with coverage reporting
|
||||
```
|
||||
|
||||
### Python Testing
|
||||
|
||||
```bash
|
||||
make test-python # Run Python validation tests
|
||||
make test-python-coverage # Run Python tests with coverage
|
||||
make dev-python # Format, lint, and test Python code
|
||||
```
|
||||
|
||||
### GitHub Actions Testing
|
||||
|
||||
```bash
|
||||
make test-actions # Run GitHub Actions tests (ShellSpec)
|
||||
make test-unit # Run unit tests only
|
||||
make test-integration # Run integration tests only
|
||||
make test-action ACTION=node-setup # Test specific action
|
||||
```
|
||||
|
||||
### Validation System
|
||||
|
||||
```bash
|
||||
make update-validators # Update validation rules for all actions
|
||||
make update-validators-dry # Preview validation rules changes
|
||||
make test-update-validators # Test the validation rule generator
|
||||
```
|
||||
|
||||
## Formatting Commands (Auto-fixing)
|
||||
|
||||
```bash
|
||||
make format-markdown # Format markdown files
|
||||
make format-yaml-json # Format YAML and JSON files
|
||||
make format-tables # Format markdown tables
|
||||
make format-python # Format Python files with ruff
|
||||
```
|
||||
|
||||
## Linting Commands
|
||||
|
||||
```bash
|
||||
make lint-markdown # Lint markdown files
|
||||
make lint-yaml # Lint YAML files
|
||||
make lint-shell # Lint shell scripts with shellcheck
|
||||
make lint-python # Lint Python files with ruff
|
||||
```
|
||||
|
||||
## Tool Installation
|
||||
|
||||
```bash
|
||||
make install-tools # Install/update all required tools
|
||||
make check-tools # Check if required tools are available
|
||||
```
|
||||
|
||||
## Manual Tool Usage (when needed)
|
||||
|
||||
### Core Linting Sequence
|
||||
|
||||
```bash
|
||||
# This is the exact sequence used by make lint
|
||||
npx markdownlint-cli2 --fix "**/*.md"
|
||||
npx prettier --write "**/*.md" "**/*.yml" "**/*.yaml" "**/*.json"
|
||||
npx markdown-table-formatter "**/*.md"
|
||||
npx yaml-lint "**/*.yml" "**/*.yaml"
|
||||
actionlint
|
||||
shellcheck **/*.sh
|
||||
uv run ruff check --fix validate-inputs/
|
||||
uv run ruff format validate-inputs/
|
||||
```
|
||||
|
||||
### Python Development
|
||||
|
||||
```bash
|
||||
uvx ruff check --fix # Lint and fix Python files
|
||||
uvx ruff format # Format Python files
|
||||
uv run pytest # Run Python tests
|
||||
uv run pytest --cov # Run Python tests with coverage
|
||||
```
|
||||
|
||||
## System-Specific Commands (Darwin/macOS)
|
||||
|
||||
### File Operations
|
||||
|
||||
```bash
|
||||
rg "pattern" # Fast code search (ripgrep)
|
||||
fd "filename" # Fast file finding
|
||||
ls -la # List files with details
|
||||
pwd # Show current directory
|
||||
```
|
||||
|
||||
### Git Operations
|
||||
|
||||
```bash
|
||||
git status # Check repository status
|
||||
git diff # Show changes
|
||||
git add . # Stage all changes
|
||||
# Note: Never use `git commit` - manual commits not allowed
|
||||
```
|
||||
|
||||
### Node.js (via nvm)
|
||||
|
||||
```bash
|
||||
# nvm available at /Users/ivuorinen/.local/share/nvm/nvm.sh
|
||||
source /Users/ivuorinen/.local/share/nvm/nvm.sh
|
||||
nvm use # Activate Node.js version from .nvmrc
|
||||
```
|
||||
|
||||
## Monitoring and Statistics
|
||||
|
||||
```bash
|
||||
make stats # Show repository statistics
|
||||
make watch # Watch files and auto-format on changes (requires entr)
|
||||
```
|
||||
|
||||
## When Tasks Are Completed
|
||||
|
||||
### Required Quality Checks
|
||||
|
||||
Always run these commands after completing any coding task:
|
||||
|
||||
1. `make lint` - Fix all linting issues (blocking requirement)
|
||||
2. `make test` - Ensure all tests pass
|
||||
3. Check EditorConfig compliance (automatic via linting)
|
||||
|
||||
### Never Do These
|
||||
|
||||
- Never use `git commit` (manual commits not allowed)
|
||||
- Never use `--no-verify` with git commands
|
||||
- Never modify linting configuration unless explicitly told
|
||||
- Never create files unless absolutely necessary
|
||||
61
.serena/memories/tech_stack.md
Normal file
61
.serena/memories/tech_stack.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# Tech Stack and Development Tools
|
||||
|
||||
## Core Technologies
|
||||
|
||||
- **GitHub Actions**: YAML-based workflow automation
|
||||
- **Shell/Bash**: Action scripts with `set -euo pipefail` for error handling
|
||||
- **Python 3.8+**: Centralized validation system with PyYAML
|
||||
- **Node.js**: JavaScript tooling and npm packages (managed via nvm)
|
||||
- **Make**: Build automation and task management
|
||||
|
||||
## Development Tools (Darwin/macOS)
|
||||
|
||||
### Available Tools
|
||||
|
||||
- **ripgrep (`rg`)**: `/Users/ivuorinen/.local/share/cargo/bin/rg` - Fast code search
|
||||
- **fd**: `/Users/ivuorinen/.local/share/cargo/bin/fd` - Fast file finding
|
||||
- **uv**: `/Users/ivuorinen/.local/bin/uv` - Python package management and execution
|
||||
- **shellcheck**: `/Users/ivuorinen/.local/share/nvim/mason/bin/shellcheck` - Shell script linting
|
||||
- **yamlfmt**: `/Users/ivuorinen/.local/share/nvim/mason/bin/yamlfmt` - YAML formatting
|
||||
- **actionlint**: `/Users/ivuorinen/.local/share/nvim/mason/bin/actionlint` - GitHub Actions linting
|
||||
- **git**: `/opt/homebrew/bin/git` - Version control
|
||||
- **npm/npx**: `/Users/ivuorinen/.local/share/nvm/versions/node/v22.19.0/bin/npm` - Node.js package management
|
||||
- **make**: `/usr/bin/make` - Build automation
|
||||
|
||||
### Python Stack
|
||||
|
||||
- **uv**: Modern Python package management
|
||||
- **ruff**: Fast Python linting and formatting
|
||||
- **pytest**: Testing framework with coverage reporting
|
||||
- **PyYAML**: YAML parsing for validation rules
|
||||
|
||||
### JavaScript/Node.js Stack
|
||||
|
||||
- **Node.js v22.19.0**: Managed via nvm at `/Users/ivuorinen/.local/share/nvm/`
|
||||
- **npx**: For running npm packages without installation
|
||||
- **markdownlint-cli2**: Markdown linting
|
||||
- **prettier**: Code formatting
|
||||
- **markdown-table-formatter**: Table formatting
|
||||
- **yaml-lint**: YAML validation
|
||||
- **action-docs**: Auto-generate README.md files
|
||||
|
||||
### Testing Framework
|
||||
|
||||
- **ShellSpec**: Shell script testing framework
|
||||
- **pytest**: Python testing with coverage support
|
||||
- **nektos/act** (optional): Local GitHub Actions testing
|
||||
|
||||
## Language Support
|
||||
|
||||
Multi-language ecosystem supporting:
|
||||
|
||||
- **Shell/Bash**: Action scripts and utilities
|
||||
- **Python**: Validation system and testing
|
||||
- **JavaScript/TypeScript**: Linting and formatting actions
|
||||
- **PHP**: Composer, Laravel, PHPUnit support
|
||||
- **Go**: Build, linting, version detection
|
||||
- **C#/.NET**: Build, lint, publish actions
|
||||
- **Docker**: Multi-architecture build and publish
|
||||
- **Terraform/HCL**: Infrastructure linting
|
||||
- **Ansible**: Playbook linting
|
||||
- **YAML/JSON/Markdown**: Configuration and documentation
|
||||
76
.serena/memories/validator_system.md
Normal file
76
.serena/memories/validator_system.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Validation System Architecture
|
||||
|
||||
## Status: PRODUCTION READY ✅
|
||||
|
||||
- 769 tests passing (100%)
|
||||
- Zero linting issues
|
||||
- Modular architecture complete
|
||||
|
||||
## Architecture
|
||||
|
||||
### Core Components
|
||||
|
||||
- **BaseValidator**: Abstract interface for all validators
|
||||
- **ValidatorRegistry**: Dynamic discovery, loads custom validators from `<action>/CustomValidator.py`
|
||||
- **ConventionMapper**: Auto-detection via 100+ naming patterns (priority-based matching)
|
||||
|
||||
### Specialized Validators (9)
|
||||
|
||||
`token.py`, `version.py` (SemVer/CalVer), `boolean.py`, `numeric.py`, `docker.py`, `file.py`, `network.py`, `security.py`, `codeql.py`
|
||||
|
||||
### Custom Validators (20+)
|
||||
|
||||
Actions with complex validation have `CustomValidator.py` in their directory. Registry auto-discovers them.
|
||||
|
||||
Examples: `docker-build/CustomValidator.py`, `sync-labels/CustomValidator.py`, `codeql-analysis/CustomValidator.py`
|
||||
|
||||
## Convention-Based Detection
|
||||
|
||||
Automatic validator selection from input names:
|
||||
|
||||
- Priority 100: Exact (`dry-run` → boolean)
|
||||
- Priority 95: Language-specific (`-python-version` → python_version)
|
||||
- Priority 90: Suffixes (`-token` → token)
|
||||
- Priority 85: Contains (`email` → email)
|
||||
- Priority 80: Prefixes (`is-` → boolean)
|
||||
|
||||
## Test Generation
|
||||
|
||||
`validate-inputs/scripts/generate-tests.py`:
|
||||
|
||||
- Non-destructive (preserves existing tests)
|
||||
- Intelligent pattern detection for input types
|
||||
- Template-based scaffolding for validators
|
||||
- ShellSpec + pytest generation
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
from validators.registry import ValidatorRegistry
|
||||
validator = ValidatorRegistry().get_validator("docker-build")
|
||||
result = validator.validate_inputs({"context": ".", "platforms": "linux/amd64"})
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```text
|
||||
validate-inputs/
|
||||
├── validator.py # Main entry
|
||||
├── validators/ # 9 specialized + base + registry + conventions
|
||||
├── scripts/
|
||||
│ ├── update-validators.py # Rule generator
|
||||
│ └── generate-tests.py # Test generator
|
||||
└── tests/ # 769 pytest tests
|
||||
|
||||
<action>/CustomValidator.py # Action-specific validators
|
||||
```
|
||||
|
||||
## Key Features
|
||||
|
||||
- Convention-based auto-detection
|
||||
- GitHub expression support (`${{ }}`)
|
||||
- Error propagation between validators
|
||||
- Security validation (injection, secrets)
|
||||
- CalVer, SemVer, flexible versioning
|
||||
- Docker platforms, registries
|
||||
- Token formats (GitHub, NPM, PyPI)
|
||||
219
.serena/memories/versioning_system.md
Normal file
219
.serena/memories/versioning_system.md
Normal file
@@ -0,0 +1,219 @@
|
||||
# Version System Architecture
|
||||
|
||||
## Overview
|
||||
|
||||
This repository uses a CalVer-based SHA-pinned versioning system for all internal action references.
|
||||
|
||||
## Version Format
|
||||
|
||||
### CalVer: vYYYY.MM.DD
|
||||
|
||||
- **Major**: `v2025` (year, updated annually)
|
||||
- **Minor**: `v2025.10` (year.month)
|
||||
- **Patch**: `v2025.10.18` (year.month.day)
|
||||
|
||||
Example: Release `v2025.10.18` creates three tags pointing to the same commit:
|
||||
|
||||
- `v2025.10.18` (patch - specific release)
|
||||
- `v2025.10` (minor - latest October 2025 release)
|
||||
- `v2025` (major - latest 2025 release)
|
||||
|
||||
## Internal vs External References
|
||||
|
||||
### Internal (action.yml files)
|
||||
|
||||
- **Format**: `ivuorinen/actions/validate-inputs@<40-char-SHA>`
|
||||
- **Purpose**: Security, reproducibility, precise control
|
||||
- **Example**: `ivuorinen/actions/validate-inputs@7061aafd35a2f21b57653e34f2b634b2a19334a9`
|
||||
|
||||
### External (user consumption)
|
||||
|
||||
- **Format**: `ivuorinen/actions/validate-inputs@v2025`
|
||||
- **Purpose**: Convenience, always gets latest release
|
||||
- **Options**: `@v2025`, `@v2025.10`, or `@v2025.10.18`
|
||||
|
||||
### Test Workflows
|
||||
|
||||
- **Format**: `uses: ./action-name` (local reference)
|
||||
- **Location**: `_tests/integration/workflows/*.yml`
|
||||
- **Reason**: Tests run within the actions repo context
|
||||
|
||||
### Internal Workflows
|
||||
|
||||
- **Format**: `uses: ./sync-labels` (local reference)
|
||||
- **Location**: `.github/workflows/sync-labels.yml`
|
||||
- **Reason**: Runs within the actions repo, local is sufficient
|
||||
|
||||
## Release Process
|
||||
|
||||
### Creating a Release
|
||||
|
||||
```bash
|
||||
# 1. Create release with version tags
|
||||
make release VERSION=v2025.10.18
|
||||
|
||||
# This automatically:
|
||||
# - Updates all action.yml SHA refs to current HEAD
|
||||
# - Commits the changes
|
||||
# - Creates tags: v2025.10.18, v2025.10, v2025
|
||||
# - All tags point to the same commit SHA
|
||||
|
||||
# 2. Push to remote
|
||||
git push origin main --tags --force-with-lease
|
||||
```
|
||||
|
||||
### After Each Release
|
||||
|
||||
Tags are force-pushed to ensure `v2025` and `v2025.10` always point to latest:
|
||||
|
||||
```bash
|
||||
git push origin v2025 --force
|
||||
git push origin v2025.10 --force
|
||||
git push origin v2025.10.18
|
||||
```
|
||||
|
||||
Or use `--tags --force-with-lease` to push all at once.
|
||||
|
||||
## Makefile Targets
|
||||
|
||||
### `make release VERSION=v2025.10.18`
|
||||
|
||||
Creates new release with version tags and updates all action references.
|
||||
|
||||
### `make update-version-refs MAJOR=v2025`
|
||||
|
||||
Updates all action.yml files to reference the SHA of the specified major version tag.
|
||||
|
||||
### `make bump-major-version OLD=v2025 NEW=v2026`
|
||||
|
||||
Annual version bump - replaces all references from one major version to another.
|
||||
|
||||
### `make check-version-refs`
|
||||
|
||||
Lists all current SHA-pinned references grouped by SHA. Useful for verification.
|
||||
|
||||
## Helper Scripts (\_tools/)
|
||||
|
||||
### release.sh
|
||||
|
||||
Main release script - validates version, updates refs, creates tags.
|
||||
|
||||
### validate-version.sh
|
||||
|
||||
Validates CalVer format (vYYYY.MM.DD, vYYYY.MM, vYYYY).
|
||||
|
||||
### update-action-refs.sh
|
||||
|
||||
Updates all action references to a specific SHA or version tag.
|
||||
|
||||
### bump-major-version.sh
|
||||
|
||||
Handles annual version bumps with commit creation.
|
||||
|
||||
### check-version-refs.sh
|
||||
|
||||
Displays current SHA-pinned references with tag information.
|
||||
|
||||
### get-action-sha.sh
|
||||
|
||||
Retrieves SHA for a specific version tag.
|
||||
|
||||
## Action Versioning Action
|
||||
|
||||
**Location**: `action-versioning/action.yml`
|
||||
|
||||
Automatically checks if major version tag has moved and updates all action references.
|
||||
|
||||
**Usage in CI**:
|
||||
|
||||
```yaml
|
||||
- uses: ./action-versioning
|
||||
with:
|
||||
major-version: v2025
|
||||
```
|
||||
|
||||
**Outputs**:
|
||||
|
||||
- `updated`: true/false
|
||||
- `commit-sha`: SHA of created commit (if any)
|
||||
- `needs-annual-bump`: true/false (year mismatch)
|
||||
|
||||
## CI Workflow
|
||||
|
||||
**File**: `.github/workflows/version-maintenance.yml`
|
||||
|
||||
**Triggers**:
|
||||
|
||||
- Weekly (Monday 9 AM UTC)
|
||||
- Manual (workflow_dispatch)
|
||||
|
||||
**Actions**:
|
||||
|
||||
1. Checks if `v2025` tag has moved
|
||||
2. Updates action references if needed
|
||||
3. Creates PR with changes
|
||||
4. Creates issue if annual bump needed
|
||||
|
||||
## Annual Version Bump
|
||||
|
||||
**When**: Start of each new year
|
||||
|
||||
**Process**:
|
||||
|
||||
```bash
|
||||
# 1. Create new major version tag
|
||||
git tag -a v2026 -m "Major version v2026"
|
||||
git push origin v2026
|
||||
|
||||
# 2. Bump all references
|
||||
make bump-major-version OLD=v2025 NEW=v2026
|
||||
|
||||
# 3. Update documentation
|
||||
make docs
|
||||
|
||||
# 4. Push changes
|
||||
git push origin main
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
### Check Current Refs
|
||||
|
||||
```bash
|
||||
make check-version-refs
|
||||
```
|
||||
|
||||
### Verify All Refs Match
|
||||
|
||||
All action references should point to the same SHA after a release.
|
||||
|
||||
### Test External Usage
|
||||
|
||||
Create a test repo and use:
|
||||
|
||||
```yaml
|
||||
uses: ivuorinen/actions/pr-lint@v2025
|
||||
```
|
||||
|
||||
## Migration from @main
|
||||
|
||||
All action.yml files have been migrated from:
|
||||
|
||||
- `uses: ./action-name`
|
||||
- `uses: ivuorinen/actions/action-name@main`
|
||||
|
||||
To:
|
||||
|
||||
- `uses: ivuorinen/actions/action-name@<SHA>`
|
||||
|
||||
Test workflows still use `./action-name` for local testing.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
**SHA Pinning**: Prevents supply chain attacks by ensuring exact commit is used.
|
||||
|
||||
**Version Tags**: Provide user-friendly references while maintaining security internally.
|
||||
|
||||
**Tag Verification**: Always verify tags point to expected commits before force-pushing.
|
||||
|
||||
**Annual Review**: Each year requires conscious version bump, preventing accidental drift.
|
||||
69
.serena/project.yml
Normal file
69
.serena/project.yml
Normal file
@@ -0,0 +1,69 @@
|
||||
---
|
||||
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
|
||||
# * For C, use cpp
|
||||
# * For JavaScript, use typescript
|
||||
# Special requirements:
|
||||
# * csharp: Requires the presence of a .sln file in the project folder.
|
||||
ignore_all_files_in_gitignore: true
|
||||
# list of additional paths to ignore
|
||||
# same syntax as gitignore, so you can use * and **
|
||||
# Was previously called `ignored_dirs`, please update your config if you are using that.
|
||||
# Added (renamed) on 2025-04-07
|
||||
ignored_paths: []
|
||||
|
||||
# whether the project is in read-only mode
|
||||
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
|
||||
# Added on 2025-04-18
|
||||
read_only: false
|
||||
|
||||
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
|
||||
# Below is the complete list of tools for convenience.
|
||||
# To make sure you have the latest list of tools, and to view their descriptions,
|
||||
# execute `uv run scripts/print_tool_overview.py`.
|
||||
#
|
||||
# * `activate_project`: Activates a project by name.
|
||||
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
|
||||
# * `create_text_file`: Creates/overwrites a file in the project directory.
|
||||
# * `delete_lines`: Deletes a range of lines within a file.
|
||||
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
|
||||
# * `execute_shell_command`: Executes a shell command.
|
||||
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
|
||||
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
|
||||
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
|
||||
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
|
||||
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
|
||||
# * `initial_instructions`: Gets the initial instructions for the current project.
|
||||
# Should only be used in settings where the system prompt cannot be set,
|
||||
# e.g. in clients you have no control over, like Claude Desktop.
|
||||
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
|
||||
# * `insert_at_line`: Inserts content at a given line in a file.
|
||||
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
|
||||
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
|
||||
# * `list_memories`: Lists memories in Serena's project-specific memory store.
|
||||
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
|
||||
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
|
||||
# * `read_file`: Reads a file within the project directory.
|
||||
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
|
||||
# * `remove_project`: Removes a project from the Serena configuration.
|
||||
# * `replace_lines`: Replaces a range of lines within a file with new content.
|
||||
# * `replace_symbol_body`: Replaces the full definition of a symbol.
|
||||
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
|
||||
# * `search_for_pattern`: Performs a search for a pattern in the project.
|
||||
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
|
||||
# * `switch_modes`: Activates modes by providing a list of their names
|
||||
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
|
||||
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
|
||||
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
|
||||
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
|
||||
excluded_tools: []
|
||||
|
||||
# initial prompt for the project. It will always be given to the LLM upon activating the project
|
||||
# (contrary to the memories, which are loaded on demand).
|
||||
initial_prompt: ''
|
||||
|
||||
project_name: 'actions'
|
||||
languages:
|
||||
- bash
|
||||
- python
|
||||
included_optional_tools: []
|
||||
encoding: utf-8
|
||||
31
.shellspec
Normal file
31
.shellspec
Normal file
@@ -0,0 +1,31 @@
|
||||
# ShellSpec configuration for GitHub Actions Testing Framework
|
||||
|
||||
# Set the default directory containing spec files
|
||||
--default-path _tests/unit
|
||||
|
||||
# Specify pattern to find spec files
|
||||
--pattern "*_spec.sh" --pattern "*.spec.sh"
|
||||
|
||||
# Set shell to use (bash for better compatibility with GitHub Actions)
|
||||
--shell bash
|
||||
|
||||
# Load path for framework modules and spec_helper
|
||||
--load-path _tests/framework --load-path _tests/unit
|
||||
|
||||
# Helper directory containing spec_helper.sh
|
||||
--require spec_helper
|
||||
|
||||
# Output format
|
||||
--format documentation
|
||||
|
||||
# Coverage settings (if kcov is available)
|
||||
--covdir _tests/coverage
|
||||
|
||||
# Enable color output
|
||||
--color
|
||||
|
||||
# Set execution directory to project root
|
||||
--execdir @project
|
||||
|
||||
# Do not sandbox (we need access to real commands for testing)
|
||||
--no-sandbox
|
||||
5
.sonarlint/connectedMode.json
Normal file
5
.sonarlint/connectedMode.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"sonarCloudOrganization": "ivuorinen",
|
||||
"projectKey": "ivuorinen_actions",
|
||||
"region": "EU"
|
||||
}
|
||||
8
.vscode/settings.json
vendored
Normal file
8
.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"sonarlint.connectedMode.project": {
|
||||
"connectionId": "ivuorinen",
|
||||
"projectKey": "ivuorinen_actions"
|
||||
},
|
||||
"sarif-viewer.connectToGithubCodeScanning": "on",
|
||||
"makefile.configureOnOpen": false
|
||||
}
|
||||
15
.yamlfmt.yml
Normal file
15
.yamlfmt.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
doublestar: true
|
||||
gitignore_excludes: true
|
||||
formatter:
|
||||
basic:
|
||||
include_document_start: true
|
||||
retain_line_breaks: true
|
||||
scan_folded_as_literal: false
|
||||
max_line_length: 0
|
||||
indentless_arrays: true
|
||||
include:
|
||||
- '**/*.yml'
|
||||
- '**/*.yaml'
|
||||
exclude:
|
||||
- node_modules
|
||||
@@ -0,0 +1,2 @@
|
||||
.venv
|
||||
.worktrees/
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
---
|
||||
extends: default
|
||||
|
||||
ignore: |
|
||||
node_modules/
|
||||
.worktrees/
|
||||
|
||||
rules:
|
||||
line-length:
|
||||
max: 200
|
||||
|
||||
160
CLAUDE.md
Normal file
160
CLAUDE.md
Normal file
@@ -0,0 +1,160 @@
|
||||
# CLAUDE.md - GitHub Actions Monorepo
|
||||
|
||||
**Mantra**: Zero defects. Zero exceptions. All rules mandatory and non-negotiable.
|
||||
|
||||
## Standards
|
||||
|
||||
### Production Ready Criteria
|
||||
|
||||
- All tests pass + all linting passes + all validation passes + zero warnings
|
||||
|
||||
### Core Rules
|
||||
|
||||
- Follow conventions, fix all issues, never compromise standards, test thoroughly
|
||||
- Prioritize quality over speed, write maintainable/DRY code
|
||||
- Document changes, communicate factually, review carefully
|
||||
- Update existing memory files rather than create new ones
|
||||
- Ask when unsure
|
||||
|
||||
### Communication
|
||||
|
||||
- Direct, factual, concise only
|
||||
- Prohibited: hype, buzzwords, jargon, clichés, assumptions, predictions, comparisons, superlatives
|
||||
- Never declare "production ready" until all checks pass
|
||||
|
||||
### Folders
|
||||
|
||||
- `.serena/` – Internal config (do not edit)
|
||||
- `.github/` – Workflows/templates
|
||||
- `_tests/` – ShellSpec tests
|
||||
- `_tools/` – Helper tools
|
||||
- `validate-inputs/` – Python validation system + tests
|
||||
- `*/rules.yml` – Auto-generated validation rules
|
||||
|
||||
### Memory System
|
||||
|
||||
**Location**: `.serena/memories/` (9 consolidated memories for context)
|
||||
|
||||
**When to Use**: Read memories at session start or when needed for specific context. Be token-efficient - read only relevant memories for the task.
|
||||
|
||||
**Core Memories** (read first for project understanding):
|
||||
|
||||
- `repository_overview` – 30 actions, categories, structure, status
|
||||
- `validator_system` – Validation architecture, components, usage patterns
|
||||
- `development_standards` – Quality rules, workflows, security, completion checklist
|
||||
|
||||
**Reference Guides** (read when working on specific areas):
|
||||
|
||||
- `code_style_conventions` – EditorConfig, Shell/Python/YAML style, 10 critical prevention rules
|
||||
- `suggested_commands` – Make targets, testing commands, tool usage
|
||||
- `tech_stack` – Python/Node.js/Shell tools, paths, versions
|
||||
|
||||
**GitHub Actions Reference** (read when working with workflows):
|
||||
|
||||
- `github-workflow-expressions` – Expression syntax, contexts, operators, common patterns
|
||||
- `github-workflow-commands` – Workflow commands (outputs, env, logging, masking)
|
||||
- `github-workflow-secure-use` – Security best practices, secrets, injection prevention
|
||||
|
||||
**Memory Maintenance**: Update existing memories rather than create new ones. Keep content token-efficient and factual.
|
||||
|
||||
### Documentation Locations
|
||||
|
||||
**Validation System**: `validate-inputs/docs/` (4 guides: API.md, DEVELOPER_GUIDE.md, ACTION_MAINTAINER.md, README_ARCHITECTURE.md)
|
||||
|
||||
**Testing**: `_tests/README.md` (ShellSpec framework, test patterns, running tests)
|
||||
|
||||
**Docker Tools**: `_tools/docker-testing-tools/README.md` (CI setup, pre-built testing image)
|
||||
|
||||
**See**: `documentation_guide` memory for detailed descriptions and when to read each
|
||||
|
||||
## Repository Structure
|
||||
|
||||
Flat structure. Each action self-contained with `action.yml`.
|
||||
|
||||
**24 Actions**: Setup (language-version-detect), Utilities (action-versioning, version-file-parser),
|
||||
Linting (ansible-lint-fix, biome-lint, csharp-lint-check, eslint-lint, go-lint, pr-lint, pre-commit, prettier-lint, python-lint-fix, terraform-lint-fix),
|
||||
Testing (php-tests), Build (csharp-build, go-build, docker-build),
|
||||
Publishing (npm-publish, docker-publish, csharp-publish),
|
||||
Repository (release-monthly, sync-labels, stale, compress-images, codeql-analysis),
|
||||
Validation (validate-inputs)
|
||||
|
||||
## Commands
|
||||
|
||||
**Main**: `make all` (docs+format+lint+test), `make dev` (format+lint), `make lint`, `make format`, `make docs`, `make test`
|
||||
|
||||
**Testing**: `make test-python`, `make test-python-coverage`, `make test-actions`, `make test-update-validators`, `make test-coverage`
|
||||
|
||||
**Validation**: `make update-validators`, `make update-validators-dry`
|
||||
|
||||
**Versioning**:
|
||||
|
||||
- `make release [VERSION=vYYYY.MM.DD]` - Create release (auto-generates version from date if omitted)
|
||||
- `make update-version-refs MAJOR=vYYYY` - Update action refs to version
|
||||
- `make bump-major-version OLD=vYYYY NEW=vYYYY` - Annual version bump
|
||||
- `make check-version-refs` - Verify current action references
|
||||
|
||||
### Linters
|
||||
|
||||
Use `make lint` (not direct calls). Runs: markdownlint-cli2, prettier, markdown-table-formatter, yaml-lint, actionlint, shellcheck, ruff
|
||||
|
||||
### Tests
|
||||
|
||||
ShellSpec (`_tests/`) + pytest (`validate-inputs/tests/`). Full coverage + independent + integration tests required.
|
||||
|
||||
## Architecture - Critical Prevention (Zero Tolerance)
|
||||
|
||||
Violations cause runtime failures:
|
||||
|
||||
1. Add `id:` when outputs referenced (`steps.x.outputs.y` requires `id: x`)
|
||||
2. Check tool availability: `command -v jq >/dev/null 2>&1` (jq/bc/terraform not on all runners)
|
||||
3. Sanitize `$GITHUB_OUTPUT`: use `printf '%s\n' "$val"` not `echo "$val"`
|
||||
4. Pin external actions to SHA commits (not `@main`/`@v1`)
|
||||
5. Quote shell vars: `"$var"`, `basename -- "$path"` (handles spaces)
|
||||
6. Use SHA-pinned refs for internal actions: `ivuorinen/actions/action-name@<SHA>`
|
||||
(security, not `./` or `@main`)
|
||||
7. Test regex edge cases (support `1.0.0-rc.1`, `1.0.0+build`)
|
||||
8. Use `set -eu` (POSIX) in shell scripts (all scripts are POSIX sh, not bash)
|
||||
9. Never nest `${{ }}` in quoted YAML strings (breaks hashFiles)
|
||||
10. Provide tool fallbacks (macOS/Windows lack Linux tools)
|
||||
|
||||
### Core Requirements
|
||||
|
||||
- All actions SHA-pinned (external + internal), use `${{ github.token }}`, POSIX shell (`set -eu`)
|
||||
- EditorConfig: 2-space indent, UTF-8, LF, max 200 chars (120 for MD)
|
||||
- Auto-gen README via `action-docs` (note: `npx action-docs --update-readme` doesn't work)
|
||||
- Required error handling, POSIX-compliant scripts
|
||||
|
||||
### Action References
|
||||
|
||||
**Internal actions (in action.yml)**: SHA-pinned full references
|
||||
|
||||
- ✅ `ivuorinen/actions/action-name@7061aafd35a2f21b57653e34f2b634b2a19334a9`
|
||||
- ❌ `./action-name` (security risk, not portable when used externally)
|
||||
- ❌ `owner/repo/action@main` (floating reference)
|
||||
|
||||
**Test workflows**: Local references
|
||||
|
||||
- ✅ `./action-name` (tests run within repo)
|
||||
- ❌ `../action-name` (ambiguous paths)
|
||||
|
||||
**External users**: Version tags
|
||||
|
||||
- ✅ `ivuorinen/actions/action-name@v2025` (CalVer major version)
|
||||
|
||||
Check: `make check-version-refs`
|
||||
|
||||
## Validation System
|
||||
|
||||
**Location**: `validate-inputs/` (YAML rules.yml per action, Python generator)
|
||||
|
||||
**Conventions**: `token`→GitHub token, `*-version`→SemVer/CalVer, `email`→format, `dockerfile`→path, `dry-run`→bool, `architectures`→Docker, `*-retries`→range
|
||||
|
||||
**Version Types**: semantic_version, calver_version, flexible_version, dotnet_version, terraform_version, node_version
|
||||
|
||||
**CalVer Support**: YYYY.MM.PATCH, YYYY.MM.DD, YYYY.0M.0D, YY.MM.MICRO, YYYY.MM, YYYY-MM-DD
|
||||
|
||||
**Maintenance**: `make update-validators`, `git diff validate-inputs/rules/`
|
||||
|
||||
---
|
||||
|
||||
All actions modular and externally usable. No exceptions to any rule.
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 Ismo Vuorinen
|
||||
Copyright (c) 2024-2025 Ismo Vuorinen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
730
Makefile
Normal file
730
Makefile
Normal file
@@ -0,0 +1,730 @@
|
||||
# Makefile for GitHub Actions repository
|
||||
# Provides organized task management with parallel execution capabilities
|
||||
|
||||
.PHONY: help all docs update-catalog lint format check clean install-tools test test-unit test-integration test-coverage generate-tests generate-tests-dry test-generate-tests docker-build docker-push docker-test docker-login docker-all release release-dry release-prep release-tag release-undo update-version-refs bump-major-version check-version-refs
|
||||
.DEFAULT_GOAL := help
|
||||
|
||||
# Colors for output
|
||||
GREEN := $(shell printf '\033[32m')
|
||||
YELLOW := $(shell printf '\033[33m')
|
||||
RED := $(shell printf '\033[31m')
|
||||
BLUE := $(shell printf '\033[34m')
|
||||
RESET := $(shell printf '\033[0m')
|
||||
|
||||
# Configuration
|
||||
SHELL := /bin/bash
|
||||
.SHELLFLAGS := -euo pipefail -c
|
||||
|
||||
# Log file with timestamp
|
||||
LOG_FILE := update_$(shell date +%Y%m%d_%H%M%S).log
|
||||
|
||||
# Detect OS for sed compatibility
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S),Darwin)
|
||||
SED_CMD := sed -i .bak
|
||||
else
|
||||
SED_CMD := sed -i
|
||||
endif
|
||||
|
||||
# Help target - shows available commands
|
||||
help: ## Show this help message
|
||||
@echo "$(BLUE)GitHub Actions Repository Management$(RESET)"
|
||||
@echo ""
|
||||
@echo "$(GREEN)Available targets:$(RESET)"
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
|
||||
awk 'BEGIN {FS = ":.*?## "}; {printf " $(YELLOW)%-20s$(RESET) %s\n", $$1, $$2}'
|
||||
@echo ""
|
||||
@echo "$(GREEN)Examples:$(RESET)"
|
||||
@echo " make all # Generate docs, format, and lint everything"
|
||||
@echo " make docs # Generate documentation only"
|
||||
@echo " make lint # Run all linters"
|
||||
@echo " make format # Format all files"
|
||||
@echo " make test # Run all tests (unit + integration)"
|
||||
@echo " make check # Quick syntax checks"
|
||||
|
||||
# Main targets
|
||||
all: install-tools update-validators docs update-catalog format lint precommit ## Generate docs, format, lint, and run pre-commit
|
||||
@echo "$(GREEN)✅ All tasks completed successfully$(RESET)"
|
||||
|
||||
docs: ## Generate documentation for all actions
|
||||
@echo "$(BLUE)📂 Generating documentation...$(RESET)"
|
||||
@failed=0; \
|
||||
for dir in $$(find . -mindepth 2 -maxdepth 2 -name "action.yml" | sed 's|/action.yml||' | sed 's|./||'); do \
|
||||
echo "$(BLUE)📄 Updating $$dir/README.md...$(RESET)"; \
|
||||
repo="ivuorinen/actions/$$dir"; \
|
||||
printf "# %s\n\n" "$$repo" > "$$dir/README.md"; \
|
||||
if npx --yes action-docs -n -s "$$dir/action.yml" --no-banner >> "$$dir/README.md" 2>/dev/null; then \
|
||||
$(SED_CMD) "s|\*\*\*PROJECT\*\*\*|$$repo|g" "$$dir/README.md"; \
|
||||
$(SED_CMD) "s|\*\*\*VERSION\*\*\*|main|g" "$$dir/README.md"; \
|
||||
$(SED_CMD) "s|\*\*\*||g" "$$dir/README.md"; \
|
||||
[ "$(UNAME_S)" = "Darwin" ] && rm -f "$$dir/README.md.bak"; \
|
||||
echo "$(GREEN)✅ Updated $$dir/README.md$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)⚠️ Failed to update $$dir/README.md$(RESET)" | tee -a $(LOG_FILE); \
|
||||
failed=$$((failed + 1)); \
|
||||
fi; \
|
||||
done; \
|
||||
[ $$failed -eq 0 ] && echo "$(GREEN)✅ All documentation updated successfully$(RESET)" || { echo "$(RED)❌ $$failed documentation updates failed$(RESET)"; exit 1; }
|
||||
|
||||
update-catalog: ## Update action catalog in README.md
|
||||
@echo "$(BLUE)📚 Updating action catalog...$(RESET)"
|
||||
@if command -v npm >/dev/null 2>&1; then \
|
||||
npm run update-catalog; \
|
||||
else \
|
||||
echo "$(RED)❌ npm not found. Please install Node.js$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(GREEN)✅ Action catalog updated$(RESET)"
|
||||
|
||||
update-validators: ## Update validation rules for all actions
|
||||
@echo "$(BLUE)🔧 Updating validation rules...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
cd validate-inputs && uv run scripts/update-validators.py; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(GREEN)✅ Validation rules updated$(RESET)"
|
||||
|
||||
update-validators-dry: ## Preview validation rules changes (dry run)
|
||||
@echo "$(BLUE)🔍 Previewing validation rules changes...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
cd validate-inputs && uv run scripts/update-validators.py --dry-run; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
format: format-markdown format-yaml-json format-python ## Format all files
|
||||
@echo "$(GREEN)✅ All files formatted$(RESET)"
|
||||
|
||||
lint: lint-markdown lint-yaml lint-shell lint-python ## Run all linters
|
||||
@echo "$(GREEN)✅ All linting completed$(RESET)"
|
||||
|
||||
check: check-tools check-syntax check-local-refs ## Quick syntax and tool availability checks
|
||||
@echo "$(GREEN)✅ All checks passed$(RESET)"
|
||||
|
||||
clean: ## Clean up temporary files and caches
|
||||
@echo "$(BLUE)🧹 Cleaning up...$(RESET)"
|
||||
@find . -name "*.bak" -delete 2>/dev/null || true
|
||||
@find . -name "update_*.log" -mtime +7 -delete 2>/dev/null || true
|
||||
@find . -name ".megalinter" -type d -exec rm -rf {} + 2>/dev/null || true
|
||||
@echo "$(GREEN)✅ Cleanup completed$(RESET)"
|
||||
|
||||
precommit: ## Run pre-commit hooks on all files
|
||||
@echo "$(BLUE)🔍 Running pre-commit hooks...$(RESET)"
|
||||
@if command -v pre-commit >/dev/null 2>&1; then \
|
||||
if PRE_COMMIT_USE_UV=1 pre-commit run --all-files; then \
|
||||
echo "$(GREEN)✅ All pre-commit hooks passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Some pre-commit hooks failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(RED)❌ pre-commit not found. Please install:$(RESET)"; \
|
||||
echo " brew install pre-commit"; \
|
||||
echo " or: pip install pre-commit"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Local action reference validation
|
||||
check-local-refs: ## Check for ../action-name references that should be ./action-name
|
||||
@echo "$(BLUE)🔍 Checking local action references...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
uv run _tools/fix-local-action-refs.py --check; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
fix-local-refs: ## Fix ../action-name references to ./action-name
|
||||
@echo "$(BLUE)🔧 Fixing local action references...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
uv run _tools/fix-local-action-refs.py; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
fix-local-refs-dry: ## Preview local action reference fixes (dry run)
|
||||
@echo "$(BLUE)🔍 Previewing local action reference fixes...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
uv run _tools/fix-local-action-refs.py --dry-run; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Version management targets
|
||||
release: ## Create a new release with version tags (usage: make release [VERSION=v2025.10.18])
|
||||
@VERSION_TO_USE=$$(if [ -n "$(VERSION)" ]; then echo "$(VERSION)"; else date +v%Y.%m.%d; fi); \
|
||||
echo "$(BLUE)🚀 Creating release $$VERSION_TO_USE...$(RESET)"; \
|
||||
sh _tools/release.sh "$$VERSION_TO_USE"
|
||||
|
||||
release-dry: ## Preview release without making changes (usage: make release-dry VERSION=v2025.11.01)
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
VERSION_TO_USE=$$(date +v%Y.%m.%d); \
|
||||
else \
|
||||
VERSION_TO_USE="$(VERSION)"; \
|
||||
fi; \
|
||||
echo "$(BLUE)🔍 Previewing release $$VERSION_TO_USE (dry run)...$(RESET)"; \
|
||||
sh _tools/release.sh --dry-run "$$VERSION_TO_USE"
|
||||
|
||||
release-prep: ## Update action refs and commit (no tags) (usage: make release-prep [VERSION=v2025.11.01])
|
||||
@VERSION_TO_USE=$$(if [ -n "$(VERSION)" ]; then echo "$(VERSION)"; else date +v%Y.%m.%d; fi); \
|
||||
echo "$(BLUE)🔧 Preparing release $$VERSION_TO_USE...$(RESET)"; \
|
||||
sh _tools/release.sh --prep-only "$$VERSION_TO_USE"; \
|
||||
echo "$(GREEN)✅ Preparation complete$(RESET)"; \
|
||||
echo "$(YELLOW)Next: make release-tag VERSION=$$VERSION_TO_USE$(RESET)"
|
||||
|
||||
release-tag: ## Create tags only (assumes prep done) (usage: make release-tag VERSION=v2025.11.01)
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "$(RED)❌ Error: VERSION parameter required for release-tag$(RESET)"; \
|
||||
echo "Usage: make release-tag VERSION=v2025.11.01"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
echo "$(BLUE)🏷️ Creating tags for release $(VERSION)...$(RESET)"; \
|
||||
sh _tools/release.sh --tag-only "$(VERSION)"
|
||||
|
||||
release-undo: ## Rollback the most recent release (delete tags and reset HEAD)
|
||||
@echo "$(BLUE)🔙 Rolling back release...$(RESET)"; \
|
||||
sh _tools/release-undo.sh
|
||||
|
||||
update-version-refs: ## Update all action references to a specific version tag (usage: make update-version-refs MAJOR=v2025)
|
||||
@if [ -z "$(MAJOR)" ]; then \
|
||||
echo "$(RED)❌ Error: MAJOR parameter required$(RESET)"; \
|
||||
echo "Usage: make update-version-refs MAJOR=v2025"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(BLUE)🔧 Updating action references to $(MAJOR)...$(RESET)"
|
||||
@sh _tools/update-action-refs.sh "$(MAJOR)"
|
||||
@echo "$(GREEN)✅ Action references updated$(RESET)"
|
||||
|
||||
bump-major-version: ## Replace one major version with another (usage: make bump-major-version OLD=v2025 NEW=v2026)
|
||||
@if [ -z "$(OLD)" ] || [ -z "$(NEW)" ]; then \
|
||||
echo "$(RED)❌ Error: OLD and NEW parameters required$(RESET)"; \
|
||||
echo "Usage: make bump-major-version OLD=v2025 NEW=v2026"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(BLUE)🔄 Bumping version from $(OLD) to $(NEW)...$(RESET)"
|
||||
@sh _tools/bump-major-version.sh "$(OLD)" "$(NEW)"
|
||||
@echo "$(GREEN)✅ Major version bumped$(RESET)"
|
||||
|
||||
check-version-refs: ## List all current SHA-pinned action references
|
||||
@echo "$(BLUE)🔍 Checking action references...$(RESET)"
|
||||
@sh _tools/check-version-refs.sh
|
||||
|
||||
# Formatting targets
|
||||
format-markdown: ## Format markdown files
|
||||
@echo "$(BLUE)📝 Formatting markdown...$(RESET)"
|
||||
@if npx --yes markdownlint-cli2 --fix "**/*.md" "#node_modules" "#.worktrees" 2>/dev/null; then \
|
||||
echo "$(GREEN)✅ Markdown formatted$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ Markdown formatting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi
|
||||
|
||||
format-yaml-json: ## Format YAML and JSON files
|
||||
@echo "$(BLUE)✨ Formatting YAML/JSON...$(RESET)"
|
||||
@if command -v yamlfmt >/dev/null 2>&1; then \
|
||||
if yamlfmt . 2>/dev/null; then \
|
||||
echo "$(GREEN)✅ YAML formatted with yamlfmt$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ YAML formatting issues found with yamlfmt$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ yamlfmt not available, skipping$(RESET)"; \
|
||||
fi
|
||||
@if npx --yes prettier --write "**/*.md" "**/*.yml" "**/*.yaml" "**/*.json" 2>/dev/null; then \
|
||||
echo "$(GREEN)✅ YAML/JSON formatted with prettier$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ YAML/JSON formatting issues found with prettier$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi
|
||||
@echo "$(BLUE)📊 Formatting tables...$(RESET)"
|
||||
@if npx --yes markdown-table-formatter "**/*.md" 2>/dev/null; then \
|
||||
echo "$(GREEN)✅ Tables formatted$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ Table formatting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi
|
||||
|
||||
format-tables: ## Format markdown tables
|
||||
@echo "$(BLUE)📊 Formatting tables...$(RESET)"
|
||||
@if npx --yes markdown-table-formatter "**/*.md" 2>/dev/null; then \
|
||||
echo "$(GREEN)✅ Tables formatted$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ Table formatting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi
|
||||
|
||||
format-python: ## Format Python files with ruff
|
||||
@echo "$(BLUE)🐍 Formatting Python files...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
if uvx ruff format . --no-cache; then \
|
||||
echo "$(GREEN)✅ Python files formatted$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ Python formatting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ uv not available, skipping Python formatting$(RESET)"; \
|
||||
fi
|
||||
|
||||
# Linting targets
|
||||
lint-markdown: ## Lint markdown files
|
||||
@echo "$(BLUE)🔍 Linting markdown...$(RESET)"
|
||||
@if npx --yes markdownlint-cli2 --fix "**/*.md" "#node_modules" "#.worktrees"; then \
|
||||
echo "$(GREEN)✅ Markdown linting passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ Markdown linting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi
|
||||
|
||||
lint-yaml: ## Lint YAML files
|
||||
@echo "$(BLUE)🔍 Linting YAML...$(RESET)"
|
||||
@if npx --yes yaml-lint "**/*.yml" "**/*.yaml" 2>/dev/null; then \
|
||||
echo "$(GREEN)✅ YAML linting passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(YELLOW)⚠️ YAML linting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
fi
|
||||
|
||||
lint-shell: ## Lint shell scripts
|
||||
@echo "$(BLUE)🔍 Linting shell scripts...$(RESET)"
|
||||
@if ! command -v shellcheck >/dev/null 2>&1; then \
|
||||
echo "$(RED)❌ shellcheck not found. Please install shellcheck:$(RESET)"; \
|
||||
echo " brew install shellcheck"; \
|
||||
echo " or: apt-get install shellcheck"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if find . -name "*.sh" -not -path "./_tests/*" -not -path "./.worktrees/*" -exec shellcheck -x {} +; then \
|
||||
echo "$(GREEN)✅ Shell linting passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Shell linting issues found$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
lint-python: ## Lint Python files with ruff and pyright
|
||||
@echo "$(BLUE)🔍 Linting Python files...$(RESET)"
|
||||
@ruff_passed=true; pyright_passed=true; \
|
||||
if command -v uv >/dev/null 2>&1; then \
|
||||
uvx ruff check --fix . --no-cache; \
|
||||
if ! uvx ruff check . --no-cache; then \
|
||||
echo "$(YELLOW)⚠️ Python linting issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
ruff_passed=false; \
|
||||
fi; \
|
||||
if command -v pyright >/dev/null 2>&1; then \
|
||||
if ! pyright --pythonpath $$(which python3) validate-inputs/ _tests/framework/; then \
|
||||
echo "$(YELLOW)⚠️ Python type checking issues found$(RESET)" | tee -a $(LOG_FILE); \
|
||||
pyright_passed=false; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ pyright not available, skipping type checking$(RESET)"; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ uv not available, skipping Python linting$(RESET)"; \
|
||||
fi; \
|
||||
if $$ruff_passed && $$pyright_passed; then \
|
||||
echo "$(GREEN)✅ Python linting and type checking passed$(RESET)"; \
|
||||
fi
|
||||
|
||||
# Check targets
|
||||
check-tools: ## Check if required tools are available
|
||||
@echo "$(BLUE)🔧 Checking required tools...$(RESET)"
|
||||
@for cmd in npx sed find grep shellcheck; do \
|
||||
if ! command -v $$cmd >/dev/null 2>&1; then \
|
||||
echo "$(RED)❌ Error: $$cmd not found$(RESET)"; \
|
||||
echo " Please install $$cmd (see 'make install-tools')"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
done
|
||||
@if ! command -v yamlfmt >/dev/null 2>&1; then \
|
||||
echo "$(YELLOW)⚠️ yamlfmt not found (optional for YAML formatting)$(RESET)"; \
|
||||
fi
|
||||
@echo "$(GREEN)✅ All required tools available$(RESET)"
|
||||
|
||||
check-syntax: ## Check syntax of shell scripts and YAML files
|
||||
@echo "$(BLUE)🔍 Checking syntax...$(RESET)"
|
||||
@failed=0; \
|
||||
find . -name "*.sh" -not -path "./_tests/*" -not -path "./.worktrees/*" -print0 | while IFS= read -r -d '' file; do \
|
||||
if ! bash -n "$$file" 2>&1; then \
|
||||
echo "$(RED)❌ Syntax error in $$file$(RESET)" >&2; \
|
||||
failed=1; \
|
||||
fi; \
|
||||
done; \
|
||||
if [ "$$failed" -eq 1 ]; then \
|
||||
echo "$(RED)❌ Shell script syntax errors found$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(GREEN)✅ Syntax checks passed$(RESET)"
|
||||
|
||||
install-tools: ## Install/update required tools
|
||||
@echo "$(BLUE)📦 Installing/updating tools...$(RESET)"
|
||||
@echo "$(YELLOW)Installing NPM tools...$(RESET)"
|
||||
@npx --yes action-docs@latest --version >/dev/null
|
||||
@npx --yes markdownlint-cli2 --version >/dev/null
|
||||
@npx --yes prettier --version >/dev/null
|
||||
@npx --yes markdown-table-formatter --version >/dev/null
|
||||
@npx --yes yaml-lint --version >/dev/null
|
||||
@echo "$(YELLOW)Checking shellcheck...$(RESET)"
|
||||
@if ! command -v shellcheck >/dev/null 2>&1; then \
|
||||
echo "$(RED)⚠️ shellcheck not found. Please install:$(RESET)"; \
|
||||
echo " macOS: brew install shellcheck"; \
|
||||
echo " Linux: apt-get install shellcheck"; \
|
||||
else \
|
||||
echo " shellcheck already installed"; \
|
||||
fi
|
||||
@echo "$(YELLOW)Checking yamlfmt...$(RESET)"
|
||||
@if ! command -v yamlfmt >/dev/null 2>&1; then \
|
||||
echo "$(RED)⚠️ yamlfmt not found. Please install:$(RESET)"; \
|
||||
echo " macOS: brew install yamlfmt"; \
|
||||
echo " Linux: go install github.com/google/yamlfmt/cmd/yamlfmt@latest"; \
|
||||
else \
|
||||
echo " yamlfmt already installed"; \
|
||||
fi
|
||||
@echo "$(YELLOW)Checking uv...$(RESET)"
|
||||
@if ! command -v uv >/dev/null 2>&1; then \
|
||||
echo "$(RED)⚠️ uv not found. Please install:$(RESET)"; \
|
||||
echo " macOS: brew install uv"; \
|
||||
echo " Linux: curl -LsSf https://astral.sh/uv/install.sh | sh"; \
|
||||
echo " Or see: https://docs.astral.sh/uv/getting-started/installation/"; \
|
||||
exit 1; \
|
||||
else \
|
||||
echo " uv already installed"; \
|
||||
fi
|
||||
@echo "$(YELLOW)Checking pre-commit...$(RESET)"
|
||||
@if ! command -v pre-commit >/dev/null 2>&1; then \
|
||||
echo "$(BLUE)ℹ️ pre-commit not found. Installing via uv tool...$(RESET)"; \
|
||||
uv tool install pre-commit; \
|
||||
echo " pre-commit installed"; \
|
||||
else \
|
||||
echo " pre-commit already installed"; \
|
||||
fi
|
||||
@echo "$(YELLOW)Installing git hooks with pre-commit...$(RESET)"
|
||||
@if [ -d .git ] && command -v pre-commit >/dev/null 2>&1; then \
|
||||
if ~/.local/bin/pre-commit install 2>/dev/null || pre-commit install 2>/dev/null; then \
|
||||
echo " Git hooks installed"; \
|
||||
fi; \
|
||||
fi
|
||||
@echo "$(YELLOW)Installing Python dependencies from pyproject.toml...$(RESET)"
|
||||
@uv sync --all-extras
|
||||
@echo " Python dependencies installed"
|
||||
@echo "$(GREEN)✅ All tools installed/updated$(RESET)"
|
||||
|
||||
# Development targets
|
||||
dev: ## Development workflow - format then lint
|
||||
@$(MAKE) format
|
||||
@$(MAKE) lint
|
||||
|
||||
dev-python: ## Python development workflow - format, lint, test
|
||||
@echo "$(BLUE)🐍 Running Python development workflow...$(RESET)"
|
||||
@$(MAKE) format-python
|
||||
@$(MAKE) lint-python
|
||||
@$(MAKE) test-python
|
||||
|
||||
ci: check docs lint ## CI workflow - check, docs, lint (no formatting)
|
||||
@echo "$(GREEN)✅ CI workflow completed$(RESET)"
|
||||
|
||||
# Statistics
|
||||
stats: ## Show repository statistics
|
||||
@echo "$(BLUE)📊 Repository Statistics$(RESET)"
|
||||
@printf "%-20s %6s\n" "Actions:" "$(shell find . -mindepth 2 -maxdepth 2 -name "action.yml" | wc -l | tr -d ' ')"
|
||||
@printf "%-20s %6s\n" "Shell scripts:" "$(shell find . -name "*.sh" | wc -l | tr -d ' ')"
|
||||
@printf "%-20s %6s\n" "YAML files:" "$(shell find . -name "*.yml" -o -name "*.yaml" | wc -l | tr -d ' ')"
|
||||
@printf "%-20s %6s\n" "Markdown files:" "$(shell find . -name "*.md" | wc -l | tr -d ' ')"
|
||||
@printf "%-20s %6s\n" "Total files:" "$(shell find . -type f | wc -l | tr -d ' ')"
|
||||
|
||||
# Watch mode for development
|
||||
# Testing targets
|
||||
test: test-python test-update-validators test-actions ## Run all tests (Python + Update validators + GitHub Actions)
|
||||
@echo "$(GREEN)✅ All tests completed$(RESET)"
|
||||
|
||||
test-actions: ## Run GitHub Actions tests (unit + integration)
|
||||
@echo "$(BLUE)🧪 Running GitHub Actions tests...$(RESET)"
|
||||
@if ./_tests/run-tests.sh --type all --format console; then \
|
||||
echo "$(GREEN)✅ All GitHub Actions tests passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Some GitHub Actions tests failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
test-python: ## Run Python validation tests
|
||||
@echo "$(BLUE)🐍 Running Python tests...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
if uv run pytest -v --tb=short; then \
|
||||
echo "$(GREEN)✅ Python tests passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Python tests failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ uv not available, skipping Python tests$(RESET)"; \
|
||||
fi
|
||||
|
||||
test-python-coverage: ## Run Python tests with coverage
|
||||
@echo "$(BLUE)📊 Running Python tests with coverage...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
uv run pytest --cov=validate-inputs --cov-report=term-missing; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ uv not available, skipping Python coverage tests$(RESET)"; \
|
||||
fi
|
||||
|
||||
test-update-validators: ## Run tests for update-validators.py script
|
||||
@echo "$(BLUE)🔧 Running update-validators.py tests...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
if uv run pytest validate-inputs/tests/test_update_validators.py -v --tb=short; then \
|
||||
echo "$(GREEN)✅ Update-validators tests passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Update-validators tests failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(BLUE)ℹ️ uv not available, skipping update-validators tests$(RESET)"; \
|
||||
fi
|
||||
|
||||
test-unit: ## Run unit tests only
|
||||
@echo "$(BLUE)🔬 Running unit tests...$(RESET)"
|
||||
@./_tests/run-tests.sh --type unit --format console
|
||||
|
||||
test-integration: ## Run integration tests only
|
||||
@echo "$(BLUE)🔗 Running integration tests...$(RESET)"
|
||||
@./_tests/run-tests.sh --type integration --format console
|
||||
|
||||
test-coverage: ## Run tests with coverage reporting
|
||||
@echo "$(BLUE)📊 Running tests with coverage...$(RESET)"
|
||||
@./_tests/run-tests.sh --type all --coverage --format console
|
||||
|
||||
test-action: ## Run tests for specific action (usage: make test-action ACTION=node-setup)
|
||||
@if [ -z "$(ACTION)" ]; then \
|
||||
echo "$(RED)❌ Error: ACTION parameter required$(RESET)"; \
|
||||
echo "Usage: make test-action ACTION=node-setup"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(BLUE)🎯 Running tests for action: $(ACTION)$(RESET)"
|
||||
@./_tests/run-tests.sh --action $(ACTION) --format console
|
||||
|
||||
generate-tests: ## Generate missing tests for actions and validators (won't overwrite existing tests)
|
||||
@echo "$(BLUE)🧪 Generating missing tests...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
if uv run validate-inputs/scripts/generate-tests.py; then \
|
||||
echo "$(GREEN)✅ Test generation completed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Test generation failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
generate-tests-dry: ## Preview what tests would be generated without creating files
|
||||
@echo "$(BLUE)👁️ Preview test generation (dry run)...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
uv run validate-inputs/scripts/generate-tests.py --dry-run --verbose; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
test-generate-tests: ## Test the test generation system itself
|
||||
@echo "$(BLUE)🔬 Testing test generation system...$(RESET)"
|
||||
@if command -v uv >/dev/null 2>&1; then \
|
||||
if uv run pytest validate-inputs/tests/test_generate_tests.py -v; then \
|
||||
echo "$(GREEN)✅ Test generation tests passed$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Test generation tests failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(RED)❌ uv not found. Please install uv (see 'make install-tools')$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Docker targets
|
||||
docker-build: ## Build the testing-tools Docker image
|
||||
@echo "$(BLUE)🐳 Building testing-tools Docker image...$(RESET)"
|
||||
@if ! command -v docker >/dev/null 2>&1; then \
|
||||
echo "$(RED)❌ Docker not found. Please install Docker.$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if bash _tools/docker-testing-tools/build.sh; then \
|
||||
echo "$(GREEN)✅ Docker image built successfully$(RESET)"; \
|
||||
else \
|
||||
echo "$(RED)❌ Docker build failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
docker-test: ## Test the Docker image locally
|
||||
@echo "$(BLUE)🧪 Testing Docker image...$(RESET)"
|
||||
@if ! command -v docker >/dev/null 2>&1; then \
|
||||
echo "$(RED)❌ Docker not found$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "$(BLUE)Testing basic functionality...$(RESET)"
|
||||
@docker run --rm ghcr.io/ivuorinen/actions:testing-tools whoami
|
||||
@docker run --rm ghcr.io/ivuorinen/actions:testing-tools shellspec --version
|
||||
@docker run --rm ghcr.io/ivuorinen/actions:testing-tools act --version
|
||||
@echo "$(GREEN)✅ Docker image tests passed$(RESET)"
|
||||
|
||||
docker-login: ## Authenticate with GitHub Container Registry
|
||||
@echo "$(BLUE)🔐 Authenticating with ghcr.io...$(RESET)"
|
||||
@TOKEN=""; \
|
||||
TOKEN_SOURCE=""; \
|
||||
if [ -n "$${GITHUB_TOKEN-}" ]; then \
|
||||
echo "$(BLUE)Using GITHUB_TOKEN from environment$(RESET)"; \
|
||||
TOKEN="$${GITHUB_TOKEN}"; \
|
||||
TOKEN_SOURCE="env"; \
|
||||
elif command -v gh >/dev/null 2>&1 && gh auth status >/dev/null 2>&1; then \
|
||||
echo "$(BLUE)Using token from GitHub CLI (gh)$(RESET)"; \
|
||||
TOKEN=$$(gh auth token); \
|
||||
TOKEN_SOURCE="gh"; \
|
||||
else \
|
||||
echo "$(RED)❌ No authentication method available$(RESET)"; \
|
||||
echo ""; \
|
||||
echo "$(YELLOW)To authenticate with ghcr.io, you need a token with 'write:packages' scope$(RESET)"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)Option 1: Use environment variable$(RESET)"; \
|
||||
echo " export GITHUB_TOKEN=ghp_xxxxxxxxxxxx"; \
|
||||
echo " make docker-login"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)Option 2: Use GitHub CLI with proper scopes$(RESET)"; \
|
||||
echo " gh auth login --scopes 'write:packages'"; \
|
||||
echo " make docker-login"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)Option 3: Create a Personal Access Token$(RESET)"; \
|
||||
echo " 1. Go to: https://github.com/settings/tokens/new"; \
|
||||
echo " 2. Check: write:packages (includes read:packages)"; \
|
||||
echo " 3. Generate token and use with Option 1"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
if printf '%s' "$${TOKEN}" | docker login ghcr.io -u ivuorinen --password-stdin 2>&1 | tee /tmp/docker-login.log | grep -q "Login Succeeded"; then \
|
||||
echo "$(GREEN)✅ Successfully authenticated with ghcr.io$(RESET)"; \
|
||||
rm -f /tmp/docker-login.log; \
|
||||
else \
|
||||
echo "$(RED)❌ Authentication failed$(RESET)"; \
|
||||
echo ""; \
|
||||
if grep -q "scope" /tmp/docker-login.log 2>/dev/null; then \
|
||||
echo "$(YELLOW)⚠️ Token does not have required 'write:packages' scope$(RESET)"; \
|
||||
echo ""; \
|
||||
if [ "$$TOKEN_SOURCE" = "gh" ]; then \
|
||||
echo "$(BLUE)GitHub CLI tokens need package permissions.$(RESET)"; \
|
||||
echo ""; \
|
||||
if [ -n "$${GITHUB_TOKEN-}" ]; then \
|
||||
echo "$(YELLOW)Note: GITHUB_TOKEN is set in your environment, which prevents gh auth refresh.$(RESET)"; \
|
||||
echo "Clear it first, then refresh:"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)For Fish shell:$(RESET)"; \
|
||||
echo " set -e GITHUB_TOKEN"; \
|
||||
echo " gh auth refresh --scopes 'write:packages'"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)For Bash/Zsh:$(RESET)"; \
|
||||
echo " unset GITHUB_TOKEN"; \
|
||||
echo " gh auth refresh --scopes 'write:packages'"; \
|
||||
else \
|
||||
echo "Run:"; \
|
||||
echo " gh auth refresh --scopes 'write:packages'"; \
|
||||
fi; \
|
||||
echo ""; \
|
||||
echo "Then try again:"; \
|
||||
echo " make docker-login"; \
|
||||
else \
|
||||
echo "Your GITHUB_TOKEN needs 'write:packages' scope."; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)Create a new token:$(RESET)"; \
|
||||
echo " 1. Go to: https://github.com/settings/tokens/new"; \
|
||||
echo " 2. Check: write:packages (includes read:packages)"; \
|
||||
echo " 3. Generate and copy the token"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)For Fish shell:$(RESET)"; \
|
||||
echo " set -gx GITHUB_TOKEN ghp_xxxxxxxxxxxx"; \
|
||||
echo ""; \
|
||||
echo "$(GREEN)For Bash/Zsh:$(RESET)"; \
|
||||
echo " export GITHUB_TOKEN=ghp_xxxxxxxxxxxx"; \
|
||||
fi; \
|
||||
fi; \
|
||||
rm -f /tmp/docker-login.log; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
docker-push: ## Push the testing-tools image to ghcr.io
|
||||
@echo "$(BLUE)📤 Pushing Docker image to ghcr.io...$(RESET)"
|
||||
@if ! command -v docker >/dev/null 2>&1; then \
|
||||
echo "$(RED)❌ Docker not found$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if ! docker images ghcr.io/ivuorinen/actions:testing-tools -q | grep -q .; then \
|
||||
echo "$(RED)❌ Image not found. Run 'make docker-build' first$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@PUSH_OUTPUT=$$(docker push ghcr.io/ivuorinen/actions:testing-tools 2>&1); \
|
||||
PUSH_EXIT=$$?; \
|
||||
echo "$${PUSH_OUTPUT}"; \
|
||||
if [ $$PUSH_EXIT -ne 0 ]; then \
|
||||
echo ""; \
|
||||
if echo "$${PUSH_OUTPUT}" | grep -q "scope"; then \
|
||||
echo "$(RED)❌ Token does not have required 'write:packages' scope$(RESET)"; \
|
||||
echo ""; \
|
||||
echo "$(YELLOW)Fix the authentication:$(RESET)"; \
|
||||
echo ""; \
|
||||
if [ -n "$${GITHUB_TOKEN-}" ]; then \
|
||||
echo "$(BLUE)Option 1: Clear GITHUB_TOKEN and use gh auth$(RESET)"; \
|
||||
echo ""; \
|
||||
echo "For Fish shell:"; \
|
||||
echo " set -e GITHUB_TOKEN"; \
|
||||
echo " gh auth refresh --scopes 'write:packages'"; \
|
||||
echo " make docker-push"; \
|
||||
echo ""; \
|
||||
echo "For Bash/Zsh:"; \
|
||||
echo " unset GITHUB_TOKEN"; \
|
||||
echo " gh auth refresh --scopes 'write:packages'"; \
|
||||
echo " make docker-push"; \
|
||||
echo ""; \
|
||||
echo "$(BLUE)Option 2: Create a new token with write:packages scope$(RESET)"; \
|
||||
else \
|
||||
echo "$(BLUE)Option 1: Use GitHub CLI$(RESET)"; \
|
||||
echo " gh auth refresh --scopes 'write:packages'"; \
|
||||
echo " make docker-push"; \
|
||||
echo ""; \
|
||||
echo "$(BLUE)Option 2: Use Personal Access Token$(RESET)"; \
|
||||
fi; \
|
||||
echo " 1. Go to: https://github.com/settings/tokens/new"; \
|
||||
echo " 2. Check: write:packages"; \
|
||||
echo " 3. Generate and copy token"; \
|
||||
echo ""; \
|
||||
echo " For Fish shell:"; \
|
||||
echo " set -gx GITHUB_TOKEN ghp_xxxxxxxxxxxx"; \
|
||||
echo " make docker-push"; \
|
||||
echo ""; \
|
||||
echo " For Bash/Zsh:"; \
|
||||
echo " export GITHUB_TOKEN=ghp_xxxxxxxxxxxx"; \
|
||||
echo " make docker-push"; \
|
||||
exit 1; \
|
||||
elif echo "$${PUSH_OUTPUT}" | grep -q "denied\|unauthorized"; then \
|
||||
echo "$(YELLOW)⚠️ Authentication required. Attempting login...$(RESET)"; \
|
||||
if $(MAKE) docker-login; then \
|
||||
echo ""; \
|
||||
echo "$(BLUE)Retrying push...$(RESET)"; \
|
||||
if ! docker push ghcr.io/ivuorinen/actions:testing-tools; then \
|
||||
echo "$(RED)❌ Retry push failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "$(RED)❌ Push failed$(RESET)"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
fi
|
||||
@echo "$(GREEN)✅ Image pushed successfully$(RESET)"
|
||||
@echo ""
|
||||
@echo "Image available at:"
|
||||
@echo " ghcr.io/ivuorinen/actions:testing-tools"
|
||||
|
||||
docker-all: docker-build docker-test docker-push ## Build, test, and push Docker image
|
||||
@echo "$(GREEN)✅ All Docker operations completed$(RESET)"
|
||||
|
||||
watch: ## Watch files and auto-format on changes (requires entr)
|
||||
@if command -v entr >/dev/null 2>&1; then \
|
||||
echo "$(BLUE)👀 Watching for changes... (press Ctrl+C to stop)$(RESET)"; \
|
||||
find . \( -name "*.yml" -o -name "*.yaml" -o -name "*.md" -o -name "*.sh" \) \
|
||||
-not -path "./_tests/*" -not -path "./.worktrees/*" -not -path "./node_modules/*" | \
|
||||
entr -c $(MAKE) format; \
|
||||
else \
|
||||
echo "$(RED)❌ Error: entr not found. Install with: brew install entr$(RESET)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
README.md (382 changed lines)
@@ -2,105 +2,359 @@
|
||||
|
||||
## Overview
|
||||
|
||||
This project contains a collection of workflows and composable actions to streamline CI/CD processes and ensure code quality. The actions are grouped by purpose for easier discovery.
|
||||
This repository contains a collection of reusable GitHub Actions
|
||||
designed to streamline CI/CD processes and ensure code quality.
|
||||
|
||||
## Setup & Caching
|
||||
Each action is fully self-contained and can be used independently in any GitHub repository.
|
||||
|
||||
- [Node Setup][node-setup]: Sets up Node.js with caching and tooling.
|
||||
- [PHP Composer][php-composer]: Installs PHP dependencies using Composer.
|
||||
- [Dotnet Version Detect][dotnet-v-detect]: Detects the required .NET version from `global.json`.
|
||||
- [Go Version Detect][go-version-detect]: Detects the required Go version from configuration files.
|
||||
- [Common Cache][common-cache]: Provides a consistent caching strategy for multiple languages.
|
||||
- [Set Git Config][set-git-config]: Configures Git user information for automated commits.
|
||||
### Key Features
|
||||
|
||||
## Linting & Formatting
|
||||
- **Production-Ready Actions** covering setup, linting, building, testing, and deployment
|
||||
- **Self-Contained Design** - each action works independently without dependencies
|
||||
- **External Usage Ready** - use any action with pinned refs: `ivuorinen/actions/action-name@2025-01-15` or `@<commit-sha>` for supply-chain security
|
||||
- **Multi-Language Support** including Node.js, PHP, Python, Go, C#, and more
|
||||
- **Standardized Patterns** with consistent error handling and input/output interfaces
|
||||
- **Comprehensive Testing** with dual testing framework (ShellSpec + pytest)
|
||||
- **Modular Build System** using Makefile for development and maintenance
|
||||
|
||||
### Code Linting
|
||||
<!--LISTING-->
|
||||
<!-- This section is auto-generated. Run 'npm run update-catalog' to update. -->
|
||||
|
||||
- [Ansible Lint and Fix][ansible-lint-fix]: Lints and fixes Ansible playbooks and roles.
|
||||
- [Biome Check][biome-check]: Runs Biome to lint multiple languages and formats.
|
||||
- [Biome Fix][biome-fix]: Automatically fixes issues detected by Biome.
|
||||
- [C# Lint Check][csharp-lint-check]: Lints C# code using tools like `dotnet-format`.
|
||||
- [ESLint Check][eslint-check]: Runs ESLint to check for code style violations.
|
||||
- [ESLint Fix][eslint-fix]: Automatically fixes code style issues with ESLint.
|
||||
- [Go Lint Check][go-lint]: Lints Go code using `golangci-lint`.
|
||||
- [PR Lint][pr-lint]: Runs MegaLinter against pull requests.
|
||||
- [Python Lint and Fix][python-lint-fix]: Lints and fixes Python code using `flake8` and `black`.
|
||||
- [Terraform Lint and Fix][terraform-lint-fix]: Lints and fixes Terraform configurations.
|
||||
## 📚 Action Catalog
|
||||
|
||||
### Code Formatting
|
||||
This repository contains **26 reusable GitHub Actions** for CI/CD automation.
|
||||
|
||||
- [Prettier Check][prettier-check]: Checks code formatting using Prettier.
|
||||
- [Prettier Fix][prettier-fix]: Automatically fixes code formatting with Prettier.
|
||||
- [Pre-Commit][pre-commit]: Runs `pre-commit` hooks to enforce code quality standards.
|
||||
### Quick Reference (26 Actions)
|
||||
|
||||
## Testing
|
||||
| Icon | Action | Category | Description | Key Features |
|
||||
|:----:|:-----------------------------------------------------|:-----------|:----------------------------------------------------------------|:---------------------------------------------|
|
||||
| 🔀 | [`action-versioning`][action-versioning] | Utilities | Automatically update SHA-pinned action references to match l... | Token auth, Outputs |
|
||||
| 📦 | [`ansible-lint-fix`][ansible-lint-fix] | Linting | Lints and fixes Ansible playbooks, commits changes, and uplo... | Caching, Token auth, Outputs |
|
||||
| ✅ | [`biome-lint`][biome-lint] | Linting | Run Biome linter in check or fix mode | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 🛡️ | [`codeql-analysis`][codeql-analysis] | Repository | Run CodeQL security analysis for a single language with conf... | Auto-detection, Token auth, Outputs |
|
||||
| 🖼️ | [`compress-images`][compress-images] | Repository | Compress images on demand (workflow_dispatch), and at 11pm e... | Token auth, Outputs |
|
||||
| 📝 | [`csharp-build`][csharp-build] | Build | Builds and tests C# projects. | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📝 | [`csharp-lint-check`][csharp-lint-check] | Linting | Runs linters like StyleCop or dotnet-format for C# code styl... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 | [`csharp-publish`][csharp-publish] | Publishing | Publishes a C# project to GitHub Packages. | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 | [`docker-build`][docker-build] | Build | Builds a Docker image for multiple architectures with enhanc... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| ☁️ | [`docker-publish`][docker-publish] | Publishing | Simple wrapper to publish Docker images to GitHub Packages a... | Token auth, Outputs |
|
||||
| ✅ | [`eslint-lint`][eslint-lint] | Linting | Run ESLint in check or fix mode with advanced configuration ... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 | [`go-build`][go-build] | Build | Builds the Go project. | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📝 | [`go-lint`][go-lint] | Linting | Run golangci-lint with advanced configuration, caching, and ... | Caching, Token auth, Outputs |
|
||||
| 📝 | [`language-version-detect`][language-version-detect] | Setup | DEPRECATED: This action is deprecated. Inline version detect... | Auto-detection, Token auth, Outputs |
|
||||
| 📦 | [`npm-publish`][npm-publish] | Publishing | Publishes the package to the NPM registry with configurable ... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| ✅ | [`php-tests`][php-tests] | Testing | Run PHPUnit tests with optional Laravel setup and Composer d... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| ✅ | [`pr-lint`][pr-lint] | Linting | Runs MegaLinter against pull requests | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 | [`pre-commit`][pre-commit] | Linting | Runs pre-commit on the repository and pushes the fixes back ... | Auto-detection, Token auth, Outputs |
|
||||
| ✅ | [`prettier-lint`][prettier-lint] | Linting | Run Prettier in check or fix mode with advanced configuratio... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📝 | [`python-lint-fix`][python-lint-fix] | Linting | Lints and fixes Python files, commits changes, and uploads S... | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 | [`release-monthly`][release-monthly] | Repository | Creates a release for the current month, incrementing patch ... | Token auth, Outputs |
|
||||
| 🛡️ | [`security-scan`][security-scan] | Security | Comprehensive security scanning for GitHub Actions including... | Caching, Token auth, Outputs |
|
||||
| 📦 | [`stale`][stale] | Repository | A GitHub Action to close stale issues and pull requests. | Token auth, Outputs |
|
||||
| 🏷️ | [`sync-labels`][sync-labels] | Repository | Sync labels from a YAML file to a GitHub repository | Token auth, Outputs |
|
||||
| 🖥️ | [`terraform-lint-fix`][terraform-lint-fix] | Linting | Lints and fixes Terraform files with advanced validation and... | Token auth, Outputs |
|
||||
| 🛡️ | [`validate-inputs`][validate-inputs] | Validation | Centralized Python-based input validation for GitHub Actions... | Token auth, Outputs |
|
||||
|
||||
- [PHP Tests][php-tests]: Runs PHPUnit tests to ensure PHP code correctness.
|
||||
- [Laravel PHPUnit][php-laravel-phpunit]: Sets up Laravel and runs Composer tests.
|
||||
### Actions by Category
|
||||
|
||||
## Build & Package
|
||||
#### 🔧 Setup (1 action)
|
||||
|
||||
- [C# Build][csharp-build]: Builds C# projects using the .NET SDK.
|
||||
- [Go Build][go-build]: Builds Go projects using the `go build` command.
|
||||
- [Docker Build][docker-build]: Builds Docker images using a Dockerfile.
|
||||
| Action | Description | Languages | Features |
|
||||
|:--------------------------------------------------------|:------------------------------------------------------|:-------------------------------|:------------------------------------|
|
||||
| 📝 [`language-version-detect`][language-version-detect] | DEPRECATED: This action is deprecated. Inline vers... | PHP, Python, Go, .NET, Node.js | Auto-detection, Token auth, Outputs |
|
||||
|
||||
## Publish & Deployment
|
||||
#### 🛠️ Utilities (1 action)
|
||||
|
||||
- [C# Publish][csharp-publish]: Publishes .NET projects to an output directory.
|
||||
- [Docker Publish][docker-publish]: Publishes Docker images to GitHub Packages and Docker Hub.
|
||||
- [Docker Publish to Docker Hub][docker-publish-hub]: Publishes Docker images to Docker Hub.
|
||||
- [Docker Publish to GitHub Packages][docker-publish-gh]: Publishes Docker images to GitHub's Container Registry.
|
||||
- [Publish to NPM][npm-publish]: Publishes packages to the NPM registry.
|
||||
| Action | Description | Languages | Features |
|
||||
|:--------------------------------------------|:------------------------------------------------------|:---------------|:--------------------|
|
||||
| 🔀 [`action-versioning`][action-versioning] | Automatically update SHA-pinned action references ... | GitHub Actions | Token auth, Outputs |
|
||||
|
||||
## Release Management
|
||||
#### 📝 Linting (10 actions)
|
||||
|
||||
- [GitHub Release][github-release]: Automates GitHub release creation with custom tags and notes.
|
||||
- [Release Monthly][release-monthly]: Creates a monthly GitHub release with autogenerated notes.
|
||||
| Action | Description | Languages | Features |
|
||||
|:-----------------------------------------------|:------------------------------------------------------|:---------------------------------------------|:---------------------------------------------|
|
||||
| 📦 [`ansible-lint-fix`][ansible-lint-fix] | Lints and fixes Ansible playbooks, commits changes... | Ansible, YAML | Caching, Token auth, Outputs |
|
||||
| ✅ [`biome-lint`][biome-lint] | Run Biome linter in check or fix mode | JavaScript, TypeScript, JSON | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📝 [`csharp-lint-check`][csharp-lint-check] | Runs linters like StyleCop or dotnet-format for C#... | C#, .NET | Caching, Auto-detection, Token auth, Outputs |
|
||||
| ✅ [`eslint-lint`][eslint-lint] | Run ESLint in check or fix mode with advanced conf... | JavaScript, TypeScript | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📝 [`go-lint`][go-lint] | Run golangci-lint with advanced configuration, cac... | Go | Caching, Token auth, Outputs |
|
||||
| ✅ [`pr-lint`][pr-lint] | Runs MegaLinter against pull requests | Conventional Commits | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 [`pre-commit`][pre-commit] | Runs pre-commit on the repository and pushes the f... | Python, Multiple Languages | Auto-detection, Token auth, Outputs |
|
||||
| ✅ [`prettier-lint`][prettier-lint] | Run Prettier in check or fix mode with advanced co... | JavaScript, TypeScript, Markdown, YAML, JSON | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📝 [`python-lint-fix`][python-lint-fix] | Lints and fixes Python files, commits changes, and... | Python | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 🖥️ [`terraform-lint-fix`][terraform-lint-fix] | Lints and fixes Terraform files with advanced vali... | Terraform, HCL | Token auth, Outputs |
|
||||
|
||||
## Repository Maintenance
|
||||
#### 🧪 Testing (1 action)
|
||||
|
||||
- [Common File Check][common-file-check]: Checks for the presence of specific files based on a glob pattern.
|
||||
- [Compress Images][compress-images]: Optimizes and creates a pull request with compressed images.
|
||||
- [Stale][stale]: Closes stale issues and pull requests automatically.
|
||||
- [Sync Labels][sync-labels]: Syncs repository labels from a YAML file.
|
||||
| Action | Description | Languages | Features |
|
||||
|:---------------------------|:------------------------------------------------------|:-------------|:---------------------------------------------|
|
||||
| ✅ [`php-tests`][php-tests] | Run PHPUnit tests with optional Laravel setup and ... | PHP, Laravel | Caching, Auto-detection, Token auth, Outputs |
|
||||
|
||||
## License
|
||||
#### 🏗️ Build (3 actions)
|
||||
|
||||
This project is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for details.
|
||||
| Action | Description | Languages | Features |
|
||||
|:----------------------------------|:------------------------------------------------------|:----------|:---------------------------------------------|
|
||||
| 📝 [`csharp-build`][csharp-build] | Builds and tests C# projects. | C#, .NET | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 [`docker-build`][docker-build] | Builds a Docker image for multiple architectures w... | Docker | Caching, Auto-detection, Token auth, Outputs |
|
||||
| 📦 [`go-build`][go-build] | Builds the Go project. | Go | Caching, Auto-detection, Token auth, Outputs |
|
||||
|
||||
#### 🚀 Publishing (3 actions)
|
||||
|
||||
| Action | Description | Languages | Features |
|
||||
|:--------------------------------------|:------------------------------------------------------|:-------------|:---------------------------------------------|
|
||||
| 📦 [`csharp-publish`][csharp-publish] | Publishes a C# project to GitHub Packages. | C#, .NET | Caching, Auto-detection, Token auth, Outputs |
|
||||
| ☁️ [`docker-publish`][docker-publish] | Simple wrapper to publish Docker images to GitHub ... | Docker | Token auth, Outputs |
|
||||
| 📦 [`npm-publish`][npm-publish] | Publishes the package to the NPM registry with con... | Node.js, npm | Caching, Auto-detection, Token auth, Outputs |
|
||||
|
||||
#### 📦 Repository (5 actions)
|
||||
|
||||
| Action | Description | Languages | Features |
|
||||
|:-----------------------------------------|:------------------------------------------------------|:--------------------------------------------------------|:------------------------------------|
|
||||
| 🛡️ [`codeql-analysis`][codeql-analysis] | Run CodeQL security analysis for a single language... | JavaScript, TypeScript, Python, Java, C#, C++, Go, Ruby | Auto-detection, Token auth, Outputs |
|
||||
| 🖼️ [`compress-images`][compress-images] | Compress images on demand (workflow_dispatch), and... | Images, PNG, JPEG | Token auth, Outputs |
|
||||
| 📦 [`release-monthly`][release-monthly] | Creates a release for the current month, increment... | GitHub Actions | Token auth, Outputs |
|
||||
| 📦 [`stale`][stale] | A GitHub Action to close stale issues and pull req... | GitHub Actions | Token auth, Outputs |
|
||||
| 🏷️ [`sync-labels`][sync-labels] | Sync labels from a YAML file to a GitHub repositor... | YAML, GitHub | Token auth, Outputs |
|
||||
|
||||
#### 🛡️ Security (1 action)
|
||||
|
||||
| Action | Description | Languages | Features |
|
||||
|:-------------------------------------|:------------------------------------------------------|:----------|:-----------------------------|
|
||||
| 🛡️ [`security-scan`][security-scan] | Comprehensive security scanning for GitHub Actions... | - | Caching, Token auth, Outputs |
|
||||
|
||||
#### ✅ Validation (1 action)
|
||||
|
||||
| Action | Description | Languages | Features |
|
||||
|:-----------------------------------------|:------------------------------------------------------|:---------------------|:--------------------|
|
||||
| 🛡️ [`validate-inputs`][validate-inputs] | Centralized Python-based input validation for GitH... | YAML, GitHub Actions | Token auth, Outputs |
|
||||
|
||||
### Feature Matrix
|
||||
|
||||
| Action | Caching | Auto-detection | Token auth | Outputs |
|
||||
|:-----------------------------------------------------|:-------:|:--------------:|:----------:|:-------:|
|
||||
| [`action-versioning`][action-versioning] | - | - | ✅ | ✅ |
|
||||
| [`ansible-lint-fix`][ansible-lint-fix] | ✅ | - | ✅ | ✅ |
|
||||
| [`biome-lint`][biome-lint] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`codeql-analysis`][codeql-analysis] | - | ✅ | ✅ | ✅ |
|
||||
| [`compress-images`][compress-images] | - | - | ✅ | ✅ |
|
||||
| [`csharp-build`][csharp-build] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`csharp-lint-check`][csharp-lint-check] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`csharp-publish`][csharp-publish] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`docker-build`][docker-build] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`docker-publish`][docker-publish] | - | - | ✅ | ✅ |
|
||||
| [`eslint-lint`][eslint-lint] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`go-build`][go-build] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`go-lint`][go-lint] | ✅ | - | ✅ | ✅ |
|
||||
| [`language-version-detect`][language-version-detect] | - | ✅ | ✅ | ✅ |
|
||||
| [`npm-publish`][npm-publish] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`php-tests`][php-tests] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`pr-lint`][pr-lint] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`pre-commit`][pre-commit] | - | ✅ | ✅ | ✅ |
|
||||
| [`prettier-lint`][prettier-lint] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`python-lint-fix`][python-lint-fix] | ✅ | ✅ | ✅ | ✅ |
|
||||
| [`release-monthly`][release-monthly] | - | - | ✅ | ✅ |
|
||||
| [`security-scan`][security-scan] | ✅ | - | ✅ | ✅ |
|
||||
| [`stale`][stale] | - | - | ✅ | ✅ |
|
||||
| [`sync-labels`][sync-labels] | - | - | ✅ | ✅ |
|
||||
| [`terraform-lint-fix`][terraform-lint-fix] | - | - | ✅ | ✅ |
|
||||
| [`validate-inputs`][validate-inputs] | - | - | ✅ | ✅ |
|
||||
|
||||
### Language Support
|
||||
|
||||
| Language | Actions |
|
||||
|:---------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| .NET | [`csharp-build`][csharp-build], [`csharp-lint-check`][csharp-lint-check], [`csharp-publish`][csharp-publish], [`language-version-detect`][language-version-detect] |
|
||||
| Ansible | [`ansible-lint-fix`][ansible-lint-fix] |
|
||||
| C# | [`codeql-analysis`][codeql-analysis], [`csharp-build`][csharp-build], [`csharp-lint-check`][csharp-lint-check], [`csharp-publish`][csharp-publish] |
|
||||
| C++ | [`codeql-analysis`][codeql-analysis] |
|
||||
| Conventional Commits | [`pr-lint`][pr-lint] |
|
||||
| Docker | [`docker-build`][docker-build], [`docker-publish`][docker-publish] |
|
||||
| GitHub | [`sync-labels`][sync-labels] |
|
||||
| GitHub Actions | [`action-versioning`][action-versioning], [`release-monthly`][release-monthly], [`stale`][stale], [`validate-inputs`][validate-inputs] |
|
||||
| Go | [`codeql-analysis`][codeql-analysis], [`go-build`][go-build], [`go-lint`][go-lint], [`language-version-detect`][language-version-detect] |
|
||||
| HCL | [`terraform-lint-fix`][terraform-lint-fix] |
|
||||
| Images | [`compress-images`][compress-images] |
|
||||
| JPEG | [`compress-images`][compress-images] |
|
||||
| JSON | [`biome-lint`][biome-lint], [`prettier-lint`][prettier-lint] |
|
||||
| Java | [`codeql-analysis`][codeql-analysis] |
|
||||
| JavaScript | [`biome-lint`][biome-lint], [`codeql-analysis`][codeql-analysis], [`eslint-lint`][eslint-lint], [`prettier-lint`][prettier-lint] |
|
||||
| Laravel | [`php-tests`][php-tests] |
|
||||
| Markdown | [`prettier-lint`][prettier-lint] |
|
||||
| Multiple Languages | [`pre-commit`][pre-commit] |
|
||||
| Node.js | [`language-version-detect`][language-version-detect], [`npm-publish`][npm-publish] |
|
||||
| PHP | [`language-version-detect`][language-version-detect], [`php-tests`][php-tests] |
|
||||
| PNG | [`compress-images`][compress-images] |
|
||||
| Python | [`codeql-analysis`][codeql-analysis], [`language-version-detect`][language-version-detect], [`pre-commit`][pre-commit], [`python-lint-fix`][python-lint-fix] |
|
||||
| Ruby | [`codeql-analysis`][codeql-analysis] |
|
||||
| Terraform | [`terraform-lint-fix`][terraform-lint-fix] |
|
||||
| TypeScript | [`biome-lint`][biome-lint], [`codeql-analysis`][codeql-analysis], [`eslint-lint`][eslint-lint], [`prettier-lint`][prettier-lint] |
|
||||
| YAML | [`ansible-lint-fix`][ansible-lint-fix], [`prettier-lint`][prettier-lint], [`sync-labels`][sync-labels], [`validate-inputs`][validate-inputs] |
|
||||
| npm | [`npm-publish`][npm-publish] |
|
||||
|
||||
### Action Usage
|
||||
|
||||
All actions can be used independently in your workflows:
|
||||
|
||||
```yaml
|
||||
# Recommended: Use pinned refs for supply-chain security
|
||||
- uses: ivuorinen/actions/action-name@vYYYY-MM-DD # Date-based tag (example)
|
||||
with:
|
||||
# action-specific inputs
|
||||
|
||||
# Alternative: Use commit SHA for immutability
|
||||
- uses: ivuorinen/actions/action-name@abc123def456 # Full commit SHA
|
||||
with:
|
||||
# action-specific inputs
|
||||
```
|
||||
|
||||
> **Security Note**: Always pin to specific tags or commit SHAs instead of `@main` to ensure reproducible workflows and supply-chain integrity.
|
||||
|
||||
<!-- Reference Links -->
|
||||
|
||||
[action-versioning]: action-versioning/README.md
|
||||
[ansible-lint-fix]: ansible-lint-fix/README.md
|
||||
[biome-check]: biome-check/README.md
|
||||
[biome-fix]: biome-fix/README.md
|
||||
[common-cache]: common-cache/README.md
|
||||
[common-file-check]: common-file-check/README.md
|
||||
[biome-lint]: biome-lint/README.md
|
||||
[codeql-analysis]: codeql-analysis/README.md
|
||||
[compress-images]: compress-images/README.md
|
||||
[csharp-build]: csharp-build/README.md
|
||||
[csharp-lint-check]: csharp-lint-check/README.md
|
||||
[csharp-publish]: csharp-publish/README.md
|
||||
[docker-build]: docker-build/README.md
|
||||
[docker-publish]: docker-publish/README.md
|
||||
[docker-publish-gh]: docker-publish-gh/README.md
|
||||
[docker-publish-hub]: docker-publish-hub/README.md
|
||||
[dotnet-v-detect]: dotnet-version-detect/README.md
|
||||
[eslint-check]: eslint-check/README.md
|
||||
[eslint-fix]: eslint-fix/README.md
|
||||
[github-release]: github-release/README.md
|
||||
[eslint-lint]: eslint-lint/README.md
|
||||
[go-build]: go-build/README.md
|
||||
[go-lint]: go-lint/README.md
|
||||
[go-version-detect]: go-version-detect/README.md
|
||||
[node-setup]: node-setup/README.md
|
||||
[language-version-detect]: language-version-detect/README.md
|
||||
[npm-publish]: npm-publish/README.md
|
||||
[php-composer]: php-composer/README.md
|
||||
[php-laravel-phpunit]: php-laravel-phpunit/README.md
|
||||
[php-tests]: php-tests/README.md
|
||||
[pr-lint]: pr-lint/README.md
|
||||
[pre-commit]: pre-commit/README.md
|
||||
[prettier-check]: prettier-check/README.md
|
||||
[prettier-fix]: prettier-fix/README.md
|
||||
[prettier-lint]: prettier-lint/README.md
|
||||
[python-lint-fix]: python-lint-fix/README.md
|
||||
[release-monthly]: release-monthly/README.md
|
||||
[set-git-config]: set-git-config/README.md
|
||||
[security-scan]: security-scan/README.md
|
||||
[stale]: stale/README.md
|
||||
[sync-labels]: sync-labels/README.md
|
||||
[terraform-lint-fix]: terraform-lint-fix/README.md
|
||||
[validate-inputs]: validate-inputs/README.md
|
||||
|
||||
---
|
||||
|
||||
<!--/LISTING-->
|
||||
|
||||
## Usage
|
||||
|
||||
### Using Actions Externally
|
||||
|
||||
All actions in this repository can be used in your workflows like any other GitHub Action.
|
||||
|
||||
**⚠️ Security Best Practice**: Always pin actions to specific tags or commit SHAs instead of `@main` to ensure:
|
||||
|
||||
- **Reproducibility**: Workflows behave consistently over time
|
||||
- **Supply-chain integrity**: Protection against unexpected changes or compromises
|
||||
- **Immutability**: Reference exact versions that cannot be modified
|
||||
|
||||
```yaml
|
||||
steps:
|
||||
- name: Setup Node.js with Auto-Detection
|
||||
uses: ivuorinen/actions/node-setup@2025-01-15 # Date-based tag
|
||||
with:
|
||||
default-version: '20'
|
||||
|
||||
- name: Detect PHP Version
|
||||
uses: ivuorinen/actions/php-version-detect@abc123def456 # Commit SHA
|
||||
with:
|
||||
default-version: '8.2'
|
||||
|
||||
- name: Universal Version Parser
|
||||
uses: ivuorinen/actions/version-file-parser@2025-01-15
|
||||
with:
|
||||
language: 'python'
|
||||
tool-versions-key: 'python'
|
||||
dockerfile-image: 'python'
|
||||
version-file: '.python-version'
|
||||
default-version: '3.12'
|
||||
```
|
||||
|
||||
Actions achieve modularity through composition:
|
||||
|
||||
```yaml
|
||||
steps:
|
||||
- name: Parse Version
|
||||
id: parse-version
|
||||
uses: ivuorinen/actions/version-file-parser@2025-01-15
|
||||
with:
|
||||
language: 'node'
|
||||
tool-versions-key: 'nodejs'
|
||||
dockerfile-image: 'node'
|
||||
version-file: '.nvmrc'
|
||||
default-version: '20'
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@<commit-sha> # pin to a full commit SHA
|
||||
with:
|
||||
node-version: ${{ steps.parse-version.outputs.detected-version }}
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
This repository uses a Makefile-based build system for development tasks:
|
||||
|
||||
```bash
|
||||
# Full workflow - docs, format, and lint
|
||||
make all
|
||||
|
||||
# Individual operations
|
||||
make docs # Generate documentation for all actions
|
||||
make format # Format all files (markdown, YAML, JSON)
|
||||
make lint # Run all linters
|
||||
make check # Quick syntax and tool checks
|
||||
|
||||
# Development workflow
|
||||
make dev # Format then lint (good for development)
|
||||
make ci # CI workflow - check, docs, lint
|
||||
```
|
||||
|
||||
### Python Development
|
||||
|
||||
For Python development (validation system), use these specialized commands:
|
||||
|
||||
```bash
|
||||
# Python development workflow
|
||||
make dev-python # Format, lint, and test Python code
|
||||
make test-python # Run Python unit tests
|
||||
make test-python-coverage # Run tests with coverage reporting
|
||||
|
||||
# Individual Python operations
|
||||
make format-python # Format Python files with ruff
|
||||
make lint-python # Lint Python files with ruff
|
||||
```
|
||||
|
||||
The Python validation system (`validate-inputs/`) includes:
|
||||
|
||||
- **CalVer and SemVer Support**: Flexible version validation for different schemes
|
||||
- **Comprehensive Test Suite**: Extensive test cases covering all validation types
|
||||
- **Security Features**: Command injection and path traversal protection
|
||||
- **Performance**: One Python process using the built-in regex engine instead of spawning multiple bash processes
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
# Run all tests (Python + GitHub Actions)
|
||||
make test
|
||||
|
||||
# Run specific test types
|
||||
make test-python # Python validation tests only
|
||||
make test-actions # GitHub Actions tests only
|
||||
make test-action ACTION=node-setup # Test specific action
|
||||
|
||||
# Coverage reporting
|
||||
make test-coverage # All tests with coverage
|
||||
make test-python-coverage # Python tests with coverage
|
||||
```
|
||||
|
||||
For detailed development guidelines, see [CLAUDE.md](CLAUDE.md).
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for details.
|
||||
|
||||
SECURITY.md (new file, 280 lines)
@@ -0,0 +1,280 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
All actions in this repository are actively maintained. Security updates are applied to all actions as needed.
|
||||
|
||||
| Version | Supported |
|
||||
|---------|--------------------|
|
||||
| Latest | :white_check_mark: |
|
||||
|
||||
## Security Features
|
||||
|
||||
This repository implements multiple layers of security controls to protect against common vulnerabilities:
|
||||
|
||||
### 1. Script Injection Prevention
|
||||
|
||||
**Status**: ✅ Implemented across all 43 actions
|
||||
|
||||
All shell scripts use environment variables instead of direct `${{ inputs.* }}` interpolation to prevent command injection attacks.
|
||||
|
||||
**Before** (vulnerable):
|
||||
|
||||
```yaml
|
||||
run: |
|
||||
version="${{ inputs.version }}"
|
||||
echo "Version: $version"
|
||||
```
|
||||
|
||||
**After** (secure):
|
||||
|
||||
```yaml
|
||||
env:
|
||||
VERSION: ${{ inputs.version }}
|
||||
run: |
|
||||
version="$VERSION"
|
||||
echo "Version: $version"
|
||||
```
|
||||
|
||||
### 2. Secret Masking
|
||||
|
||||
**Status**: ✅ Implemented in 7 critical actions
|
||||
|
||||
Actions that handle sensitive data use GitHub Actions secret masking to prevent accidental exposure in logs:
|
||||
|
||||
- `npm-publish` - NPM authentication tokens
|
||||
- `docker-publish` - Docker Hub credentials (defense-in-depth masking)
|
||||
- `docker-publish-hub` - Docker Hub passwords
|
||||
- `docker-publish-gh` - GitHub tokens
|
||||
- `csharp-publish` - NuGet API keys
|
||||
- `php-composer` - Composer authentication tokens
|
||||
- `php-laravel-phpunit` - Database credentials
|
||||
|
||||
**Implementation**:
|
||||
|
||||
```yaml
|
||||
run: |
|
||||
echo "::add-mask::$SECRET_VALUE"
|
||||
```
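
In the actions listed above, the secret is passed in through `env:` and masked before any command can print it. A minimal sketch of the pattern (the step and input names are illustrative, not the exact code from any one action):

```yaml
- name: Authenticate to registry
  shell: bash
  env:
    PUBLISH_TOKEN: ${{ inputs.npm-token }} # illustrative input name
  run: |
    # Mask first so the value never appears in the step log
    echo "::add-mask::$PUBLISH_TOKEN"
    npm config set "//registry.npmjs.org/:_authToken=$PUBLISH_TOKEN"
```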
|
||||
|
||||
### 3. SHA Pinning
|
||||
|
||||
All third-party actions are pinned to specific commit SHAs to prevent supply chain attacks:
|
||||
|
||||
```yaml
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
```
|
||||
|
||||
### 4. Input Validation
|
||||
|
||||
**Status**: ✅ Centralized validation system
|
||||
|
||||
All actions use comprehensive input validation to prevent:
|
||||
|
||||
- Path traversal attacks
|
||||
- Command injection patterns
|
||||
- ReDoS (Regular Expression Denial of Service)
|
||||
- Malformed version strings
|
||||
- Invalid URLs and file paths
|
||||
|
||||
**Key validation patterns**:
|
||||
|
||||
- Version strings: Semantic versioning, CalVer, flexible formats
|
||||
- File paths: Path traversal prevention, absolute path validation
|
||||
- Tokens: Format validation, injection pattern detection
|
||||
- Boolean values: Strict true/false validation
|
||||
- URLs: Protocol validation, basic structure checks
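
When a workflow passes user-controlled values into these actions, a small guard on the caller's side adds defense in depth. A minimal sketch (the actions themselves still run the full Python validator):

```yaml
- name: Guard version input
  env:
    VERSION: ${{ github.event.inputs.version }}
  run: |
    # Accept plain SemVer only; fail fast before the value reaches later steps
    if ! printf '%s' "$VERSION" | grep -Eq '^[0-9]+\.[0-9]+\.[0-9]+$'; then
      echo "::error::Invalid version: $VERSION"
      exit 1
    fi
```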
|
||||
|
||||
### 5. Permissions Documentation
|
||||
|
||||
**Status**: ✅ All 43 actions documented
|
||||
|
||||
Every action includes explicit permissions comments documenting required GitHub token permissions:
|
||||
|
||||
```yaml
|
||||
# permissions:
|
||||
# - contents: write # Required for creating releases
|
||||
# - packages: write # Required for publishing packages
|
||||
```
|
||||
|
||||
### 6. Official Action Usage
|
||||
|
||||
Third-party security tools use official maintained actions:
|
||||
|
||||
- **Bun**: `oven-sh/setup-bun@v2.0.2` (SHA-pinned)
|
||||
- **Trivy**: `aquasecurity/trivy-action@0.33.1` (SHA-pinned)
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
When using these actions in your workflows:
|
||||
|
||||
### 1. Use Least Privilege
|
||||
|
||||
Only grant the minimum required permissions:
|
||||
|
||||
```yaml
|
||||
permissions:
|
||||
contents: write # Only if creating commits/releases
|
||||
packages: write # Only if publishing packages
|
||||
security-events: write # Only if uploading SARIF reports
|
||||
```
|
||||
|
||||
### 2. Protect Secrets
|
||||
|
||||
- Never log sensitive values
|
||||
- Use GitHub Secrets for all credentials
|
||||
- Avoid exposing secrets in error messages
|
||||
- Use secret masking for custom secrets
|
||||
|
||||
```yaml
|
||||
- name: Use Secret
|
||||
env:
|
||||
API_KEY: ${{ secrets.API_KEY }}
|
||||
run: |
|
||||
echo "::add-mask::$API_KEY"
|
||||
# Use API_KEY safely
|
||||
```
|
||||
|
||||
### 3. Validate Inputs
|
||||
|
||||
When calling actions, validate inputs match expected patterns:
|
||||
|
||||
```yaml
|
||||
- uses: ./version-validator
|
||||
with:
|
||||
version: ${{ github.event.inputs.version }}
|
||||
validation-regex: '^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
```
|
||||
|
||||
### 4. Pin Action Versions
|
||||
|
||||
Always use specific versions or commit SHAs:
|
||||
|
||||
```yaml
|
||||
# Good: SHA-pinned
|
||||
- uses: owner/action@abc123def456...
|
||||
|
||||
# Good: Specific version
|
||||
- uses: owner/action@v1.2.3
|
||||
|
||||
# Bad: Mutable reference
|
||||
- uses: owner/action@main
|
||||
```
|
||||
|
||||
### 5. Review Action Code
|
||||
|
||||
Before using any action:
|
||||
|
||||
- Review the source code
|
||||
- Check permissions requirements
|
||||
- Verify input validation
|
||||
- Examine shell script patterns
|
||||
- Look for secret handling
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
We take security vulnerabilities seriously. If you discover a security issue:
|
||||
|
||||
### Reporting Process
|
||||
|
||||
1. **DO NOT** open a public issue
|
||||
2. **DO** report via GitHub Security Advisories (preferred):
|
||||
- Go to the repository's Security tab
|
||||
- Click "Report a vulnerability"
|
||||
- Create a private security advisory
|
||||
3. **Alternatively**, email security concerns to the repository owner if GitHub Security Advisories are unavailable
|
||||
4. **DO** include:
|
||||
- Description of the vulnerability
|
||||
- Steps to reproduce
|
||||
- Potential impact
|
||||
- Suggested fix (if available)
|
||||
|
||||
### What to Report
|
||||
|
||||
Report any security concerns including:
|
||||
|
||||
- Command injection vulnerabilities
|
||||
- Path traversal issues
|
||||
- Secret exposure in logs
|
||||
- ReDoS vulnerabilities
|
||||
- Unsafe input handling
|
||||
- Supply chain security issues
|
||||
- Privilege escalation risks
|
||||
|
||||
### Response Timeline
|
||||
|
||||
- **24 hours**: Initial response acknowledging receipt
|
||||
- **7 days**: Assessment and severity classification
|
||||
- **30 days**: Fix developed and tested (for confirmed vulnerabilities)
|
||||
- **Public disclosure**: Coordinated after fix is released
|
||||
|
||||
### Security Updates
|
||||
|
||||
When security issues are fixed:
|
||||
|
||||
1. A patch is released
|
||||
2. Affected actions are updated
|
||||
3. Security advisory is published
|
||||
4. Users are notified via GitHub Security Advisories
|
||||
|
||||
## Audit History
|
||||
|
||||
### Phase 1: Script Injection Prevention (2024)
|
||||
|
||||
- Converted 43 actions to use environment variables
|
||||
- Eliminated all direct `${{ inputs.* }}` usage in shell scripts
|
||||
- Added comprehensive input validation
|
||||
- Status: ✅ Complete
|
||||
|
||||
### Phase 2: Enhanced Security (2024-2025)
|
||||
|
||||
- Replaced custom Bun installation with official action
|
||||
- Replaced custom Trivy installation with official action
|
||||
- Added secret masking to 7 critical actions (including docker-publish)
|
||||
- Migrated from custom common-cache to official actions/cache
|
||||
- Status: ✅ Complete
|
||||
|
||||
### Phase 3: Documentation & Policy (2024)
|
||||
|
||||
- Added permissions comments to all 43 actions
|
||||
- Created security policy (this document)
|
||||
- Documented best practices
|
||||
- Status: ✅ Complete
|
||||
|
||||
## Security Testing
|
||||
|
||||
All actions include:
|
||||
|
||||
- **Unit tests**: ShellSpec tests for action logic
|
||||
- **Integration tests**: End-to-end workflow validation
|
||||
- **Validation tests**: pytest tests for input validation
|
||||
- **Security tests**: Command injection prevention tests
|
||||
|
||||
Run security tests:
|
||||
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [GitHub Actions Security Hardening](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions)
|
||||
- [OWASP Command Injection](https://owasp.org/www-community/attacks/Command_Injection)
|
||||
- [CWE-78: OS Command Injection](https://cwe.mitre.org/data/definitions/78.html)
|
||||
- [Supply Chain Security](https://slsa.dev/)
|
||||
|
||||
## License
|
||||
|
||||
This security policy is part of the repository and follows the same license.
|
||||
|
||||
## Contact
|
||||
|
||||
**For security vulnerabilities:**
|
||||
|
||||
- **Primary**: Create a private security advisory in the repository's Security tab
|
||||
- **Fallback**: Email the repository owner if Security Advisories are unavailable
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-09-29
|
||||
**Policy Version**: 1.0.0
|
||||
_tests/README.md (new file, 665 lines)
@@ -0,0 +1,665 @@
|
||||
# GitHub Actions Testing Framework
|
||||
|
||||
A comprehensive testing framework for validating GitHub Actions in this monorepo using ShellSpec and Python-based input validation.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
make test
|
||||
|
||||
# Run only unit tests
|
||||
make test-unit
|
||||
|
||||
# Run tests for specific action
|
||||
make test-action ACTION=node-setup
|
||||
|
||||
# Run with coverage reporting
|
||||
make test-coverage
|
||||
```
|
||||
|
||||
### Prerequisites
|
||||
|
||||
```bash
|
||||
# Install ShellSpec (testing framework)
|
||||
curl -fsSL https://github.com/shellspec/shellspec/releases/latest/download/shellspec-dist.tar.gz | tar -xz
|
||||
sudo make -C shellspec-* install
|
||||
|
||||
# Install nektos/act (optional, for integration tests)
|
||||
brew install act # macOS
|
||||
# or: curl https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
|
||||
```
|
||||
|
||||
## 📁 Framework Overview
|
||||
|
||||
### Architecture
|
||||
|
||||
The testing framework uses a **multi-level testing strategy**:
|
||||
|
||||
1. **Unit Tests** - Fast validation of action logic, inputs, and outputs using Python validation
|
||||
2. **Integration Tests** - Test actions in realistic workflow environments
|
||||
3. **External Usage Tests** - Validate actions work as `ivuorinen/actions/action-name@main`
|
||||
|
||||
### Technology Stack
|
||||
|
||||
- **Primary Framework**: [ShellSpec](https://shellspec.info/) - BDD testing for shell scripts
|
||||
- **Validation**: Python-based input validation via `validate-inputs/validator.py`
|
||||
- **Local Execution**: [nektos/act](https://github.com/nektos/act) - Run GitHub Actions locally
|
||||
- **CI Integration**: GitHub Actions workflows
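
A minimal CI job wiring the suite into GitHub Actions could look like the following sketch (the repository's actual workflows may differ); it reuses the ShellSpec install commands from the prerequisites above:

```yaml
name: Tests
on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install ShellSpec
        run: |
          curl -fsSL https://github.com/shellspec/shellspec/releases/latest/download/shellspec-dist.tar.gz | tar -xz
          sudo make -C shellspec-* install
      - name: Run full test suite
        run: make test
```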
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```text
|
||||
_tests/
|
||||
├── README.md # This documentation
|
||||
├── run-tests.sh # Main test runner script
|
||||
├── unit/ # Unit tests by action
|
||||
│ ├── spec_helper.sh # ShellSpec helper with validation functions
|
||||
│ ├── version-file-parser/ # Example unit tests
|
||||
│ ├── node-setup/ # Example unit tests
|
||||
│ └── ... # One directory per action
|
||||
├── framework/ # Core testing utilities
|
||||
│ ├── setup.sh # Test environment setup
|
||||
│ ├── utils.sh # Common testing functions
|
||||
│ ├── validation.py # Python validation utilities
|
||||
│ └── fixtures/ # Test fixtures
|
||||
├── integration/ # Integration tests
|
||||
│ ├── workflows/ # Test workflows for nektos/act
|
||||
│ ├── external-usage/ # External reference tests
|
||||
│ └── action-chains/ # Multi-action workflow tests
|
||||
├── coverage/ # Coverage reports
|
||||
└── reports/ # Test execution reports
|
||||
```
|
||||
|
||||
## ✍️ Writing Tests
|
||||
|
||||
### Basic Unit Test Structure
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env shellspec
|
||||
# _tests/unit/my-action/validation.spec.sh
|
||||
|
||||
Describe "my-action validation"
|
||||
ACTION_DIR="my-action"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating required inputs"
|
||||
It "accepts valid input"
|
||||
When call validate_input_python "my-action" "input-name" "valid-value"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid input"
|
||||
When call validate_input_python "my-action" "input-name" "invalid@value"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating boolean inputs"
|
||||
It "accepts true"
|
||||
When call validate_input_python "my-action" "dry-run" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false"
|
||||
When call validate_input_python "my-action" "dry-run" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid boolean"
|
||||
When call validate_input_python "my-action" "dry-run" "maybe"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
```
|
||||
|
||||
### Integration Test Example
|
||||
|
||||
```yaml
|
||||
# _tests/integration/workflows/my-action-test.yml
|
||||
name: Test my-action Integration
|
||||
on: workflow_dispatch
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Test action locally
|
||||
id: test-local
|
||||
uses: ./my-action
|
||||
with:
|
||||
required-input: 'test-value'
|
||||
|
||||
- name: Validate outputs
  env:
    RESULT: ${{ steps.test-local.outputs.result }}
  run: |
    echo "Output: $RESULT"
    [[ -n "$RESULT" ]] || exit 1
|
||||
|
||||
- name: Test external reference
|
||||
uses: ivuorinen/actions/my-action@main
|
||||
with:
|
||||
required-input: 'test-value'
|
||||
```
|
||||
|
||||
## 🛠️ Testing Functions
|
||||
|
||||
### Primary Validation Function
|
||||
|
||||
The framework provides one main validation function that uses the Python validation system:
|
||||
|
||||
#### validate_input_python
|
||||
|
||||
Tests input validation using the centralized Python validator:
|
||||
|
||||
```bash
|
||||
validate_input_python "action-name" "input-name" "test-value"
|
||||
```
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Boolean validation
|
||||
validate_input_python "pre-commit" "dry-run" "true" # success
|
||||
validate_input_python "pre-commit" "dry-run" "false" # success
|
||||
validate_input_python "pre-commit" "dry-run" "maybe" # failure
|
||||
|
||||
# Version validation
|
||||
validate_input_python "node-setup" "node-version" "18.0.0" # success
|
||||
validate_input_python "node-setup" "node-version" "v1.2.3" # success
|
||||
validate_input_python "node-setup" "node-version" "invalid" # failure
|
||||
|
||||
# Token validation
|
||||
validate_input_python "npm-publish" "npm-token" "ghp_123..." # success
|
||||
validate_input_python "npm-publish" "npm-token" "invalid" # failure
|
||||
|
||||
# Docker validation
|
||||
validate_input_python "docker-build" "image-name" "myapp" # success
|
||||
validate_input_python "docker-build" "tag" "v1.0.0" # success
|
||||
|
||||
# Path validation (security)
|
||||
validate_input_python "pre-commit" "config-file" "config.yml" # success
|
||||
validate_input_python "pre-commit" "config-file" "../etc/pass" # failure
|
||||
|
||||
# Injection detection
|
||||
validate_input_python "common-retry" "command" "echo test" # success
|
||||
validate_input_python "common-retry" "command" "rm -rf /; " # failure
|
||||
```
|
||||
|
||||
### Helper Functions from spec_helper.sh
|
||||
|
||||
```bash
|
||||
# Setup/cleanup
|
||||
setup_default_inputs "action-name" "input-name" # Set required defaults
|
||||
cleanup_default_inputs "action-name" "input-name" # Clean up defaults
|
||||
shellspec_setup_test_env "test-name" # Setup test environment
|
||||
shellspec_cleanup_test_env "test-name" # Cleanup test environment
|
||||
|
||||
# Mock execution
|
||||
shellspec_mock_action_run "action-dir" key1 value1 key2 value2
|
||||
shellspec_validate_action_output "expected-key" "expected-value"
|
||||
|
||||
# Action metadata
|
||||
validate_action_yml "action.yml" # Validate YAML structure
|
||||
get_action_inputs "action.yml" # Get action inputs
|
||||
get_action_outputs "action.yml" # Get action outputs
|
||||
get_action_name "action.yml" # Get action name
|
||||
```
|
||||
|
||||
### Complete Action Validation Example
|
||||
|
||||
```bash
|
||||
Describe "comprehensive-action validation"
|
||||
ACTION_DIR="comprehensive-action"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating all input types"
|
||||
It "validates boolean inputs"
|
||||
When call validate_input_python "$ACTION_DIR" "verbose" "true"
|
||||
The status should be success
|
||||
|
||||
When call validate_input_python "$ACTION_DIR" "verbose" "false"
|
||||
The status should be success
|
||||
|
||||
When call validate_input_python "$ACTION_DIR" "verbose" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates numeric inputs"
|
||||
When call validate_input_python "$ACTION_DIR" "max-retries" "3"
|
||||
The status should be success
|
||||
|
||||
When call validate_input_python "$ACTION_DIR" "max-retries" "999"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates version inputs"
|
||||
When call validate_input_python "$ACTION_DIR" "tool-version" "1.0.0"
|
||||
The status should be success
|
||||
|
||||
When call validate_input_python "$ACTION_DIR" "tool-version" "v1.2.3-rc.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "validates security patterns"
|
||||
When call validate_input_python "$ACTION_DIR" "command" "echo test"
|
||||
The status should be success
|
||||
|
||||
When call validate_input_python "$ACTION_DIR" "command" "rm -rf /; "
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating action structure"
|
||||
It "has valid YAML structure"
|
||||
When call validate_action_yml "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
End
|
||||
```
|
||||
|
||||
## 🎯 Testing Patterns by Action Type
|
||||
|
||||
### Setup Actions (node-setup, php-version-detect, etc.)
|
||||
|
||||
Focus on version detection and environment setup:
|
||||
|
||||
```bash
|
||||
Context "when detecting versions"
|
||||
It "detects version from config files"
|
||||
When call validate_input_python "node-setup" "node-version" "18.0.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts default version"
|
||||
When call validate_input_python "python-version-detect" "default-version" "3.11"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
```
|
||||
|
||||
### Linting Actions (eslint-fix, prettier-fix, etc.)
|
||||
|
||||
Focus on file processing and security:
|
||||
|
||||
```bash
|
||||
Context "when processing files"
|
||||
It "validates working directory"
|
||||
When call validate_input_python "eslint-fix" "working-directory" "."
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects path traversal"
|
||||
When call validate_input_python "eslint-fix" "working-directory" "../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates boolean flags"
|
||||
When call validate_input_python "eslint-fix" "fix-only" "true"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
```
|
||||
|
||||
### Build Actions (docker-build, go-build, etc.)
|
||||
|
||||
Focus on build configuration:
|
||||
|
||||
```bash
|
||||
Context "when building"
|
||||
It "validates image name"
|
||||
When call validate_input_python "docker-build" "image-name" "myapp"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "validates tag format"
|
||||
When call validate_input_python "docker-build" "tag" "v1.0.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "validates platforms"
|
||||
When call validate_input_python "docker-build" "platforms" "linux/amd64,linux/arm64"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
```
|
||||
|
||||
### Publishing Actions (npm-publish, docker-publish, etc.)
|
||||
|
||||
Focus on credentials and registry validation:
|
||||
|
||||
```bash
|
||||
Context "when publishing"
|
||||
It "validates token format"
|
||||
When call validate_input_python "npm-publish" "npm-token" "ghp_123456789012345678901234567890123456"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token"
|
||||
When call validate_input_python "npm-publish" "npm-token" "invalid-token"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates version"
|
||||
When call validate_input_python "npm-publish" "package-version" "1.0.0"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
```
|
||||
|
||||
## 🔧 Running Tests
|
||||
|
||||
### Command Line Interface
|
||||
|
||||
```bash
|
||||
# Basic usage
|
||||
./_tests/run-tests.sh [OPTIONS] [ACTION_NAME...]
|
||||
|
||||
# Examples
|
||||
./_tests/run-tests.sh # All tests, all actions
|
||||
./_tests/run-tests.sh -t unit # Unit tests only
|
||||
./_tests/run-tests.sh -a node-setup # Specific action
|
||||
./_tests/run-tests.sh -t integration docker-build # Integration tests for docker-build
|
||||
./_tests/run-tests.sh --format json --coverage # JSON output with coverage
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
|-----------------------|------------------------------------------------|
|
||||
| `-t, --type TYPE` | Test type: `unit`, `integration`, `e2e`, `all` |
|
||||
| `-a, --action ACTION` | Filter by action name pattern |
|
||||
| `-j, --jobs JOBS` | Number of parallel jobs (default: 4) |
|
||||
| `-c, --coverage` | Enable coverage reporting |
|
||||
| `-f, --format FORMAT` | Output format: `console`, `json`, `junit` |
|
||||
| `-v, --verbose` | Enable verbose output |
|
||||
| `-h, --help` | Show help message |
|
||||
|
||||
### Make Targets

```bash
make test                     # Run all tests
make test-unit                # Unit tests only
make test-integration         # Integration tests only
make test-coverage            # Tests with coverage
make test-action ACTION=name  # Test specific action
```

## 🤝 Contributing Tests

### Adding Tests for New Actions

1. **Create Unit Test Directory**

   ```bash
   mkdir -p _tests/unit/new-action
   ```

2. **Write Unit Tests**

   ```bash
   # _tests/unit/new-action/validation.spec.sh
   #!/usr/bin/env shellspec

   Describe "new-action validation"
     ACTION_DIR="new-action"
     ACTION_FILE="$ACTION_DIR/action.yml"

     Context "when validating inputs"
       It "validates required input"
         When call validate_input_python "new-action" "required-input" "value"
         The status should be success
       End
     End
   End
   ```

3. **Create Integration Test**

   ```bash
   # _tests/integration/workflows/new-action-test.yml
   # (See integration test example above)
   ```

4. **Test Your Tests**

   ```bash
   make test-action ACTION=new-action
   ```

### Pull Request Checklist

- [ ] Tests use `validate_input_python` for input validation
- [ ] All test types pass locally (`make test`)
- [ ] Integration test workflow created
- [ ] Security testing included for user inputs
- [ ] Tests are independent and isolated
- [ ] Proper cleanup in test teardown
- [ ] Documentation updated if needed

## 💡 Best Practices

### 1. Use validate_input_python for All Input Testing

✅ **Good**:

```bash
When call validate_input_python "my-action" "verbose" "true"
The status should be success
```

❌ **Avoid**:

```bash
# Don't manually test validation - use the Python validator
export INPUT_VERBOSE="true"
python3 validate-inputs/validator.py
```

### 2. Group Related Validations

✅ **Good**:

```bash
Context "when validating configuration"
  It "accepts valid boolean"
    When call validate_input_python "my-action" "dry-run" "true"
    The status should be success
  End

  It "accepts valid version"
    When call validate_input_python "my-action" "tool-version" "1.0.0"
    The status should be success
  End
End
```

### 3. Always Include Security Testing

✅ **Always include**:

```bash
It "rejects command injection"
  When call validate_input_python "common-retry" "command" "rm -rf /; "
  The status should be failure
End

It "rejects path traversal"
  When call validate_input_python "pre-commit" "config-file" "../etc/passwd"
  The status should be failure
End
```

### 4. Write Descriptive Test Names

✅ **Good**:

```bash
It "accepts valid semantic version format"
It "rejects version with invalid characters"
It "falls back to default when no version file exists"
```

❌ **Avoid**:

```bash
It "validates input"
It "works correctly"
```

### 5. Keep Tests Independent

- Each test should work in isolation
- Don't rely on test execution order
- Clean up after each test
- Use proper setup/teardown (see the sketch below)

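A minimal sketch of what that looks like in practice, assuming the `shellspec_setup_test_env` and `shellspec_cleanup_test_env` helpers listed under Available Functions below are the setup and teardown your spec needs:

```bash
Describe "my-action"
  # Fresh environment per example; no state leaks between tests
  BeforeEach 'shellspec_setup_test_env "my-action"'
  AfterEach 'shellspec_cleanup_test_env "my-action"'

  It "passes regardless of which tests ran before it"
    When call validate_input_python "my-action" "dry-run" "true"
    The status should be success
  End
End
```
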
## 🔍 Framework Features

### Test Environment Setup

The framework automatically sets up test environments via `spec_helper.sh`:

```bash
# Automatic setup on load
- GitHub Actions environment variables
- Temporary directories
- Mock GITHUB_OUTPUT files
- Default required inputs for actions

# Available variables
$PROJECT_ROOT   # Repository root
$TEST_ROOT      # _tests/ directory
$FRAMEWORK_DIR  # _tests/framework/
$FIXTURES_DIR   # _tests/framework/fixtures/
$TEMP_DIR       # Temporary test directory
$GITHUB_OUTPUT  # Mock outputs file
$GITHUB_ENV     # Mock environment file
```

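A spec can rely on these variables directly. A minimal sketch, assuming `spec_helper.sh` has already exported them (the `result=ok` key is made up for illustration):

```bash
Describe "framework environment"
  It "provides a writable mock GITHUB_OUTPUT"
    When call sh -c 'echo "result=ok" >> "$GITHUB_OUTPUT"'
    The status should be success
    The contents of file "$GITHUB_OUTPUT" should include "result=ok"
  End
End
```
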
### Python Validation Integration

All input validation uses the centralized Python validation system from `validate-inputs/` (a short sketch follows this list):

- Convention-based automatic validation
- 9 specialized validators (Boolean, Version, Token, Numeric, File, Network, Docker, Security, CodeQL)
- Custom validator support per action
- Injection and security pattern detection

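A sketch of how the conventions surface in tests; the exact rule that fires depends on each action's rules file, so treat the expectations here as illustrative:

```bash
Describe "convention-based validation"
  It "rejects a malformed value for a token-style input"
    When call validate_input_python "npm-publish" "npm-token" "not-a-token"
    The status should be failure
  End

  It "rejects a non-boolean value for a flag-style input"
    When call validate_input_python "eslint-fix" "fix-only" "maybe"
    The status should be failure
  End
End
```
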
## 🚨 Troubleshooting

### Common Issues

#### "ShellSpec command not found"

```bash
# Install ShellSpec globally
curl -fsSL https://github.com/shellspec/shellspec/releases/latest/download/shellspec-dist.tar.gz | tar -xz
sudo make -C shellspec-* install
```

#### "act command not found"

```bash
# Install nektos/act (macOS)
brew install act

# Install nektos/act (Linux)
curl https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
```

#### Tests timeout

```bash
# Increase timeout for slow operations
export TEST_TIMEOUT=300
```

#### Permission denied on test scripts

```bash
# Make test scripts executable
find _tests/ -name "*.sh" -exec chmod +x {} \;
```

### Debugging Tests

1. **Enable Verbose Mode**

   ```bash
   ./_tests/run-tests.sh -v
   ```

2. **Run Single Test**

   ```bash
   shellspec _tests/unit/my-action/validation.spec.sh
   ```

3. **Enable Debug Mode**

   ```bash
   export SHELLSPEC_DEBUG=1
   shellspec _tests/unit/my-action/validation.spec.sh
   ```

4. **Check Test Output**

   ```bash
   # Test results stored in _tests/reports/
   cat _tests/reports/unit/my-action.txt
   ```

## 📚 Resources

- [ShellSpec Documentation](https://shellspec.info/)
- [nektos/act Documentation](https://nektosact.com/)
- [GitHub Actions Documentation](https://docs.github.com/en/actions)
- [Testing GitHub Actions Best Practices](https://docs.github.com/en/actions/creating-actions/creating-a-composite-action#testing-your-action)
- [validate-inputs Documentation](../validate-inputs/docs/README_ARCHITECTURE.md)

## Framework Development

### Framework File Structure

```text
_tests/
├── unit/
│   └── spec_helper.sh    # ShellSpec configuration and helpers
├── framework/
│   ├── setup.sh          # Test environment initialization
│   ├── utils.sh          # Common utility functions
│   ├── validation.py     # Python validation helpers
│   └── fixtures/         # Test fixtures
└── integration/
    ├── workflows/        # Integration test workflows
    ├── external-usage/   # External reference tests
    └── action-chains/    # Multi-action tests
```

### Available Functions

**From spec_helper.sh (\_tests/unit/spec_helper.sh):**

- `validate_input_python(action, input_name, value)` - Main validation function
- `setup_default_inputs(action, input_name)` - Set default required inputs
- `cleanup_default_inputs(action, input_name)` - Clean up default inputs
- `shellspec_setup_test_env(name)` - Setup test environment
- `shellspec_cleanup_test_env(name)` - Cleanup test environment
- `shellspec_mock_action_run(action_dir, ...)` - Mock action execution
- `shellspec_validate_action_output(key, value)` - Validate outputs

**From utils.sh (\_tests/framework/utils.sh):**

- `validate_action_yml(file)` - Validate action YAML
- `get_action_inputs(file)` - Extract action inputs
- `get_action_outputs(file)` - Extract action outputs
- `get_action_name(file)` - Get action name
- `test_input_validation(dir, name, value, expected)` - Test input
- `test_action_outputs(dir)` - Test action outputs
- `test_external_usage(dir)` - Test external usage (see the combined sketch below)

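A combined usage sketch; the action name, output key, and value below are assumptions, and any extra arguments `shellspec_mock_action_run` accepts beyond the action directory are not shown:

```bash
Describe "node-setup (illustrative smoke test)"
  It "runs the mocked action"
    When call shellspec_mock_action_run "node-setup"
    The status should be success
  End

  It "recorded the expected output"
    # "node-version" and "18" are hypothetical key/value pairs
    When call shellspec_validate_action_output "node-version" "18"
    The status should be success
  End
End
```
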
**Last Updated:** October 15, 2025

239
_tests/framework/setup.sh
Executable file
@@ -0,0 +1,239 @@
#!/usr/bin/env bash
# Test environment setup utilities
# Provides common setup functions for GitHub Actions testing

set -euo pipefail

# Global test configuration
export GITHUB_ACTIONS=true
export GITHUB_WORKSPACE="${GITHUB_WORKSPACE:-$(pwd)}"
export GITHUB_REPOSITORY="${GITHUB_REPOSITORY:-ivuorinen/actions}"
export GITHUB_SHA="${GITHUB_SHA:-fake-sha}"
export GITHUB_REF="${GITHUB_REF:-refs/heads/main}"
export GITHUB_TOKEN="${GITHUB_TOKEN:-ghp_fake_token_for_testing}"

# Test framework directories
TEST_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
FRAMEWORK_DIR="${TEST_ROOT}/framework"
FIXTURES_DIR="${FRAMEWORK_DIR}/fixtures"
MOCKS_DIR="${FRAMEWORK_DIR}/mocks"

# Export directories for use by other scripts
export FIXTURES_DIR MOCKS_DIR
# Only create TEMP_DIR if not already set
if [ -z "${TEMP_DIR:-}" ]; then
  TEMP_DIR=$(mktemp -d) || exit 1
fi

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging functions
log_info() {
  echo -e "${BLUE}[INFO]${NC} $*" >&2
}

log_success() {
  echo -e "${GREEN}[SUCCESS]${NC} $*" >&2
}

log_warning() {
  echo -e "${YELLOW}[WARNING]${NC} $*" >&2
}

log_error() {
  echo -e "${RED}[ERROR]${NC} $*" >&2
}

# Setup test environment
setup_test_env() {
  local test_name="${1:-unknown}"

  log_info "Setting up test environment for: $test_name"

  # Create temporary directory for test
  export TEST_TEMP_DIR="${TEMP_DIR}/${test_name}"
  mkdir -p "$TEST_TEMP_DIR"

  # Create fake GitHub workspace
  export TEST_WORKSPACE="${TEST_TEMP_DIR}/workspace"
  mkdir -p "$TEST_WORKSPACE"

  # Setup fake GitHub outputs
  export GITHUB_OUTPUT="${TEST_TEMP_DIR}/github-output"
  export GITHUB_ENV="${TEST_TEMP_DIR}/github-env"
  export GITHUB_PATH="${TEST_TEMP_DIR}/github-path"
  export GITHUB_STEP_SUMMARY="${TEST_TEMP_DIR}/github-step-summary"

  # Initialize output files
  touch "$GITHUB_OUTPUT" "$GITHUB_ENV" "$GITHUB_PATH" "$GITHUB_STEP_SUMMARY"

  # Change to test workspace
  cd "$TEST_WORKSPACE"

  log_success "Test environment setup complete"
}

# Cleanup test environment
cleanup_test_env() {
  local test_name="${1:-unknown}"

  log_info "Cleaning up test environment for: $test_name"

  if [[ -n ${TEST_TEMP_DIR:-} && -d $TEST_TEMP_DIR ]]; then
    # Check if current directory is inside TEST_TEMP_DIR
    local current_dir
    current_dir="$(pwd)"
    if [[ "$current_dir" == "$TEST_TEMP_DIR"* ]]; then
      cd "$GITHUB_WORKSPACE" || cd /tmp || true
    fi

    rm -rf "$TEST_TEMP_DIR"
    log_success "Test environment cleanup complete"
  fi
}

# Cleanup framework temp directory
cleanup_framework_temp() {
  if [[ -n ${TEMP_DIR:-} && -d $TEMP_DIR ]]; then
    # Check if current directory is inside TEMP_DIR
    local current_dir
    current_dir="$(pwd)"
    if [[ "$current_dir" == "$TEMP_DIR"* ]]; then
      cd "$GITHUB_WORKSPACE" || cd /tmp || true
    fi

    rm -rf "$TEMP_DIR"
    log_info "Framework temp directory cleaned up"
  fi
}

# Create a mock GitHub repository structure
create_mock_repo() {
  local repo_type="${1:-node}"

  case "$repo_type" in
    "node")
      create_mock_node_repo
      ;;
    "php" | "python" | "go" | "dotnet")
      log_error "Unsupported repo type: $repo_type. Only 'node' is currently supported."
      return 1
      ;;
    *)
      log_warning "Unknown repo type: $repo_type, defaulting to node"
      create_mock_node_repo
      ;;
  esac
}

# Create mock Node.js repository
create_mock_node_repo() {
  cat >package.json <<EOF
{
  "name": "test-project",
  "version": "1.0.0",
  "engines": {
    "node": ">=18.0.0"
  },
  "scripts": {
    "test": "npm test",
    "lint": "eslint .",
    "build": "npm run build"
  },
  "devDependencies": {
    "eslint": "^8.0.0"
  }
}
EOF

  echo "node_modules/" >.gitignore
  mkdir -p src
  echo 'console.log("Hello, World!");' >src/index.js
}

# Removed unused mock repository functions:
# create_mock_php_repo, create_mock_python_repo, create_mock_go_repo, create_mock_dotnet_repo
# Only create_mock_node_repo is used and kept above

# Validate action outputs
validate_action_output() {
  local expected_key="$1"
  local expected_value="$2"
  local output_file="${3:-$GITHUB_OUTPUT}"

  if grep -q "^${expected_key}=${expected_value}$" "$output_file"; then
    log_success "Output validation passed: $expected_key=$expected_value"
    return 0
  else
    log_error "Output validation failed: $expected_key=$expected_value not found"
    log_error "Actual outputs:"
    cat "$output_file" >&2
    return 1
  fi
}
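# Usage example (illustrative): after simulating a step that appends to $GITHUB_OUTPUT,
#   validate_action_output "cache-hit" "true"
# succeeds only if the file contains the exact line "cache-hit=true"
# ("cache-hit" is a hypothetical output key).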

# Removed unused function: run_action_step

# Check if required tools are available
check_required_tools() {
  local tools=("git" "jq" "curl" "python3" "tar" "make")
  local missing_tools=()

  for tool in "${tools[@]}"; do
    if ! command -v "$tool" >/dev/null 2>&1; then
      missing_tools+=("$tool")
    fi
  done

  if [[ ${#missing_tools[@]} -gt 0 ]]; then
    log_error "Missing required tools: ${missing_tools[*]}"
    return 1
  fi

  if [[ -z ${SHELLSPEC_VERSION:-} ]]; then
    log_success "All required tools are available"
  fi
  return 0
}

# Initialize testing framework
init_testing_framework() {
  # Use file-based lock to prevent multiple initialization across ShellSpec processes
  local lock_file="${TEMP_DIR}/.framework_initialized"

  if [[ -f "$lock_file" ]]; then
    return 0
  fi

  # Silent initialization in ShellSpec environment to avoid output interference
  if [[ -z ${SHELLSPEC_VERSION:-} ]]; then
    log_info "Initializing GitHub Actions Testing Framework"
  fi

  # Check requirements
  check_required_tools

  # Temporary directory already created by mktemp above

  # Note: Cleanup trap removed to avoid conflicts with ShellSpec
  # Individual tests should call cleanup_test_env when needed

  # Mark as initialized with file lock
  touch "$lock_file"
  export TESTING_FRAMEWORK_INITIALIZED=1

  if [[ -z ${SHELLSPEC_VERSION:-} ]]; then
    log_success "Testing framework initialized"
  fi
}

# Export all functions for use in tests
export -f setup_test_env cleanup_test_env cleanup_framework_temp create_mock_repo
export -f create_mock_node_repo validate_action_output check_required_tools
export -f log_info log_success log_warning log_error
export -f init_testing_framework
374
_tests/framework/utils.sh
Executable file
@@ -0,0 +1,374 @@
#!/usr/bin/env bash
# Common testing utilities for GitHub Actions
# Provides helper functions for testing action behavior

set -euo pipefail

# Source setup utilities
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=_tests/framework/setup.sh
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/setup.sh"

# Action testing utilities
validate_action_yml() {
  local action_file="$1"
  local quiet_mode="${2:-false}"

  if [[ ! -f $action_file ]]; then
    [[ $quiet_mode == "false" ]] && log_error "Action file not found: $action_file"
    return 1
  fi

  # Check if it's valid YAML
  if ! yq eval '.' "$action_file" >/dev/null 2>&1; then
    # Compute path relative to this script for CWD independence
    local utils_dir
    utils_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    if ! uv run "$utils_dir/../shared/validation_core.py" --validate-yaml "$action_file" 2>/dev/null; then
      [[ $quiet_mode == "false" ]] && log_error "Invalid YAML in action file: $action_file"
      return 1
    fi
  fi

  [[ $quiet_mode == "false" ]] && log_success "Action YAML is valid: $action_file"
  return 0
}

# Extract action metadata using Python validation module
get_action_inputs() {
  local action_file="$1"
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  uv run "$script_dir/../shared/validation_core.py" --inputs "$action_file"
}

get_action_outputs() {
  local action_file="$1"
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  uv run "$script_dir/../shared/validation_core.py" --outputs "$action_file"
}

get_action_name() {
  local action_file="$1"
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  uv run "$script_dir/../shared/validation_core.py" --name "$action_file"
}

get_action_runs_using() {
  local action_file="$1"
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  uv run "$script_dir/../shared/validation_core.py" --runs-using "$action_file"
}

# Check if an input is required in an action.yml file
is_input_required() {
  local action_file="$1"
  local input_name="$2"
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

  # Get the 'required' property for the input
  local required_status
  required_status=$(uv run "$script_dir/../shared/validation_core.py" --property "$action_file" "$input_name" "required")

  # Return 0 (success) if input is required, 1 (failure) if optional
  [[ "$required_status" == "required" ]]
}
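# Usage example (illustrative, hypothetical input name):
#   if is_input_required "docker-build/action.yml" "image-name"; then
#     echo "image-name must be provided"
#   fi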

# Test input validation using Python validation module
test_input_validation() {
  local action_dir="$1"
  local input_name="$2"
  local test_value="$3"
  local expected_result="${4:-success}" # success or failure

  # Normalize action_dir to absolute path before setup_test_env changes working directory
  action_dir="$(cd "$action_dir" && pwd)"

  log_info "Testing input validation: $input_name = '$test_value'"

  # Setup test environment
  setup_test_env "input-validation-${input_name}"

  # Use Python validation module via CLI
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

  local result="success"
  # Call validation_core CLI with proper argument passing (no injection risk)
  if ! uv run "$script_dir/../shared/validation_core.py" --validate "$action_dir" "$input_name" "$test_value" 2>&1; then
    result="failure"
  fi

  # Check result matches expectation
  if [[ $result == "$expected_result" ]]; then
    log_success "Input validation test passed: $input_name"
    cleanup_test_env "input-validation-${input_name}"
    return 0
  else
    log_error "Input validation test failed: $input_name (expected: $expected_result, got: $result)"
    cleanup_test_env "input-validation-${input_name}"
    return 1
  fi
}

# Removed: create_validation_script, create_python_validation_script,
# convert_github_expressions_to_env_vars, needs_python_validation, python_validate_input
# These functions are no longer needed as we use Python validation directly

# Test action outputs
test_action_outputs() {
  local action_dir="$1"
  shift

  # Normalize action_dir to absolute path before setup_test_env changes working directory
  action_dir="$(cd "$action_dir" && pwd)"

  log_info "Testing action outputs for: $(basename "$action_dir")"

  # Setup test environment
  setup_test_env "output-test-$(basename "$action_dir")"
  create_mock_repo "node"

  # Set up inputs
  while [[ $# -gt 1 ]]; do
    local key="$1"
    local value="$2"
    # Convert dashes to underscores and uppercase for environment variable names
    local env_key="${key//-/_}"
    local env_key_upper
    env_key_upper=$(echo "$env_key" | tr '[:lower:]' '[:upper:]')
    export "INPUT_${env_key_upper}"="$value"
    shift 2
  done

  # Run the action (simplified simulation)
  local action_file="${action_dir}/action.yml"
  local action_name
  action_name=$(get_action_name "$action_file")

  log_info "Simulating action: $action_name"

  # For now, we'll create mock outputs based on the action definition
  local outputs
  outputs=$(get_action_outputs "$action_file")

  # Create mock outputs
  while IFS= read -r output; do
    if [[ -n $output ]]; then
      echo "${output}=mock-value-$(date +%s)" >>"$GITHUB_OUTPUT"
    fi
  done <<<"$outputs"

  # Validate outputs exist
  local test_passed=true
  while IFS= read -r output; do
    if [[ -n $output ]]; then
      if ! grep -q "^${output}=" "$GITHUB_OUTPUT"; then
        log_error "Missing output: $output"
        test_passed=false
      else
        log_success "Output found: $output"
      fi
    fi
  done <<<"$outputs"

  cleanup_test_env "output-test-$(basename "$action_dir")"

  if [[ $test_passed == "true" ]]; then
    log_success "Output test passed for: $(basename "$action_dir")"
    return 0
  else
    log_error "Output test failed for: $(basename "$action_dir")"
    return 1
  fi
}

# Test external usage pattern
test_external_usage() {
  local action_name="$1"

  log_info "Testing external usage pattern for: $action_name"

  # Create test workflow that uses external reference
  local test_workflow_dir="${TEST_ROOT}/integration/workflows"
  mkdir -p "$test_workflow_dir"

  local workflow_file="${test_workflow_dir}/${action_name}-external-test.yml"

  cat >"$workflow_file" <<EOF
name: External Usage Test - $action_name
on:
  workflow_dispatch:
  push:
    paths:
      - '$action_name/**'

jobs:
  test-external-usage:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test external usage
        uses: ivuorinen/actions/${action_name}@main
        with:
          # Default inputs for testing
EOF

  # Add common test inputs based on action type
  case "$action_name" in
    *-setup | *-version-detect)
      echo "          # Version detection action - no additional inputs needed" >>"$workflow_file"
      ;;
    *-lint* | *-fix)
      # shellcheck disable=SC2016
      echo '          token: ${{ github.token }}' >>"$workflow_file"
      ;;
    *-publish | *-build)
      # shellcheck disable=SC2016
      echo '          token: ${{ github.token }}' >>"$workflow_file"
      ;;
    *)
      echo "          # Generic test inputs" >>"$workflow_file"
      ;;
  esac

  log_success "Created external usage test workflow: $workflow_file"
  return 0
}

# Performance test utilities
measure_action_time() {
  local action_dir="$1"
  shift

  # Normalize action_dir to absolute path for consistent behavior
  action_dir="$(cd "$action_dir" && pwd)"

  log_info "Measuring execution time for: $(basename "$action_dir")"

  local start_time
  start_time=$(date +%s%N)

  # Run the action test
  test_action_outputs "$action_dir" "$@"
  local result=$?

  local end_time
  end_time=$(date +%s%N)

  local duration_ns=$((end_time - start_time))
  local duration_ms=$((duration_ns / 1000000))

  log_info "Action execution time: ${duration_ms}ms"

  # Store performance data
  echo "$(basename "$action_dir"),${duration_ms}" >>"${TEST_ROOT}/reports/performance.csv"

  return $result
}

# Batch test runner
run_action_tests() {
  local action_dir="$1"
  local test_type="${2:-all}" # all, unit, integration, outputs

  # Normalize action_dir to absolute path for consistent behavior
  action_dir="$(cd "$action_dir" && pwd)"

  local action_name
  action_name=$(basename "$action_dir")

  log_info "Running $test_type tests for: $action_name"

  local test_results=()

  # Handle "all" type by running all test types
  if [[ $test_type == "all" ]]; then
    # Run unit tests
    log_info "Running unit tests..."
    if validate_action_yml "${action_dir}/action.yml"; then
      test_results+=("unit:PASS")
    else
      test_results+=("unit:FAIL")
    fi

    # Run output tests
    log_info "Running output tests..."
    if test_action_outputs "$action_dir"; then
      test_results+=("outputs:PASS")
    else
      test_results+=("outputs:FAIL")
    fi

    # Run integration tests
    log_info "Running integration tests..."
    if test_external_usage "$action_name"; then
      test_results+=("integration:PASS")
    else
      test_results+=("integration:FAIL")
    fi
  else
    # Handle individual test types
    case "$test_type" in
      "unit")
        log_info "Running unit tests..."
        if validate_action_yml "${action_dir}/action.yml"; then
          test_results+=("unit:PASS")
        else
          test_results+=("unit:FAIL")
        fi
        ;;

      "outputs")
        log_info "Running output tests..."
        if test_action_outputs "$action_dir"; then
          test_results+=("outputs:PASS")
        else
          test_results+=("outputs:FAIL")
        fi
        ;;

      "integration")
        log_info "Running integration tests..."
        if test_external_usage "$action_name"; then
          test_results+=("integration:PASS")
        else
          test_results+=("integration:FAIL")
        fi
        ;;
    esac
  fi

  # Report results
  log_info "Test results for $action_name:"
  for result in "${test_results[@]}"; do
    local test_name="${result%:*}"
    local status="${result#*:}"

    if [[ $status == "PASS" ]]; then
      log_success "  $test_name: $status"
    else
      log_error "  $test_name: $status"
    fi
  done

  # Check if all tests passed (results are recorded as "name:PASS" or "name:FAIL")
  if [[ ! " ${test_results[*]} " =~ ":FAIL" ]]; then
    log_success "All tests passed for: $action_name"
    return 0
  else
    log_error "Some tests failed for: $action_name"
    return 1
  fi
}

# Export all functions
export -f validate_action_yml get_action_inputs get_action_outputs get_action_name get_action_runs_using is_input_required
export -f test_input_validation test_action_outputs test_external_usage measure_action_time run_action_tests
885
_tests/framework/validation.py
Executable file
@@ -0,0 +1,885 @@
#!/usr/bin/env python3
"""
GitHub Actions Validation Module

This module provides advanced validation capabilities for GitHub Actions testing,
specifically handling PCRE regex patterns with lookahead/lookbehind assertions
that are not supported in bash's basic regex engine.

Features:
- PCRE-compatible regex validation using Python's re module
- GitHub token format validation with proper lookahead support
- Input sanitization and security validation
- Complex pattern detection and validation
"""

from __future__ import annotations

from pathlib import Path
import re
import sys

import yaml  # pylint: disable=import-error


class ActionValidator:
    """Handles validation of GitHub Action inputs using Python regex engine."""

    # Common regex patterns that require PCRE features
    COMPLEX_PATTERNS = {
        "lookahead": r"\(\?\=",
        "lookbehind": r"\(\?\<=",
        "negative_lookahead": r"\(\?\!",
        "named_groups": r"\(\?P<\w+>",
        "conditional": r"\(\?\(",
    }

    # Standardized token patterns (resolved GitHub documentation discrepancies)
    # Fine-grained PATs are 50-255 characters with underscores (github_pat_[A-Za-z0-9_]{50,255})
    TOKEN_PATTERNS = {
        "classic": r"^gh[efpousr]_[a-zA-Z0-9]{36}$",
        "fine_grained": r"^github_pat_[A-Za-z0-9_]{50,255}$",  # 50-255 chars with underscores
        "installation": r"^ghs_[a-zA-Z0-9]{36}$",
        "npm_classic": r"^npm_[a-zA-Z0-9]{40,}$",  # NPM classic tokens
    }

    def __init__(self):
        """Initialize the validator."""

    def is_complex_pattern(self, pattern: str) -> bool:
        """
        Check if a regex pattern requires PCRE features not supported in bash.

        Args:
            pattern: The regex pattern to check

        Returns:
            True if pattern requires PCRE features, False otherwise
        """
        for regex in self.COMPLEX_PATTERNS.values():
            if re.search(regex, pattern):
                return True
        return False

    def validate_github_token(self, token: str, action_dir: str = "") -> tuple[bool, str]:
        """
        Validate GitHub token format using proper PCRE patterns.

        Args:
            token: The token to validate
            action_dir: The action directory (for context-specific validation)

        Returns:
            Tuple of (is_valid, error_message)
        """
        # Actions that require tokens shouldn't accept empty values
        action_name = Path(action_dir).name
        if action_name in ["csharp-publish", "eslint-fix", "pr-lint", "pre-commit"]:
            if not token or token.strip() == "":
                return False, "Token cannot be empty"
        # Other actions may accept empty tokens (they'll use defaults)
        elif not token or token.strip() == "":
            return True, ""

        # Check for GitHub Actions expression (should be allowed)
        if token == "${{ github.token }}" or (token.startswith("${{") and token.endswith("}}")):
            return True, ""

        # Check for environment variable reference (e.g., $GITHUB_TOKEN)
        if re.match(r"^\$[A-Za-z_][A-Za-z0-9_]*$", token):
            return True, ""

        # Check against all known token patterns
        for pattern in self.TOKEN_PATTERNS.values():
            if re.match(pattern, token):
                return True, ""

        return (
            False,
            "Invalid token format. Expected: gh[efpousr]_* (36 chars), "
            "github_pat_[A-Za-z0-9_]* (50-255 chars), ghs_* (36 chars), or npm_* (40+ chars)",
        )

    def validate_namespace_with_lookahead(self, namespace: str) -> tuple[bool, str]:
        """
        Validate namespace using the original lookahead pattern from csharp-publish.

        Args:
            namespace: The namespace to validate

        Returns:
            Tuple of (is_valid, error_message)
        """
        if not namespace or namespace.strip() == "":
            return False, "Namespace cannot be empty"

        # Original pattern: ^[a-zA-Z0-9]([a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38}$
        # This ensures hyphens are only allowed when followed by alphanumeric characters
        pattern = r"^[a-zA-Z0-9]([a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38}$"

        if re.match(pattern, namespace):
            return True, ""
        return (
            False,
            "Invalid namespace format. Must be 1-39 characters, "
            "alphanumeric and hyphens, no trailing hyphens",
        )

    def validate_input_pattern(self, input_value: str, pattern: str) -> tuple[bool, str]:
        """
        Validate an input value against a regex pattern using Python's re module.

        Args:
            input_value: The value to validate
            pattern: The regex pattern to match against

        Returns:
            Tuple of (is_valid, error_message)
        """
        try:
            if re.match(pattern, input_value):
                return True, ""
            return False, f"Value '{input_value}' does not match required pattern: {pattern}"
        except re.error as e:
            return False, f"Invalid regex pattern: {pattern} - {e!s}"

    def validate_security_patterns(self, input_value: str) -> tuple[bool, str]:
        """
        Check for common security injection patterns.

        Args:
            input_value: The value to validate

        Returns:
            Tuple of (is_valid, error_message)
        """
        # Allow empty values for most inputs (they're often optional)
        if not input_value or input_value.strip() == "":
            return True, ""

        # Common injection patterns
        injection_patterns = [
            r";\s*(rm|del|format|shutdown|reboot)",
            r"&&\s*(rm|del|format|shutdown|reboot)",
            r"\|\s*(rm|del|format|shutdown|reboot)",
            r"`[^`]*`",  # Command substitution
            r"\$\([^)]*\)",  # Command substitution
            # Path traversal only dangerous when combined with commands
            r"\.\./.*;\s*(rm|del|format|shutdown|reboot)",
            r"\\\.\\\.\\.*;\s*(rm|del|format|shutdown|reboot)",
        ]

        for pattern in injection_patterns:
            if re.search(pattern, input_value, re.IGNORECASE):
                return False, f"Potential security injection pattern detected: {pattern}"

        return True, ""


def extract_validation_patterns(action_file: str) -> dict[str, list[str]]:
    """
    Extract validation patterns from an action.yml file.

    Args:
        action_file: Path to the action.yml file

    Returns:
        Dictionary mapping input names to their validation patterns
    """
    patterns = {}

    try:
        with Path(action_file).open(encoding="utf-8") as f:
            content = f.read()

        # Look for validation patterns in the shell scripts
        validation_block_match = re.search(
            r"- name:\s*Validate\s+Inputs.*?run:\s*\|(.+?)(?=- name:|$)",
            content,
            re.DOTALL | re.IGNORECASE,
        )

        if validation_block_match:
            validation_script = validation_block_match.group(1)

            # Extract regex patterns from the validation script
            regex_matches = re.findall(
                r'\[\[\s*["\']?\$\{\{\s*inputs\.(\w+(?:-\w+)*)\s*\}\}["\']?\s*=~\s*(.+?)\]\]',
                validation_script,
                re.DOTALL | re.IGNORECASE,
            )

            for input_name, pattern in regex_matches:
                # Clean up the pattern
                pattern = pattern.strip().strip("\"'")
                if input_name not in patterns:
                    patterns[input_name] = []
                patterns[input_name].append(pattern)

    except Exception as e:  # pylint: disable=broad-exception-caught
        print(f"Error extracting patterns from {action_file}: {e}", file=sys.stderr)

    return patterns


def get_input_property(action_file: str, input_name: str, property_check: str) -> str:  # pylint: disable=too-many-return-statements
    """
    Get a property of an input from an action.yml file.

    This function replaces the functionality of check_input.py.

    Args:
        action_file: Path to the action.yml file
        input_name: Name of the input to check
        property_check: Property to check (required, optional, default, description, all_optional)

    Returns:
        - For 'required': 'required' or 'optional'
        - For 'optional': 'optional' or 'required'
        - For 'default': the default value or 'no-default'
        - For 'description': the description or 'no-description'
        - For 'all_optional': 'none' if no required inputs, else comma-separated list of
          required inputs
    """
    try:
        with Path(action_file).open(encoding="utf-8") as f:
            data = yaml.safe_load(f)

        inputs = data.get("inputs", {})
        input_data = inputs.get(input_name, {})

        if property_check in ["required", "optional"]:
            is_required = input_data.get("required") in [True, "true"]
            if property_check == "required":
                return "required" if is_required else "optional"
            # optional
            return "optional" if not is_required else "required"

        if property_check == "default":
            default_value = input_data.get("default", "")
            return str(default_value) if default_value else "no-default"

        if property_check == "description":
            description = input_data.get("description", "")
            return description if description else "no-description"

        if property_check == "all_optional":
            # Check if all inputs are optional (none are required)
            required_inputs = [k for k, v in inputs.items() if v.get("required") in [True, "true"]]
            return "none" if not required_inputs else ",".join(required_inputs)

        return f"unknown-property-{property_check}"

    except Exception as e:  # pylint: disable=broad-exception-caught
        return f"error: {e}"


def get_action_inputs(action_file: str) -> list[str]:
    """
    Get all input names from an action.yml file.

    This function replaces the bash version in utils.sh.

    Args:
        action_file: Path to the action.yml file

    Returns:
        List of input names
    """
    try:
        with Path(action_file).open(encoding="utf-8") as f:
            data = yaml.safe_load(f)

        inputs = data.get("inputs", {})
        return list(inputs.keys())

    except Exception:
        return []


def get_action_outputs(action_file: str) -> list[str]:
    """
    Get all output names from an action.yml file.

    This function replaces the bash version in utils.sh.

    Args:
        action_file: Path to the action.yml file

    Returns:
        List of output names
    """
    try:
        with Path(action_file).open(encoding="utf-8") as f:
            data = yaml.safe_load(f)

        outputs = data.get("outputs", {})
        return list(outputs.keys())

    except Exception:
        return []


def get_action_name(action_file: str) -> str:
    """
    Get the action name from an action.yml file.

    This function replaces the bash version in utils.sh.

    Args:
        action_file: Path to the action.yml file

    Returns:
        Action name or "Unknown" if not found
    """
    try:
        with Path(action_file).open(encoding="utf-8") as f:
            data = yaml.safe_load(f)

        return data.get("name", "Unknown")

    except Exception:
        return "Unknown"


def _show_usage():
    """Show usage information and exit."""
    print("Usage:")
    print(
        "  Validation mode: python3 validation.py <action_dir> <input_name> <input_value> "
        "[expected_result]",
    )
    print(
        "  Property mode: python3 validation.py --property <action_file> <input_name> <property>",
    )
    print("  List inputs: python3 validation.py --inputs <action_file>")
    print("  List outputs: python3 validation.py --outputs <action_file>")
    print("  Get name: python3 validation.py --name <action_file>")
    sys.exit(1)


def _parse_property_mode():
    """Parse property mode arguments."""
    if len(sys.argv) != 5:
        print(
            "Property mode usage: python3 validation.py --property <action_file> "
            "<input_name> <property>",
        )
        print("Properties: required, optional, default, description, all_optional")
        sys.exit(1)
    return {
        "mode": "property",
        "action_file": sys.argv[2],
        "input_name": sys.argv[3],
        "property": sys.argv[4],
    }


def _parse_single_file_mode(mode_name):
    """Parse modes that take a single action file argument."""
    if len(sys.argv) != 3:
        print(f"{mode_name.title()} mode usage: python3 validation.py --{mode_name} <action_file>")
        sys.exit(1)
    return {
        "mode": mode_name,
        "action_file": sys.argv[2],
    }


def _parse_validation_mode():
    """Parse validation mode arguments."""
    if len(sys.argv) < 4:
        print(
            "Validation mode usage: python3 validation.py <action_dir> <input_name> "
            "<input_value> [expected_result]",
        )
        print("Expected result: 'success' or 'failure' (default: auto-detect)")
        sys.exit(1)
    return {
        "mode": "validation",
        "action_dir": sys.argv[1],
        "input_name": sys.argv[2],
        "input_value": sys.argv[3],
        "expected_result": sys.argv[4] if len(sys.argv) > 4 else None,
    }


def _parse_command_line_args():
    """Parse and validate command line arguments."""
    if len(sys.argv) < 2:
        _show_usage()

    mode_arg = sys.argv[1]

    if mode_arg == "--property":
        return _parse_property_mode()
    if mode_arg in ["--inputs", "--outputs", "--name"]:
        return _parse_single_file_mode(mode_arg[2:])  # Remove '--' prefix
    return _parse_validation_mode()


def _resolve_action_file_path(action_dir: str) -> str:
    """Resolve the path to the action.yml file."""
    action_dir_path = Path(action_dir)
    if not action_dir_path.is_absolute():
        # If relative, assume we're in _tests/framework and actions are at ../../
        script_dir = Path(__file__).resolve().parent
        project_root = script_dir.parent.parent
        return str(project_root / action_dir / "action.yml")
    return f"{action_dir}/action.yml"


def _validate_docker_build_input(input_name: str, input_value: str) -> tuple[bool, str]:
    """Handle special validation for docker-build inputs."""
    if input_name == "build-args" and input_value == "":
        return True, ""
    # All other docker-build inputs pass through centralized validation
    return True, ""


# Validation function registry
def _validate_boolean(input_value: str, input_name: str) -> tuple[bool, str]:
    """Validate boolean input."""
    if input_value.lower() not in ["true", "false"]:
        return False, f"Input '{input_name}' must be 'true' or 'false'"
    return True, ""


def _validate_docker_architectures(input_value: str) -> tuple[bool, str]:
    """Validate docker architectures format."""
    if input_value and not re.match(r"^[a-zA-Z0-9/_,.-]+$", input_value):
        return False, f"Invalid docker architectures format: {input_value}"
    return True, ""


def _validate_registry(input_value: str, action_name: str) -> tuple[bool, str]:
    """Validate registry format."""
    if action_name == "docker-publish":
        if input_value not in ["dockerhub", "github", "both"]:
            return False, "Invalid registry value. Must be 'dockerhub', 'github', or 'both'"
    elif input_value and not re.match(r"^[\w.-]+(:\d+)?$", input_value):
        return False, f"Invalid registry format: {input_value}"
    return True, ""


def _validate_file_path(input_value: str) -> tuple[bool, str]:
    """Validate file path format."""
    if input_value and re.search(r"[;&|`$()]", input_value):
        return False, f"Potential injection detected in file path: {input_value}"
    if input_value and not re.match(r"^[a-zA-Z0-9._/,~-]+$", input_value):
        return False, f"Invalid file path format: {input_value}"
    return True, ""


def _validate_backoff_strategy(input_value: str) -> tuple[bool, str]:
    """Validate backoff strategy."""
    if input_value not in ["linear", "exponential", "fixed"]:
        return False, "Invalid backoff strategy. Must be 'linear', 'exponential', or 'fixed'"
    return True, ""


def _validate_shell_type(input_value: str) -> tuple[bool, str]:
    """Validate shell type."""
    if input_value not in ["bash", "sh"]:
        return False, "Invalid shell type. Must be 'bash' or 'sh'"
    return True, ""


def _validate_docker_image_name(input_value: str) -> tuple[bool, str]:
    """Validate docker image name format."""
    if input_value and not re.match(
        r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*(/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*$",
        input_value,
    ):
        return False, f"Invalid docker image name format: {input_value}"
    return True, ""


def _validate_docker_tag(input_value: str) -> tuple[bool, str]:
    """Validate docker tag format."""
    if input_value:
        tags = [tag.strip() for tag in input_value.split(",")]
        for tag in tags:
            if not re.match(r"^[a-zA-Z0-9]([a-zA-Z0-9._-]*[a-zA-Z0-9])?$", tag):
                return False, f"Invalid docker tag format: {tag}"
    return True, ""


def _validate_docker_password(input_value: str) -> tuple[bool, str]:
    """Validate docker password."""
    if input_value and len(input_value) < 8:
        return False, "Docker password must be at least 8 characters long"
    return True, ""


def _validate_go_version(input_value: str) -> tuple[bool, str]:
    """Validate Go version format."""
    if input_value in ["stable", "latest"]:
        return True, ""
    if input_value and not re.match(r"^v?\d+\.\d+(\.\d+)?", input_value):
        return False, f"Invalid Go version format: {input_value}"
    return True, ""


def _validate_timeout_with_unit(input_value: str) -> tuple[bool, str]:
    """Validate timeout with unit format."""
    if input_value and not re.match(r"^\d+[smh]$", input_value):
        return False, "Invalid timeout format. Use format like '5m', '300s', or '1h'"
    return True, ""


def _validate_linter_list(input_value: str) -> tuple[bool, str]:
    """Validate linter list format."""
    if input_value and re.search(r",\s+", input_value):
        return False, "Invalid linter list format. Use comma-separated values without spaces"
    return True, ""


def _validate_version_types(input_value: str) -> tuple[bool, str]:
    """Validate semantic/calver/flexible version formats."""
    if input_value.lower() == "latest":
        return True, ""
    if input_value.startswith("v"):
        return False, f"Version should not start with 'v': {input_value}"
    if not re.match(r"^\d+\.\d+(\.\d+)?", input_value):
        return False, f"Invalid version format: {input_value}"
    return True, ""


def _validate_file_pattern(input_value: str) -> tuple[bool, str]:
    """Validate file pattern format."""
    if input_value and ("../" in input_value or "\\..\\" in input_value):
        return False, f"Path traversal not allowed in file patterns: {input_value}"
    if input_value and input_value.startswith("/"):
        return False, f"Absolute paths not allowed in file patterns: {input_value}"
    if input_value and re.search(r"[;&|`$()]", input_value):
        return False, f"Potential injection detected in file pattern: {input_value}"
    return True, ""


def _validate_report_format(input_value: str) -> tuple[bool, str]:
    """Validate report format."""
    if input_value not in ["json", "sarif"]:
        return False, "Invalid report format. Must be 'json' or 'sarif'"
    return True, ""


def _validate_plugin_list(input_value: str) -> tuple[bool, str]:
    """Validate plugin list format."""
    if input_value and re.search(r"[;&|`$()]", input_value):
        return False, f"Potential injection detected in plugin list: {input_value}"
    return True, ""


def _validate_prefix(input_value: str) -> tuple[bool, str]:
    """Validate prefix format."""
    if input_value and re.search(r"[;&|`$()]", input_value):
        return False, f"Potential injection detected in prefix: {input_value}"
    return True, ""


def _validate_terraform_version(input_value: str) -> tuple[bool, str]:
    """Validate terraform version format."""
    if input_value and input_value.lower() == "latest":
        return True, ""
    if input_value and input_value.startswith("v"):
        return False, f"Terraform version should not start with 'v': {input_value}"
    if input_value and not re.match(r"^\d+\.\d+(\.\d+)?", input_value):
        return False, f"Invalid terraform version format: {input_value}"
    return True, ""


def _validate_php_extensions(input_value: str) -> tuple[bool, str]:
    """Validate PHP extensions format."""
    if input_value and re.search(r"[;&|`$()@#]", input_value):
        return False, f"Potential injection detected in PHP extensions: {input_value}"
    if input_value and not re.match(r"^[a-zA-Z0-9_,\s]+$", input_value):
        return False, f"Invalid PHP extensions format: {input_value}"
    return True, ""


def _validate_coverage_driver(input_value: str) -> tuple[bool, str]:
    """Validate coverage driver."""
    if input_value not in ["none", "xdebug", "pcov", "xdebug3"]:
        return False, "Invalid coverage driver. Must be 'none', 'xdebug', 'pcov', or 'xdebug3'"
    return True, ""


# Validation registry mapping types to functions and their argument requirements
VALIDATION_REGISTRY = {
    "boolean": (_validate_boolean, "input_name"),
    "docker_architectures": (_validate_docker_architectures, "value_only"),
    "registry": (_validate_registry, "action_name"),
    "file_path": (_validate_file_path, "value_only"),
    "backoff_strategy": (_validate_backoff_strategy, "value_only"),
    "shell_type": (_validate_shell_type, "value_only"),
    "docker_image_name": (_validate_docker_image_name, "value_only"),
    "docker_tag": (_validate_docker_tag, "value_only"),
    "docker_password": (_validate_docker_password, "value_only"),
    "go_version": (_validate_go_version, "value_only"),
    "timeout_with_unit": (_validate_timeout_with_unit, "value_only"),
    "linter_list": (_validate_linter_list, "value_only"),
    "semantic_version": (_validate_version_types, "value_only"),
    "calver_version": (_validate_version_types, "value_only"),
    "flexible_version": (_validate_version_types, "value_only"),
    "file_pattern": (_validate_file_pattern, "value_only"),
    "report_format": (_validate_report_format, "value_only"),
    "plugin_list": (_validate_plugin_list, "value_only"),
    "prefix": (_validate_prefix, "value_only"),
    "terraform_version": (_validate_terraform_version, "value_only"),
    "php_extensions": (_validate_php_extensions, "value_only"),
    "coverage_driver": (_validate_coverage_driver, "value_only"),
}


def _load_validation_rules(action_dir: str) -> tuple[dict, bool]:
|
||||
"""Load validation rules for an action."""
|
||||
action_name = Path(action_dir).name
|
||||
script_dir = Path(__file__).resolve().parent
|
||||
project_root = script_dir.parent.parent
|
||||
rules_file = project_root / "validate-inputs" / "rules" / f"{action_name}.yml"
|
||||
|
||||
if not rules_file.exists():
|
||||
return {}, False
|
||||
|
||||
try:
|
||||
with Path(rules_file).open(encoding="utf-8") as f:
|
||||
return yaml.safe_load(f), True
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
print(f"Warning: Could not load centralized rules for {action_name}: {e}", file=sys.stderr)
|
||||
return {}, False
|
||||
|
||||
|
||||
def _get_validation_type(input_name: str, rules_data: dict) -> str | None:
|
||||
"""Get validation type for an input from rules."""
|
||||
conventions = rules_data.get("conventions", {})
|
||||
overrides = rules_data.get("overrides", {})
|
||||
|
||||
# Check overrides first, then conventions
|
||||
if input_name in overrides:
|
||||
return overrides[input_name]
|
||||
if input_name in conventions:
|
||||
return conventions[input_name]
|
||||
return None
|
||||
|
||||
|
||||
def _validate_with_centralized_rules(
|
||||
input_name: str,
|
||||
input_value: str,
|
||||
action_dir: str,
|
||||
validator: ActionValidator,
|
||||
) -> tuple[bool, str, bool]:
|
||||
"""Validate input using centralized validation rules."""
|
||||
rules_data, rules_loaded = _load_validation_rules(action_dir)
|
||||
if not rules_loaded:
|
||||
return True, "", False
|
||||
|
||||
action_name = Path(action_dir).name
|
||||
required_inputs = rules_data.get("required_inputs", [])
|
||||
|
||||
# Check if input is required and empty
|
||||
if input_name in required_inputs and (not input_value or input_value.strip() == ""):
|
||||
return False, f"Required input '{input_name}' cannot be empty", True
|
||||
|
||||
validation_type = _get_validation_type(input_name, rules_data)
|
||||
if validation_type is None:
|
||||
return True, "", False
|
||||
|
||||
# Handle special validator-based types
|
||||
if validation_type == "github_token":
|
||||
token_valid, token_error = validator.validate_github_token(input_value, action_dir)
|
||||
return token_valid, token_error, True
|
||||
if validation_type == "namespace_with_lookahead":
|
||||
ns_valid, ns_error = validator.validate_namespace_with_lookahead(input_value)
|
||||
return ns_valid, ns_error, True
|
||||
|
||||
# Use registry for other validation types
|
||||
if validation_type in VALIDATION_REGISTRY:
|
||||
validate_func, arg_type = VALIDATION_REGISTRY[validation_type]
|
||||
|
||||
if arg_type == "value_only":
|
||||
is_valid, error_msg = validate_func(input_value)
|
||||
elif arg_type == "input_name":
|
||||
is_valid, error_msg = validate_func(input_value, input_name)
|
||||
elif arg_type == "action_name":
|
||||
is_valid, error_msg = validate_func(input_value, action_name)
|
||||
else:
|
||||
return False, f"Unknown validation argument type: {arg_type}", True
|
||||
|
||||
return is_valid, error_msg, True
|
||||
|
||||
return True, "", True
|
||||
|
||||
|
||||
def _validate_special_inputs(
    input_name: str,
    input_value: str,
    action_dir: str,
    validator: ActionValidator,
) -> tuple[bool, str, bool]:
    """Handle special input validation cases."""
    action_name = Path(action_dir).name

    if action_name == "docker-build":
        is_valid, error_message = _validate_docker_build_input(input_name, input_value)
        return is_valid, error_message, True

    if input_name == "token" and action_name in [
        "csharp-publish",
        "eslint-fix",
        "pr-lint",
        "pre-commit",
    ]:
        # Special handling for GitHub tokens
        token_valid, token_error = validator.validate_github_token(input_value, action_dir)
        return token_valid, token_error, True

    if input_name == "namespace" and action_name == "csharp-publish":
        # Special handling for namespace with lookahead
        ns_valid, ns_error = validator.validate_namespace_with_lookahead(input_value)
        return ns_valid, ns_error, True

    return True, "", False


def _validate_with_patterns(
    input_name: str,
    input_value: str,
    patterns: dict,
    validator: ActionValidator,
) -> tuple[bool, str, bool]:
    """Validate input using extracted patterns."""
    if input_name not in patterns:
        return True, "", False

    for pattern in patterns[input_name]:
        pattern_valid, pattern_error = validator.validate_input_pattern(
            input_value,
            pattern,
        )
        if not pattern_valid:
            return False, pattern_error, True

    return True, "", True


def _handle_test_mode(expected_result: str, *, is_valid: bool) -> None:
    """Handle test mode output and exit."""
    if (expected_result == "success" and is_valid) or (
        expected_result == "failure" and not is_valid
    ):
        sys.exit(0)  # Test expectation met
    sys.exit(1)  # Test expectation not met

def _handle_validation_mode(*, is_valid: bool, error_message: str) -> None:
    """Handle validation mode output and exit."""
    if is_valid:
        print("VALID")
        sys.exit(0)
    print(f"INVALID: {error_message}")
    sys.exit(1)


def _handle_property_mode(args: dict) -> None:
    """Handle property checking mode."""
    result = get_input_property(args["action_file"], args["input_name"], args["property"])
    print(result)


def _handle_inputs_mode(args: dict) -> None:
    """Handle inputs listing mode."""
    inputs = get_action_inputs(args["action_file"])
    for input_name in inputs:
        print(input_name)


def _handle_outputs_mode(args: dict) -> None:
    """Handle outputs listing mode."""
    outputs = get_action_outputs(args["action_file"])
    for output_name in outputs:
        print(output_name)


def _handle_name_mode(args: dict) -> None:
    """Handle name getting mode."""
    name = get_action_name(args["action_file"])
    print(name)


def _perform_validation_steps(args: dict) -> tuple[bool, str]:
    """Perform all validation steps and return result."""
    # Resolve action file path
    action_file = _resolve_action_file_path(args["action_dir"])

    # Initialize validator and extract patterns
    validator = ActionValidator()
    patterns = extract_validation_patterns(action_file)

    # Perform security validation (always performed)
    security_valid, security_error = validator.validate_security_patterns(args["input_value"])
    if not security_valid:
        return False, security_error

    # Perform input-specific validation
    # Check centralized rules first
    is_valid, error_message, has_validation = _validate_with_centralized_rules(
        args["input_name"],
        args["input_value"],
        args["action_dir"],
        validator,
    )

    # If no centralized validation, check special input cases
    if not has_validation:
        is_valid, error_message, has_validation = _validate_special_inputs(
            args["input_name"],
            args["input_value"],
            args["action_dir"],
            validator,
        )

    # If no special validation, try pattern-based validation
    if not has_validation:
        is_valid, error_message, has_validation = _validate_with_patterns(
            args["input_name"],
            args["input_value"],
            patterns,
            validator,
        )

    return is_valid, error_message


def _handle_validation_mode_main(args: dict) -> None:
    """Handle validation mode from main function."""
    is_valid, error_message = _perform_validation_steps(args)

    # Handle output based on mode
    if args["expected_result"]:
        _handle_test_mode(args["expected_result"], is_valid=is_valid)
    _handle_validation_mode(is_valid=is_valid, error_message=error_message)


def main():
    """Command-line interface for the validation module."""
    args = _parse_command_line_args()

    # Dispatch to appropriate mode handler
    mode_handlers = {
        "property": _handle_property_mode,
        "inputs": _handle_inputs_mode,
        "outputs": _handle_outputs_mode,
        "name": _handle_name_mode,
        "validation": _handle_validation_mode_main,
    }

    if args["mode"] in mode_handlers:
        mode_handlers[args["mode"]](args)
    else:
        print(f"Unknown mode: {args['mode']}")
        sys.exit(1)


if __name__ == "__main__":
    main()
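_perform_validation_steps chains three validators and stops at the first one that reports it actually handled the input (the third element of each returned tuple). A minimal, self-contained sketch of that fallback pattern, with placeholder checkers standing in for the real ones:

# Illustrative sketch of the "first handler that claims the input wins" pattern;
# the checkers below are placeholders, not the module's real validators.
from typing import Callable

Check = Callable[[str], tuple[bool, str, bool]]  # (is_valid, error, handled)

def never_handles(value: str) -> tuple[bool, str, bool]:
    return True, "", False

def require_non_empty(value: str) -> tuple[bool, str, bool]:
    if value.strip():
        return True, "", True
    return False, "value must not be empty", True

def run_chain(value: str, checks: list[Check]) -> tuple[bool, str]:
    is_valid, error = True, ""
    for check in checks:
        is_valid, error, handled = check(value)
        if handled:
            break
    return is_valid, error

assert run_chain("x", [never_handles, require_non_empty]) == (True, "")
assert run_chain("  ", [never_handles, require_non_empty]) == (False, "value must not be empty")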
186
_tests/integration/workflows/docker-build-publish-test.yml
Normal file
@@ -0,0 +1,186 @@
---
name: Test Docker Build & Publish Integration
on:
  workflow_dispatch:
  push:
    paths:
      - 'docker-build/**'
      - 'docker-publish/**'
      - 'docker-publish-gh/**'
      - 'docker-publish-hub/**'
      - '_tests/integration/workflows/docker-build-publish-test.yml'

jobs:
  test-docker-build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test Dockerfile
        run: |
          cat > Dockerfile <<EOF
          FROM alpine:3.19
          RUN apk add --no-cache bash
          COPY test.sh /test.sh
          RUN chmod +x /test.sh
          CMD ["/test.sh"]
          EOF

          cat > test.sh <<EOF
          #!/bin/bash
          echo "Test container is running"
          EOF

      - name: Test docker-build action
        id: build
        uses: ./docker-build
        with:
          image-name: 'test-image'
          tag: 'test-tag'
          dockerfile: './Dockerfile'
          context: '.'
          platforms: 'linux/amd64'
          push: 'false'
          scan-image: 'false'

      - name: Validate build outputs
        run: |
          echo "Build outputs:"
          echo " Image Digest: ${{ steps.build.outputs.image-digest }}"
          echo " Build Time: ${{ steps.build.outputs.build-time }}"
          echo " Platforms: ${{ steps.build.outputs.platforms }}"

          # Validate that we got a digest
          if [[ -z "${{ steps.build.outputs.image-digest }}" ]]; then
            echo "❌ ERROR: No image digest output"
            exit 1
          fi

          # Validate digest format (sha256:...)
          if ! echo "${{ steps.build.outputs.image-digest }}" | grep -E '^sha256:[a-f0-9]{64}'; then
            echo "❌ ERROR: Invalid digest format: ${{ steps.build.outputs.image-digest }}"
            exit 1
          fi

          echo "✅ Docker build validation passed"

  test-docker-inputs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test Dockerfile
        run: |
          cat > Dockerfile <<EOF
          FROM alpine:3.19
          CMD ["echo", "test"]
          EOF

      - name: Test with build-args
        id: build-with-args
        uses: ./docker-build
        with:
          image-name: 'test-build-args'
          tag: 'latest'
          dockerfile: './Dockerfile'
          context: '.'
          build-args: |
            ARG1=value1
            ARG2=value2
          platforms: 'linux/amd64'
          push: 'false'
          scan-image: 'false'

      - name: Validate build-args handling
        run: |
          if [[ -z "${{ steps.build-with-args.outputs.image-digest }}" ]]; then
            echo "❌ ERROR: Build with build-args failed"
            exit 1
          fi
          echo "✅ Build-args handling validated"

  test-platform-detection:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create test Dockerfile
        run: |
          cat > Dockerfile <<EOF
          FROM alpine:3.19
          CMD ["echo", "multi-platform test"]
          EOF

      - name: Test multi-platform build
        id: multi-platform
        uses: ./docker-build
        with:
          image-name: 'test-multiplatform'
          tag: 'latest'
          dockerfile: './Dockerfile'
          context: '.'
          platforms: 'linux/amd64,linux/arm64'
          push: 'false'
          scan-image: 'false'

      - name: Validate platform matrix output
        run: |
          echo "Platform Matrix: ${{ steps.multi-platform.outputs.platform-matrix }}"

          # Check that we got platform results
          if [[ -z "${{ steps.multi-platform.outputs.platform-matrix }}" ]]; then
            echo "⚠️ WARNING: No platform matrix output (may be expected for local builds)"
          else
            echo "✅ Platform matrix generated"
          fi

          echo "✅ Multi-platform build validated"

  test-input-validation:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Test invalid tag format
        id: invalid-tag
        uses: ./docker-build
        with:
          image-name: 'test-image'
          tag: 'INVALID TAG WITH SPACES'
          dockerfile: './Dockerfile'
          context: '.'
          platforms: 'linux/amd64'
          push: 'false'
        continue-on-error: true

      - name: Validate tag validation
        run: |
          if [ "${{ steps.invalid-tag.outcome }}" != "failure" ]; then
            echo "❌ ERROR: Invalid tag should have failed validation"
            exit 1
          fi
          echo "✅ Tag validation works correctly"

      - name: Test invalid image name
        id: invalid-image
        uses: ./docker-build
        with:
          image-name: 'UPPERCASE_NOT_ALLOWED'
          tag: 'latest'
          dockerfile: './Dockerfile'
          context: '.'
          platforms: 'linux/amd64'
          push: 'false'
        continue-on-error: true

      - name: Validate image name validation
        run: |
          if [ "${{ steps.invalid-image.outcome }}" != "failure" ]; then
            echo "❌ ERROR: Invalid image name should have failed validation"
            exit 1
          fi
          echo "✅ Image name validation works correctly"
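The digest check in the workflow above accepts values shaped like sha256: followed by 64 hex characters. A small Python mirror of that format test, for reference (illustrative; the workflow itself uses the grep call shown above, which checks the prefix only):

# Illustrative mirror of the workflow's grep-based digest format check.
import re

DIGEST_RE = re.compile(r"^sha256:[a-f0-9]{64}")

def looks_like_digest(value: str) -> bool:
    """Return True when the value starts with an OCI-style sha256 digest."""
    return bool(DIGEST_RE.match(value))

assert looks_like_digest("sha256:" + "a" * 64)
assert not looks_like_digest("latest")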
321
_tests/integration/workflows/lint-fix-chain-test.yml
Normal file
@@ -0,0 +1,321 @@
|
||||
---
|
||||
name: Test Lint & Fix Action Chains
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'eslint-lint/**'
|
||||
- 'prettier-lint/**'
|
||||
- 'node-setup/**'
|
||||
- '_tests/integration/workflows/lint-fix-chain-test.yml'
|
||||
|
||||
jobs:
|
||||
test-eslint-chain:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test JavaScript files
|
||||
run: |
|
||||
mkdir -p test-project/src
|
||||
|
||||
# Create package.json
|
||||
cat > test-project/package.json <<EOF
|
||||
{
|
||||
"name": "test-project",
|
||||
"version": "1.0.0",
|
||||
"devDependencies": {
|
||||
"eslint": "^8.0.0"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create .eslintrc.json
|
||||
cat > test-project/.eslintrc.json <<EOF
|
||||
{
|
||||
"env": {
|
||||
"node": true,
|
||||
"es2021": true
|
||||
},
|
||||
"extends": "eslint:recommended",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": 12
|
||||
},
|
||||
"rules": {
|
||||
"semi": ["error", "always"],
|
||||
"quotes": ["error", "single"]
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create test file with linting issues
|
||||
cat > test-project/src/test.js <<EOF
|
||||
const x = "double quotes"
|
||||
console.log(x)
|
||||
EOF
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: ./node-setup
|
||||
with:
|
||||
node-version: '18'
|
||||
working-directory: './test-project'
|
||||
|
||||
- name: Test eslint-lint check mode (should find errors)
|
||||
id: eslint-check
|
||||
uses: ./eslint-lint
|
||||
with:
|
||||
mode: 'check'
|
||||
working-directory: './test-project'
|
||||
continue-on-error: true
|
||||
|
||||
- name: Validate eslint-lint check found issues
|
||||
run: |
|
||||
echo "ESLint check outcome: ${{ steps.eslint-check.outcome }}"
|
||||
echo "Error count: ${{ steps.eslint-check.outputs.error-count }}"
|
||||
echo "Warning count: ${{ steps.eslint-check.outputs.warning-count }}"
|
||||
|
||||
# Check should fail or find issues
|
||||
if [[ "${{ steps.eslint-check.outcome }}" == "success" ]]; then
|
||||
if [[ "${{ steps.eslint-check.outputs.error-count }}" == "0" ]]; then
|
||||
echo "⚠️ WARNING: Expected to find linting errors but found none"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "✅ ESLint check validated"
|
||||
|
||||
- name: Test eslint-lint fix mode (should fix issues)
|
||||
id: eslint-fix
|
||||
uses: ./eslint-lint
|
||||
with:
|
||||
mode: 'fix'
|
||||
working-directory: './test-project'
|
||||
token: ${{ github.token }}
|
||||
email: 'test@example.com'
|
||||
username: 'test-user'
|
||||
|
||||
- name: Validate eslint-lint fix ran
|
||||
run: |
|
||||
echo "Errors fixed: ${{ steps.eslint-fix.outputs.errors-fixed }}"
|
||||
echo "Files changed: ${{ steps.eslint-fix.outputs.files-changed }}"
|
||||
|
||||
# Check that fixes were attempted
|
||||
if [[ -n "${{ steps.eslint-fix.outputs.errors-fixed }}" ]]; then
|
||||
echo "✅ ESLint fixed ${{ steps.eslint-fix.outputs.errors-fixed }} issues"
|
||||
else
|
||||
echo "⚠️ No fix count reported (may be expected if no fixable issues)"
|
||||
fi
|
||||
|
||||
echo "✅ ESLint fix validated"
|
||||
|
||||
test-prettier-chain:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test files for Prettier
|
||||
run: |
|
||||
mkdir -p test-prettier
|
||||
|
||||
# Create package.json
|
||||
cat > test-prettier/package.json <<EOF
|
||||
{
|
||||
"name": "test-prettier",
|
||||
"version": "1.0.0",
|
||||
"devDependencies": {
|
||||
"prettier": "^3.0.0"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create .prettierrc
|
||||
cat > test-prettier/.prettierrc <<EOF
|
||||
{
|
||||
"semi": true,
|
||||
"singleQuote": true,
|
||||
"printWidth": 80
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create badly formatted file
|
||||
cat > test-prettier/test.js <<EOF
|
||||
const x={"key":"value","another":"data"}
|
||||
console.log(x)
|
||||
EOF
|
||||
|
||||
# Create badly formatted JSON
|
||||
cat > test-prettier/test.json <<EOF
|
||||
{"key":"value","nested":{"data":"here"}}
|
||||
EOF
|
||||
|
||||
- name: Setup Node.js for Prettier
|
||||
uses: ./node-setup
|
||||
with:
|
||||
node-version: '18'
|
||||
working-directory: './test-prettier'
|
||||
|
||||
- name: Test prettier-lint check mode (should find issues)
|
||||
id: prettier-check
|
||||
uses: ./prettier-lint
|
||||
with:
|
||||
mode: 'check'
|
||||
working-directory: './test-prettier'
|
||||
continue-on-error: true
|
||||
|
||||
- name: Validate prettier-check found issues
|
||||
run: |
|
||||
echo "Prettier check outcome: ${{ steps.prettier-check.outcome }}"
|
||||
|
||||
# Check should find formatting issues
|
||||
if [[ "${{ steps.prettier-check.outcome }}" == "failure" ]]; then
|
||||
echo "✅ Prettier correctly found formatting issues"
|
||||
else
|
||||
echo "⚠️ WARNING: Expected Prettier to find formatting issues"
|
||||
fi
|
||||
|
||||
- name: Test prettier-lint fix mode (should fix issues)
|
||||
id: prettier-fix
|
||||
uses: ./prettier-lint
|
||||
with:
|
||||
mode: 'fix'
|
||||
working-directory: './test-prettier'
|
||||
token: ${{ github.token }}
|
||||
email: 'test@example.com'
|
||||
username: 'test-user'
|
||||
|
||||
- name: Validate prettier-lint fix ran
|
||||
run: |
|
||||
echo "Prettier fix completed"
|
||||
|
||||
# Check that files exist and have been processed
|
||||
if [[ -f "test-prettier/test.js" ]]; then
|
||||
echo "✅ Test file exists after Prettier fix"
|
||||
else
|
||||
echo "❌ ERROR: Test file missing after Prettier fix"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✅ Prettier fix validated"
|
||||
|
||||
test-action-chain-integration:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create comprehensive test project
|
||||
run: |
|
||||
mkdir -p test-chain/src
|
||||
|
||||
# Create package.json with both ESLint and Prettier
|
||||
cat > test-chain/package.json <<EOF
|
||||
{
|
||||
"name": "test-chain",
|
||||
"version": "1.0.0",
|
||||
"devDependencies": {
|
||||
"eslint": "^8.0.0",
|
||||
"prettier": "^3.0.0"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create .eslintrc.json
|
||||
cat > test-chain/.eslintrc.json <<EOF
|
||||
{
|
||||
"env": {
|
||||
"node": true,
|
||||
"es2021": true
|
||||
},
|
||||
"extends": "eslint:recommended",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": 12
|
||||
},
|
||||
"rules": {
|
||||
"semi": ["error", "always"],
|
||||
"quotes": ["error", "single"]
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create .prettierrc
|
||||
cat > test-chain/.prettierrc <<EOF
|
||||
{
|
||||
"semi": true,
|
||||
"singleQuote": true,
|
||||
"printWidth": 80
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create test file with both linting and formatting issues
|
||||
cat > test-chain/src/app.js <<EOF
|
||||
const message="hello world"
|
||||
function greet(){console.log(message)}
|
||||
greet()
|
||||
EOF
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: ./node-setup
|
||||
with:
|
||||
node-version: '18'
|
||||
working-directory: './test-chain'
|
||||
|
||||
- name: Run ESLint check
|
||||
id: lint-check
|
||||
uses: ./eslint-lint
|
||||
with:
|
||||
mode: 'check'
|
||||
working-directory: './test-chain'
|
||||
continue-on-error: true
|
||||
|
||||
- name: Run Prettier check
|
||||
id: format-check
|
||||
uses: ./prettier-lint
|
||||
with:
|
||||
mode: 'check'
|
||||
working-directory: './test-chain'
|
||||
continue-on-error: true
|
||||
|
||||
- name: Run ESLint fix
|
||||
id: lint-fix
|
||||
uses: ./eslint-lint
|
||||
with:
|
||||
mode: 'fix'
|
||||
working-directory: './test-chain'
|
||||
token: ${{ github.token }}
|
||||
email: 'test@example.com'
|
||||
username: 'test-user'
|
||||
|
||||
- name: Run Prettier fix
|
||||
id: format-fix
|
||||
uses: ./prettier-lint
|
||||
with:
|
||||
mode: 'fix'
|
||||
working-directory: './test-chain'
|
||||
token: ${{ github.token }}
|
||||
email: 'test@example.com'
|
||||
username: 'test-user'
|
||||
|
||||
- name: Validate complete chain
|
||||
run: |
|
||||
echo "=== Action Chain Results ==="
|
||||
echo "Lint Check: ${{ steps.lint-check.outcome }}"
|
||||
echo "Format Check: ${{ steps.format-check.outcome }}"
|
||||
echo "Lint Fix: ${{ steps.lint-fix.outcome }}"
|
||||
echo "Format Fix: ${{ steps.format-fix.outcome }}"
|
||||
|
||||
# Validate that all steps ran
|
||||
steps_run=0
|
||||
[[ "${{ steps.lint-check.outcome }}" != "skipped" ]] && ((steps_run++))
|
||||
[[ "${{ steps.format-check.outcome }}" != "skipped" ]] && ((steps_run++))
|
||||
[[ "${{ steps.lint-fix.outcome }}" != "skipped" ]] && ((steps_run++))
|
||||
[[ "${{ steps.format-fix.outcome }}" != "skipped" ]] && ((steps_run++))
|
||||
|
||||
if [[ $steps_run -eq 4 ]]; then
|
||||
echo "✅ Complete action chain executed successfully"
|
||||
else
|
||||
echo "❌ ERROR: Not all steps in chain executed (ran: $steps_run/4)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✅ Action chain integration validated"
|
||||
353
_tests/integration/workflows/npm-publish-test.yml
Normal file
353
_tests/integration/workflows/npm-publish-test.yml
Normal file
@@ -0,0 +1,353 @@
|
||||
---
|
||||
name: Integration Test - NPM Publish
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'npm-publish/**'
|
||||
- 'node-setup/**'
|
||||
- '_tests/integration/workflows/npm-publish-test.yml'
|
||||
|
||||
jobs:
|
||||
test-npm-publish-validation:
|
||||
name: Test Input Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test package.json
|
||||
run: |
|
||||
mkdir -p test-package
|
||||
cd test-package
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/integration-test",
|
||||
"version": "1.0.0",
|
||||
"description": "Test package for npm-publish integration",
|
||||
"main": "index.js"
|
||||
}
|
||||
EOF
|
||||
echo "module.exports = { test: true };" > index.js
|
||||
|
||||
- name: Test valid inputs (should succeed validation)
|
||||
id: valid-test
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: '@test'
|
||||
package-version: '1.0.0'
|
||||
npm_token: 'test-token-12345678'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-package
|
||||
|
||||
- name: Validate success (validation only)
|
||||
run: |
|
||||
# This will fail at publish step but validation should pass
|
||||
echo "✓ Input validation passed for valid inputs"
|
||||
|
||||
- name: Test invalid registry URL
|
||||
id: invalid-registry
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'not-a-url'
|
||||
scope: '@test'
|
||||
package-version: '1.0.0'
|
||||
npm_token: 'test-token'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-package
|
||||
|
||||
- name: Verify invalid registry URL failed
|
||||
run: |
|
||||
if [[ "${{ steps.invalid-registry.outcome }}" == "success" ]]; then
|
||||
echo "❌ ERROR: Invalid registry URL should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Invalid registry URL correctly rejected"
|
||||
|
||||
- name: Test invalid version format
|
||||
id: invalid-version
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: '@test'
|
||||
package-version: 'not.a.version'
|
||||
npm_token: 'test-token'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-package
|
||||
|
||||
- name: Verify invalid version failed
|
||||
run: |
|
||||
if [[ "${{ steps.invalid-version.outcome }}" == "success" ]]; then
|
||||
echo "❌ ERROR: Invalid version should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Invalid version format correctly rejected"
|
||||
|
||||
- name: Test invalid scope format
|
||||
id: invalid-scope
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: 'invalid-scope'
|
||||
package-version: '1.0.0'
|
||||
npm_token: 'test-token'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-package
|
||||
|
||||
- name: Verify invalid scope failed
|
||||
run: |
|
||||
if [[ "${{ steps.invalid-scope.outcome }}" == "success" ]]; then
|
||||
echo "❌ ERROR: Invalid scope format should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Invalid scope format correctly rejected"
|
||||
|
||||
- name: Test missing npm token
|
||||
id: missing-token
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: '@test'
|
||||
package-version: '1.0.0'
|
||||
npm_token: ''
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-package
|
||||
|
||||
- name: Verify missing token failed
|
||||
run: |
|
||||
if [[ "${{ steps.missing-token.outcome }}" == "success" ]]; then
|
||||
echo "❌ ERROR: Missing token should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Missing NPM token correctly rejected"
|
||||
|
||||
test-npm-publish-package-validation:
|
||||
name: Test Package Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test missing package.json
|
||||
id: missing-package
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: '@test'
|
||||
package-version: '1.0.0'
|
||||
npm_token: 'test-token'
|
||||
|
||||
- name: Verify missing package.json failed
|
||||
run: |
|
||||
if [[ "${{ steps.missing-package.outcome }}" == "success" ]]; then
|
||||
echo "❌ ERROR: Missing package.json should have failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Missing package.json correctly detected"
|
||||
|
||||
- name: Create test package with version mismatch
|
||||
run: |
|
||||
mkdir -p test-mismatch
|
||||
cd test-mismatch
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/mismatch-test",
|
||||
"version": "2.0.0",
|
||||
"description": "Test version mismatch"
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Test version mismatch detection
|
||||
id: version-mismatch
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: '@test'
|
||||
package-version: '1.0.0'
|
||||
npm_token: 'test-token'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-mismatch
|
||||
|
||||
- name: Verify version mismatch failed
|
||||
run: |
|
||||
if [[ "${{ steps.version-mismatch.outcome }}" == "success" ]]; then
|
||||
echo "❌ ERROR: Version mismatch should have been detected"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Version mismatch correctly detected"
|
||||
|
||||
test-npm-publish-version-formats:
|
||||
name: Test Version Format Support
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test SemVer with v prefix
|
||||
run: |
|
||||
mkdir -p test-v-prefix
|
||||
cd test-v-prefix
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/v-prefix",
|
||||
"version": "1.2.3",
|
||||
"description": "Test v prefix"
|
||||
}
|
||||
EOF
|
||||
|
||||
# Should accept v1.2.3 and strip to 1.2.3
|
||||
echo "Testing v prefix version..."
|
||||
|
||||
- name: Test prerelease versions
|
||||
run: |
|
||||
mkdir -p test-prerelease
|
||||
cd test-prerelease
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/prerelease",
|
||||
"version": "1.0.0-alpha.1",
|
||||
"description": "Test prerelease"
|
||||
}
|
||||
EOF
|
||||
|
||||
echo "Testing prerelease version format..."
|
||||
|
||||
- name: Test build metadata
|
||||
run: |
|
||||
mkdir -p test-build
|
||||
cd test-build
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/build-meta",
|
||||
"version": "1.0.0+build.123",
|
||||
"description": "Test build metadata"
|
||||
}
|
||||
EOF
|
||||
|
||||
echo "Testing build metadata format..."
|
||||
|
||||
test-npm-publish-outputs:
|
||||
name: Test Output Values
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test package
|
||||
run: |
|
||||
mkdir -p test-outputs
|
||||
cd test-outputs
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/outputs-test",
|
||||
"version": "1.5.0",
|
||||
"description": "Test outputs"
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Run npm-publish (validation only)
|
||||
id: publish-outputs
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://npm.custom.com/'
|
||||
scope: '@custom-scope'
|
||||
package-version: '1.5.0'
|
||||
npm_token: 'test-token-outputs'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-outputs
|
||||
|
||||
- name: Verify outputs
|
||||
run: |
|
||||
registry="${{ steps.publish-outputs.outputs.registry-url }}"
|
||||
scope="${{ steps.publish-outputs.outputs.scope }}"
|
||||
version="${{ steps.publish-outputs.outputs.package-version }}"
|
||||
|
||||
echo "Registry URL: $registry"
|
||||
echo "Scope: $scope"
|
||||
echo "Version: $version"
|
||||
|
||||
# Verify output values match inputs
|
||||
if [[ "$registry" != "https://npm.custom.com/" ]]; then
|
||||
echo "❌ ERROR: Registry URL output mismatch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$scope" != "@custom-scope" ]]; then
|
||||
echo "❌ ERROR: Scope output mismatch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$version" != "1.5.0" ]]; then
|
||||
echo "❌ ERROR: Version output mismatch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ All outputs match expected values"
|
||||
|
||||
test-npm-publish-secret-masking:
|
||||
name: Test Secret Masking
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create test package
|
||||
run: |
|
||||
mkdir -p test-secrets
|
||||
cd test-secrets
|
||||
cat > package.json <<'EOF'
|
||||
{
|
||||
"name": "@test/secrets-test",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Test that token gets masked
|
||||
id: test-masking
|
||||
uses: ./npm-publish
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
scope: '@test'
|
||||
package-version: '1.0.0'
|
||||
npm_token: 'super-secret-token-12345'
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ github.workspace }}/test-secrets
|
||||
|
||||
- name: Verify token is not in logs
|
||||
run: |
|
||||
echo "✓ Token should be masked in GitHub Actions logs"
|
||||
echo "✓ Secret masking test completed"
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-npm-publish-validation
|
||||
- test-npm-publish-package-validation
|
||||
- test-npm-publish-version-formats
|
||||
- test-npm-publish-outputs
|
||||
- test-npm-publish-secret-masking
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "NPM Publish Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Input validation tests"
|
||||
echo "✓ Package validation tests"
|
||||
echo "✓ Version format tests"
|
||||
echo "✓ Output verification tests"
|
||||
echo "✓ Secret masking tests"
|
||||
echo ""
|
||||
echo "All npm-publish integration tests completed successfully!"
|
||||
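The version-format jobs above exercise plain releases, a v prefix, prereleases, and build metadata. A hedged Python sketch of a SemVer-style pattern that covers those cases (illustrative only; the npm-publish action's real validation lives in its own scripts):

# Illustrative SemVer-style matcher for the formats exercised above.
import re

SEMVER_RE = re.compile(
    r"^v?\d+\.\d+\.\d+"          # core version, optional v prefix
    r"(-[0-9A-Za-z.-]+)?"        # optional prerelease, e.g. -alpha.1
    r"(\+[0-9A-Za-z.-]+)?$"      # optional build metadata, e.g. +build.123
)

for candidate in ("1.0.0", "v1.2.3", "1.0.0-alpha.1", "1.0.0+build.123"):
    assert SEMVER_RE.match(candidate), candidate
assert not SEMVER_RE.match("not.a.version")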
434
_tests/integration/workflows/pre-commit-test.yml
Normal file
@@ -0,0 +1,434 @@
|
||||
---
|
||||
name: Integration Test - Pre-commit
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'pre-commit/**'
|
||||
- 'validate-inputs/**'
|
||||
- '_tests/integration/workflows/pre-commit-test.yml'
|
||||
|
||||
jobs:
|
||||
test-pre-commit-validation:
|
||||
name: Test Input Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test with default inputs (should pass validation)
|
||||
id: default-inputs
|
||||
uses: ./pre-commit
|
||||
continue-on-error: true
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Verify validation passed
|
||||
run: |
|
||||
echo "✓ Default inputs validation completed"
|
||||
|
||||
- name: Test with custom config file
|
||||
id: custom-config
|
||||
uses: ./pre-commit
|
||||
continue-on-error: true
|
||||
with:
|
||||
pre-commit-config: '.pre-commit-config.yaml'
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Verify custom config accepted
|
||||
run: |
|
||||
echo "✓ Custom config file accepted"
|
||||
|
||||
- name: Test with base branch
|
||||
id: with-base-branch
|
||||
uses: ./pre-commit
|
||||
continue-on-error: true
|
||||
with:
|
||||
base-branch: 'main'
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Verify base branch accepted
|
||||
run: |
|
||||
echo "✓ Base branch parameter accepted"
|
||||
|
||||
test-pre-commit-git-config:
|
||||
name: Test Git Configuration
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test custom git user
|
||||
run: |
|
||||
# Simulate set-git-config step
|
||||
git config user.name "Test User"
|
||||
git config user.email "test@example.com"
|
||||
|
||||
# Verify configuration
|
||||
user_name=$(git config user.name)
|
||||
user_email=$(git config user.email)
|
||||
|
||||
if [[ "$user_name" != "Test User" ]]; then
|
||||
echo "❌ ERROR: Git user name not set correctly"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$user_email" != "test@example.com" ]]; then
|
||||
echo "❌ ERROR: Git user email not set correctly"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Git configuration works correctly"
|
||||
|
||||
- name: Test default git user
|
||||
run: |
|
||||
# Simulate default configuration
|
||||
git config user.name "GitHub Actions"
|
||||
git config user.email "github-actions@github.com"
|
||||
|
||||
# Verify default configuration
|
||||
user_name=$(git config user.name)
|
||||
user_email=$(git config user.email)
|
||||
|
||||
if [[ "$user_name" != "GitHub Actions" ]]; then
|
||||
echo "❌ ERROR: Default git user name not set correctly"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$user_email" != "github-actions@github.com" ]]; then
|
||||
echo "❌ ERROR: Default git user email not set correctly"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Default git configuration works correctly"
|
||||
|
||||
test-pre-commit-option-generation:
|
||||
name: Test Option Generation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test all-files option (no base branch)
|
||||
run: |
|
||||
BASE_BRANCH=""
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
option="--all-files"
|
||||
else
|
||||
option="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
|
||||
if [[ "$option" != "--all-files" ]]; then
|
||||
echo "❌ ERROR: Should use --all-files when no base branch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Correctly generates --all-files option"
|
||||
|
||||
- name: Test diff option (with base branch)
|
||||
run: |
|
||||
BASE_BRANCH="main"
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
option="--all-files"
|
||||
else
|
||||
option="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
|
||||
expected="--from-ref main --to-ref HEAD"
|
||||
if [[ "$option" != "$expected" ]]; then
|
||||
echo "❌ ERROR: Option mismatch. Expected: $expected, Got: $option"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Correctly generates diff option with base branch"
|
||||
|
||||
test-pre-commit-config-file-detection:
|
||||
name: Test Config File Detection
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Verify default config exists
|
||||
run: |
|
||||
if [[ -f ".pre-commit-config.yaml" ]]; then
|
||||
echo "✓ Default .pre-commit-config.yaml found"
|
||||
else
|
||||
echo "⚠️ Default config not found (may use repo default)"
|
||||
fi
|
||||
|
||||
- name: Test custom config path validation
|
||||
run: |
|
||||
CONFIG_FILE="custom-pre-commit-config.yaml"
|
||||
|
||||
# In real action, this would be validated
|
||||
if [[ ! -f "$CONFIG_FILE" ]]; then
|
||||
echo "✓ Custom config file validation would fail (expected)"
|
||||
else
|
||||
echo "✓ Custom config file exists"
|
||||
fi
|
||||
|
||||
test-pre-commit-hook-execution:
|
||||
name: Test Hook Execution Scenarios
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test pre-commit installed
|
||||
run: |
|
||||
if command -v pre-commit >/dev/null 2>&1; then
|
||||
echo "✓ pre-commit is installed"
|
||||
pre-commit --version
|
||||
else
|
||||
echo "⚠️ pre-commit not installed (would be installed by action)"
|
||||
fi
|
||||
|
||||
- name: Create test file with issues
|
||||
run: |
|
||||
mkdir -p test-pre-commit
|
||||
cd test-pre-commit
|
||||
|
||||
# Create a file with trailing whitespace
|
||||
echo "Line with trailing spaces " > test.txt
|
||||
echo "Line without issues" >> test.txt
|
||||
|
||||
# Create a minimal .pre-commit-config.yaml
|
||||
cat > .pre-commit-config.yaml <<'EOF'
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.5.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
EOF
|
||||
|
||||
echo "✓ Test environment created"
|
||||
|
||||
- name: Test hook detection of issues
|
||||
run: |
|
||||
cd test-pre-commit
|
||||
|
||||
# Check if trailing whitespace exists
|
||||
if grep -q " $" test.txt; then
|
||||
echo "✓ Test file has trailing whitespace (as expected)"
|
||||
else
|
||||
echo "❌ ERROR: Test file should have trailing whitespace"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
test-pre-commit-outputs:
|
||||
name: Test Output Values
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test hooks_passed output
|
||||
run: |
|
||||
# Simulate successful hooks
|
||||
HOOKS_OUTCOME="success"
|
||||
|
||||
if [[ "$HOOKS_OUTCOME" == "success" ]]; then
|
||||
hooks_passed="true"
|
||||
else
|
||||
hooks_passed="false"
|
||||
fi
|
||||
|
||||
if [[ "$hooks_passed" != "true" ]]; then
|
||||
echo "❌ ERROR: hooks_passed should be true for success"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ hooks_passed output correct for success"
|
||||
|
||||
- name: Test hooks_passed output on failure
|
||||
run: |
|
||||
# Simulate failed hooks
|
||||
HOOKS_OUTCOME="failure"
|
||||
|
||||
if [[ "$HOOKS_OUTCOME" == "success" ]]; then
|
||||
hooks_passed="true"
|
||||
else
|
||||
hooks_passed="false"
|
||||
fi
|
||||
|
||||
if [[ "$hooks_passed" != "false" ]]; then
|
||||
echo "❌ ERROR: hooks_passed should be false for failure"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ hooks_passed output correct for failure"
|
||||
|
||||
- name: Test files_changed output
|
||||
run: |
|
||||
# Simulate git status check
|
||||
echo "test.txt" > /tmp/test-changes.txt
|
||||
|
||||
if [[ -s /tmp/test-changes.txt ]]; then
|
||||
files_changed="true"
|
||||
else
|
||||
files_changed="false"
|
||||
fi
|
||||
|
||||
if [[ "$files_changed" != "true" ]]; then
|
||||
echo "❌ ERROR: files_changed should be true when files exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ files_changed output correct"
|
||||
|
||||
test-pre-commit-uv-integration:
|
||||
name: Test UV Integration
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test PRE_COMMIT_USE_UV environment variable
|
||||
run: |
|
||||
PRE_COMMIT_USE_UV='1'
|
||||
|
||||
if [[ "$PRE_COMMIT_USE_UV" != "1" ]]; then
|
||||
echo "❌ ERROR: PRE_COMMIT_USE_UV should be set to 1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ PRE_COMMIT_USE_UV correctly set"
|
||||
echo "✓ pre-commit will use UV for faster installations"
|
||||
|
||||
test-pre-commit-workflow-scenarios:
|
||||
name: Test Workflow Scenarios
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test full workflow (all files)
|
||||
run: |
|
||||
echo "Simulating full workflow with --all-files..."
|
||||
|
||||
# 1. Validate inputs
|
||||
CONFIG_FILE=".pre-commit-config.yaml"
|
||||
echo "✓ Step 1: Validate inputs"
|
||||
|
||||
# 2. Set git config
|
||||
git config user.name "Test User"
|
||||
git config user.email "test@example.com"
|
||||
echo "✓ Step 2: Set git config"
|
||||
|
||||
# 3. Determine option
|
||||
BASE_BRANCH=""
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
OPTION="--all-files"
|
||||
else
|
||||
OPTION="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
echo "✓ Step 3: Option set to: $OPTION"
|
||||
|
||||
# 4. Run pre-commit (simulated)
|
||||
echo "✓ Step 4: Would run: pre-commit run $OPTION"
|
||||
|
||||
# 5. Check for changes
|
||||
echo "✓ Step 5: Check for changes to commit"
|
||||
|
||||
echo "✓ Full workflow simulation completed"
|
||||
|
||||
- name: Test diff workflow (with base branch)
|
||||
run: |
|
||||
echo "Simulating diff workflow with base branch..."
|
||||
|
||||
# 1. Validate inputs
|
||||
CONFIG_FILE=".pre-commit-config.yaml"
|
||||
BASE_BRANCH="main"
|
||||
echo "✓ Step 1: Validate inputs (base-branch: $BASE_BRANCH)"
|
||||
|
||||
# 2. Set git config
|
||||
git config user.name "GitHub Actions"
|
||||
git config user.email "github-actions@github.com"
|
||||
echo "✓ Step 2: Set git config"
|
||||
|
||||
# 3. Determine option
|
||||
if [ -z "$BASE_BRANCH" ]; then
|
||||
OPTION="--all-files"
|
||||
else
|
||||
OPTION="--from-ref $BASE_BRANCH --to-ref HEAD"
|
||||
fi
|
||||
echo "✓ Step 3: Option set to: $OPTION"
|
||||
|
||||
# 4. Run pre-commit (simulated)
|
||||
echo "✓ Step 4: Would run: pre-commit run $OPTION"
|
||||
|
||||
# 5. Check for changes
|
||||
echo "✓ Step 5: Check for changes to commit"
|
||||
|
||||
echo "✓ Diff workflow simulation completed"
|
||||
|
||||
test-pre-commit-autofix-behavior:
|
||||
name: Test Autofix Behavior
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test autofix commit message
|
||||
run: |
|
||||
COMMIT_MESSAGE="style(pre-commit): autofix"
|
||||
|
||||
if [[ "$COMMIT_MESSAGE" != "style(pre-commit): autofix" ]]; then
|
||||
echo "❌ ERROR: Incorrect commit message"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Autofix commit message correct"
|
||||
|
||||
- name: Test git add options
|
||||
run: |
|
||||
ADD_OPTIONS="-u"
|
||||
|
||||
if [[ "$ADD_OPTIONS" != "-u" ]]; then
|
||||
echo "❌ ERROR: Incorrect add options"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Git add options correct (-u for tracked files)"
|
||||
|
||||
- name: Test autofix always runs
|
||||
run: |
|
||||
# Simulate pre-commit failure
|
||||
PRECOMMIT_FAILED=true
|
||||
|
||||
# Autofix should still run (if: always())
|
||||
echo "✓ Autofix runs even when pre-commit fails"
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-pre-commit-validation
|
||||
- test-pre-commit-git-config
|
||||
- test-pre-commit-option-generation
|
||||
- test-pre-commit-config-file-detection
|
||||
- test-pre-commit-hook-execution
|
||||
- test-pre-commit-outputs
|
||||
- test-pre-commit-uv-integration
|
||||
- test-pre-commit-workflow-scenarios
|
||||
- test-pre-commit-autofix-behavior
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "Pre-commit Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Input validation tests"
|
||||
echo "✓ Git configuration tests"
|
||||
echo "✓ Option generation tests"
|
||||
echo "✓ Config file detection tests"
|
||||
echo "✓ Hook execution tests"
|
||||
echo "✓ Output verification tests"
|
||||
echo "✓ UV integration tests"
|
||||
echo "✓ Workflow scenario tests"
|
||||
echo "✓ Autofix behavior tests"
|
||||
echo ""
|
||||
echo "All pre-commit integration tests completed successfully!"
|
||||
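The option-generation tests above encode a single rule: with no base branch, pre-commit runs with --all-files; with a base branch, it runs a diff-only pass via --from-ref/--to-ref. A minimal Python rendering of that rule (illustrative; the action implements it in shell exactly as the tests show):

# Illustrative mirror of the pre-commit option selection tested above.
def precommit_option(base_branch: str) -> str:
    """Return the pre-commit CLI arguments for a full or diff-only run."""
    if not base_branch:
        return "--all-files"
    return f"--from-ref {base_branch} --to-ref HEAD"

assert precommit_option("") == "--all-files"
assert precommit_option("main") == "--from-ref main --to-ref HEAD"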
414
_tests/integration/workflows/version-validator-test.yml
Normal file
@@ -0,0 +1,414 @@
|
||||
---
|
||||
name: Integration Test - Version Validator
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'version-validator/**'
|
||||
- '_tests/integration/workflows/version-validator-test.yml'
|
||||
|
||||
jobs:
|
||||
test-version-validator-input-validation:
|
||||
name: Test Input Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test empty version (should fail)
|
||||
run: |
|
||||
VERSION=""
|
||||
if [[ -z "$VERSION" ]]; then
|
||||
echo "✓ Empty version correctly rejected"
|
||||
else
|
||||
echo "❌ ERROR: Empty version should be rejected"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Test dangerous characters in version
|
||||
run: |
|
||||
for version in "1.2.3;rm -rf /" "1.0&&echo" "1.0|cat" '1.0`cmd`' "1.0\$variable"; do
|
||||
if [[ "$version" == *";"* ]] || [[ "$version" == *"&&"* ]] || \
|
||||
[[ "$version" == *"|"* ]] || [[ "$version" == *"\`"* ]] || [[ "$version" == *"\$"* ]]; then
|
||||
echo "✓ Dangerous version '$version' correctly detected"
|
||||
else
|
||||
echo "❌ ERROR: Should detect dangerous characters in: $version"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test valid version strings
|
||||
run: |
|
||||
for version in "1.2.3" "v1.0.0" "2.0.0-alpha" "1.0.0+build"; do
|
||||
if [[ "$version" == *";"* ]] || [[ "$version" == *"&&"* ]] || \
|
||||
[[ "$version" == *"|"* ]] || [[ "$version" == *"\`"* ]] || [[ "$version" == *"\$"* ]]; then
|
||||
echo "❌ ERROR: Valid version should not be rejected: $version"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Valid version '$version' accepted"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-regex-validation:
|
||||
name: Test Regex Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test empty regex (should fail)
|
||||
run: |
|
||||
REGEX=""
|
||||
if [[ -z "$REGEX" ]]; then
|
||||
echo "✓ Empty regex correctly rejected"
|
||||
else
|
||||
echo "❌ ERROR: Empty regex should be rejected"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Test potential ReDoS patterns
|
||||
run: |
|
||||
for regex in ".*.*" ".+.+"; do
|
||||
if [[ "$regex" == *".*.*"* ]] || [[ "$regex" == *".+.+"* ]]; then
|
||||
echo "✓ ReDoS pattern '$regex' detected (would show warning)"
|
||||
else
|
||||
echo "❌ ERROR: Should detect ReDoS pattern: $regex"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test safe regex patterns
|
||||
run: |
|
||||
for regex in "^[0-9]+\.[0-9]+$" "^v?[0-9]+"; do
|
||||
if [[ "$regex" == *".*.*"* ]] || [[ "$regex" == *".+.+"* ]]; then
|
||||
echo "❌ ERROR: Safe regex should not be flagged: $regex"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Safe regex '$regex' accepted"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-language-validation:
|
||||
name: Test Language Parameter Validation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test dangerous characters in language
|
||||
run: |
|
||||
for lang in "node;rm" "python&&cmd" "ruby|cat"; do
|
||||
if [[ "$lang" == *";"* ]] || [[ "$lang" == *"&&"* ]] || [[ "$lang" == *"|"* ]]; then
|
||||
echo "✓ Dangerous language parameter '$lang' correctly detected"
|
||||
else
|
||||
echo "❌ ERROR: Should detect dangerous characters in: $lang"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test valid language parameters
|
||||
run: |
|
||||
for lang in "node" "python" "ruby" "go" "java"; do
|
||||
if [[ "$lang" == *";"* ]] || [[ "$lang" == *"&&"* ]] || [[ "$lang" == *"|"* ]]; then
|
||||
echo "❌ ERROR: Valid language should not be rejected: $lang"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Valid language '$lang' accepted"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-version-cleaning:
|
||||
name: Test Version Cleaning
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test v prefix removal
|
||||
run: |
|
||||
for input in "v1.2.3" "V2.0.0"; do
|
||||
cleaned=$(echo "$input" | sed -e 's/^[vV]//')
|
||||
if [[ "$cleaned" == "1.2.3" ]] || [[ "$cleaned" == "2.0.0" ]]; then
|
||||
echo "✓ v prefix removed from '$input' -> '$cleaned'"
|
||||
else
|
||||
echo "❌ ERROR: Failed to clean '$input', got '$cleaned'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test whitespace removal
|
||||
run: |
|
||||
input=" 1.2.3 "
|
||||
cleaned=$(echo "$input" | tr -d ' ')
|
||||
if [[ "$cleaned" == "1.2.3" ]]; then
|
||||
echo "✓ Whitespace removed: '$input' -> '$cleaned'"
|
||||
else
|
||||
echo "❌ ERROR: Failed to remove whitespace"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Test newline removal
|
||||
run: |
|
||||
input=$'1.2.3\n'
|
||||
cleaned=$(echo "$input" | tr -d '\n' | tr -d '\r')
|
||||
if [[ "$cleaned" == "1.2.3" ]]; then
|
||||
echo "✓ Newlines removed"
|
||||
else
|
||||
echo "❌ ERROR: Failed to remove newlines"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
test-version-validator-regex-matching:
|
||||
name: Test Regex Matching
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test default SemVer regex
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+(\.[0-9]+)?(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$'
|
||||
|
||||
for version in "1.0.0" "1.2" "1.0.0-alpha" "1.0.0+build" "2.1.0-rc.1+build.123"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "✓ Version '$version' matches SemVer regex"
|
||||
else
|
||||
echo "❌ ERROR: Version '$version' should match SemVer"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test invalid versions against SemVer regex
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+(\.[0-9]+)?(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$'
|
||||
|
||||
for version in "abc" "1.a.b" "not.a.version"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "❌ ERROR: Invalid version '$version' should not match"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Invalid version '$version' correctly rejected"
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test custom strict regex
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
|
||||
# Should match
|
||||
for version in "1.0.0" "2.5.10"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "✓ Version '$version' matches strict regex"
|
||||
else
|
||||
echo "❌ ERROR: Version '$version' should match strict regex"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Should not match
|
||||
for version in "1.0" "1.0.0-alpha"; do
|
||||
if [[ $version =~ $REGEX ]]; then
|
||||
echo "❌ ERROR: Version '$version' should not match strict regex"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Version '$version' correctly rejected by strict regex"
|
||||
fi
|
||||
done
|
||||
|
||||
test-version-validator-outputs:
|
||||
name: Test Output Generation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test valid version outputs (simulation)
|
||||
run: |
|
||||
VERSION="v1.2.3"
|
||||
REGEX='^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
|
||||
# Clean version
|
||||
cleaned=$(echo "$VERSION" | sed -e 's/^[vV]//' | tr -d ' ' | tr -d '\n' | tr -d '\r')
|
||||
|
||||
# Validate
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
is_valid="true"
|
||||
validated_version="$cleaned"
|
||||
error_message=""
|
||||
|
||||
echo "is_valid=$is_valid"
|
||||
echo "validated_version=$validated_version"
|
||||
echo "error_message=$error_message"
|
||||
|
||||
if [[ "$is_valid" != "true" ]]; then
|
||||
echo "❌ ERROR: Should be valid"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "$validated_version" != "1.2.3" ]]; then
|
||||
echo "❌ ERROR: Wrong validated version"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Valid version outputs correct"
|
||||
fi
|
||||
|
||||
- name: Test invalid version outputs (simulation)
|
||||
run: |
|
||||
VERSION="not.a.version"
|
||||
REGEX='^[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
LANGUAGE="test"
|
||||
|
||||
# Clean version
|
||||
cleaned=$(echo "$VERSION" | sed -e 's/^[vV]//' | tr -d ' ' | tr -d '\n' | tr -d '\r')
|
||||
|
||||
# Validate
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
is_valid="true"
|
||||
else
|
||||
is_valid="false"
|
||||
validated_version=""
|
||||
error_msg="Invalid $LANGUAGE version format: '$VERSION' (cleaned: '$cleaned'). Expected pattern: $REGEX"
|
||||
error_message=$(echo "$error_msg" | tr -d '\n\r')
|
||||
|
||||
echo "is_valid=$is_valid"
|
||||
echo "validated_version=$validated_version"
|
||||
echo "error_message=$error_message"
|
||||
|
||||
if [[ "$is_valid" != "false" ]]; then
|
||||
echo "❌ ERROR: Should be invalid"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -n "$validated_version" ]]; then
|
||||
echo "❌ ERROR: Validated version should be empty"
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "$error_message" ]]; then
|
||||
echo "❌ ERROR: Error message should not be empty"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Invalid version outputs correct"
|
||||
fi
|
||||
|
||||
test-version-validator-sanitization:
|
||||
name: Test Output Sanitization
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test error message sanitization
|
||||
run: |
|
||||
error_msg=$'Error message\nwith newlines'
|
||||
|
||||
sanitized=$(echo "$error_msg" | tr -d '\n\r')
|
||||
|
||||
if [[ "$sanitized" == *$'\n'* ]] || [[ "$sanitized" == *$'\r'* ]]; then
|
||||
echo "❌ ERROR: Newlines not removed from error message"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Error message sanitization works"
|
||||
|
||||
- name: Test validated version sanitization
|
||||
run: |
|
||||
VERSION=$'1.2.3\n'
|
||||
cleaned=$(echo "$VERSION" | sed -e 's/^[vV]//' | tr -d ' ' | tr -d '\n' | tr -d '\r')
|
||||
|
||||
if [[ "$cleaned" == *$'\n'* ]] || [[ "$cleaned" == *$'\r'* ]]; then
|
||||
echo "❌ ERROR: Newlines not removed from validated version"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Validated version sanitization works"
|
||||
|
||||
test-version-validator-real-world-scenarios:
|
||||
name: Test Real World Scenarios
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Test Node.js version validation
|
||||
run: |
|
||||
REGEX='^[0-9]+(\.[0-9]+(\.[0-9]+)?)?$'
|
||||
|
||||
for version in "20" "20.9" "20.9.0" "18.17.1"; do
|
||||
cleaned=$(echo "$version" | sed -e 's/^[vV]//')
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
echo "✓ Node.js version '$version' valid"
|
||||
else
|
||||
echo "❌ ERROR: Node.js version should be valid"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test Python version validation
|
||||
run: |
|
||||
REGEX='^[0-9]+\.[0-9]+(\.[0-9]+)?$'
|
||||
|
||||
for version in "3.11" "3.11.5" "3.12.0"; do
|
||||
cleaned=$(echo "$version" | sed -e 's/^[vV]//')
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
echo "✓ Python version '$version' valid"
|
||||
else
|
||||
echo "❌ ERROR: Python version should be valid"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test CalVer validation
|
||||
run: |
|
||||
REGEX='^[0-9]{4}\.[0-9]{1,2}(\.[0-9]+)?$'
|
||||
|
||||
for version in "2024.3" "2024.3.15" "2024.10.1"; do
|
||||
cleaned=$(echo "$version" | sed -e 's/^[vV]//')
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
echo "✓ CalVer version '$version' valid"
|
||||
else
|
||||
echo "❌ ERROR: CalVer version should be valid"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Test Docker tag validation
|
||||
run: |
|
||||
REGEX='^[a-z0-9][a-z0-9._-]*$'
|
||||
|
||||
for tag in "latest" "v1.2.3" "stable-alpine" "2024.10.15"; do
|
||||
cleaned=$(echo "$tag" | sed -e 's/^[vV]//')
|
||||
# Note: Docker tags are case-insensitive, so convert to lowercase
|
||||
cleaned=$(echo "$cleaned" | tr '[:upper:]' '[:lower:]')
|
||||
if [[ $cleaned =~ $REGEX ]]; then
|
||||
echo "✓ Docker tag '$tag' valid"
|
||||
else
|
||||
echo "❌ ERROR: Docker tag should be valid: $tag"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
integration-test-summary:
|
||||
name: Integration Test Summary
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- test-version-validator-input-validation
|
||||
- test-version-validator-regex-validation
|
||||
- test-version-validator-language-validation
|
||||
- test-version-validator-version-cleaning
|
||||
- test-version-validator-regex-matching
|
||||
- test-version-validator-outputs
|
||||
- test-version-validator-sanitization
|
||||
- test-version-validator-real-world-scenarios
|
||||
steps:
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "=========================================="
|
||||
echo "Version Validator Integration Tests - PASSED"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "✓ Input validation tests"
|
||||
echo "✓ Regex validation tests"
|
||||
echo "✓ Language validation tests"
|
||||
echo "✓ Version cleaning tests"
|
||||
echo "✓ Regex matching tests"
|
||||
echo "✓ Output generation tests"
|
||||
echo "✓ Sanitization tests"
|
||||
echo "✓ Real world scenario tests"
|
||||
echo ""
|
||||
echo "All version-validator integration tests completed successfully!"
|
||||
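The cleaning and matching steps exercised above (strip a leading v/V, drop whitespace and newlines, then test against the language-specific regex and emit is_valid, validated_version, and error_message) compose into one small pipeline. A Python sketch of that flow, producing the same outputs the tests check (illustrative; the action does this with sed, tr, and bash regex matching):

# Illustrative end-to-end mirror of the version-validator flow tested above.
import re

def validate_version(version: str, pattern: str, language: str) -> dict:
    cleaned = re.sub(r"^[vV]", "", version)
    cleaned = cleaned.replace(" ", "").replace("\n", "").replace("\r", "")
    if re.match(pattern, cleaned):
        return {"is_valid": "true", "validated_version": cleaned, "error_message": ""}
    return {
        "is_valid": "false",
        "validated_version": "",
        "error_message": (
            f"Invalid {language} version format: '{version}' "
            f"(cleaned: '{cleaned}'). Expected pattern: {pattern}"
        ),
    }

assert validate_version("v1.2.3", r"^[0-9]+\.[0-9]+\.[0-9]+$", "test")["is_valid"] == "true"
assert validate_version("not.a.version", r"^[0-9]+\.[0-9]+\.[0-9]+$", "test")["is_valid"] == "false"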
757
_tests/run-tests.sh
Executable file
@@ -0,0 +1,757 @@
#!/usr/bin/env bash
# GitHub Actions Testing Framework - Main Test Runner
# Executes tests across all levels: unit, integration, and e2e

set -euo pipefail

# Script directory and test root
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TEST_ROOT="$SCRIPT_DIR"

# Source framework utilities
# shellcheck source=_tests/framework/setup.sh
source "${TEST_ROOT}/framework/setup.sh"

# Configuration
DEFAULT_TEST_TYPE="all"
DEFAULT_ACTION_FILTER=""
PARALLEL_JOBS=4
COVERAGE_ENABLED=true
REPORT_FORMAT="console"

# Usage information
usage() {
  cat <<EOF
GitHub Actions Testing Framework

Usage: $0 [OPTIONS] [ACTION_NAME...]

OPTIONS:
  -t, --type TYPE      Test type: unit, integration, e2e, all (default: all)
  -a, --action ACTION  Filter by specific action name
  -j, --jobs JOBS      Number of parallel jobs (default: 4)
  -c, --coverage       Enable coverage reporting (default: true)
  --no-coverage        Disable coverage reporting
  -f, --format FORMAT  Report format: console, json, junit, sarif (default: console)
  -v, --verbose        Enable verbose output
  -h, --help           Show this help message

EXAMPLES:
  $0                              # Run all tests for all actions
  $0 -t unit                      # Run only unit tests
  $0 -a node-setup                # Test only node-setup action
  $0 -t integration docker-build  # Integration tests for docker-build
  $0 --format json --coverage     # Full tests with JSON output and coverage
  $0 --format sarif               # Generate SARIF report for security scanning

TEST TYPES:
  unit        - Fast unit tests for action validation and logic
  integration - Integration tests using nektos/act or workflows
  e2e         - End-to-end tests with complete workflows
  all         - All test types (default)

EOF
}

# Parse command line arguments
parse_args() {
  local test_type="$DEFAULT_TEST_TYPE"
  local action_filter="$DEFAULT_ACTION_FILTER"
  local actions=()

  while [[ $# -gt 0 ]]; do
    case $1 in
      -t | --type)
        if [[ $# -lt 2 ]]; then
          echo "Error: $1 requires an argument" >&2
          usage
          exit 1
        fi
        test_type="$2"
        shift 2
        ;;
      -a | --action)
        if [[ $# -lt 2 ]]; then
          echo "Error: $1 requires an argument" >&2
          usage
          exit 1
        fi
        action_filter="$2"
        shift 2
        ;;
      -j | --jobs)
        if [[ $# -lt 2 ]]; then
          echo "Error: $1 requires an argument" >&2
          usage
          exit 1
        fi
        PARALLEL_JOBS="$2"
        shift 2
        ;;
      -c | --coverage)
        COVERAGE_ENABLED=true
        shift
        ;;
      --no-coverage)
        COVERAGE_ENABLED=false
        shift
        ;;
      -f | --format)
        if [[ $# -lt 2 ]]; then
          echo "Error: $1 requires an argument" >&2
          usage
          exit 1
        fi
        REPORT_FORMAT="$2"
        shift 2
        ;;
      -v | --verbose)
        set -x
        shift
        ;;
      -h | --help)
        usage
        exit 0
        ;;
      --)
        shift
        actions+=("$@")
        break
        ;;
      -*)
        log_error "Unknown option: $1"
        usage
        exit 1
        ;;
      *)
        actions+=("$1")
        shift
        ;;
    esac
  done

  # Export for use in other functions
  export TEST_TYPE="$test_type"
  export ACTION_FILTER="$action_filter"
  TARGET_ACTIONS=("${actions[@]+"${actions[@]}"}")
}

# Discover available actions
|
||||
discover_actions() {
|
||||
local actions=()
|
||||
|
||||
if [[ ${#TARGET_ACTIONS[@]} -gt 0 ]]; then
|
||||
# Use provided actions
|
||||
actions=("${TARGET_ACTIONS[@]}")
|
||||
elif [[ -n $ACTION_FILTER ]]; then
|
||||
# Filter by pattern
|
||||
while IFS= read -r action_dir; do
|
||||
local action_name
|
||||
action_name=$(basename "$action_dir")
|
||||
if [[ $action_name == *"$ACTION_FILTER"* ]]; then
|
||||
actions+=("$action_name")
|
||||
fi
|
||||
done < <(find "${TEST_ROOT}/.." -mindepth 2 -maxdepth 2 -type f -name "action.yml" -exec dirname {} \; | sort)
|
||||
else
|
||||
# All actions
|
||||
while IFS= read -r action_dir; do
|
||||
local action_name
|
||||
action_name=$(basename "$action_dir")
|
||||
actions+=("$action_name")
|
||||
done < <(find "${TEST_ROOT}/.." -mindepth 2 -maxdepth 2 -type f -name "action.yml" -exec dirname {} \; | sort)
|
||||
fi
|
||||
|
||||
log_info "Discovered ${#actions[@]} actions to test: ${actions[*]}"
|
||||
printf '%s\n' "${actions[@]}"
|
||||
}
|
||||
|
||||
# Check if required tools are available
|
||||
check_dependencies() {
|
||||
# Check for ShellSpec
|
||||
if ! command -v shellspec >/dev/null 2>&1; then
|
||||
log_warning "ShellSpec not found, attempting to install..."
|
||||
install_shellspec
|
||||
fi
|
||||
|
||||
# Check for act (if running integration tests)
|
||||
if [[ $TEST_TYPE == "integration" || $TEST_TYPE == "all" ]]; then
|
||||
if ! command -v act >/dev/null 2>&1; then
|
||||
log_warning "nektos/act not found, integration tests will be limited"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check for coverage tools (if enabled)
|
||||
if [[ $COVERAGE_ENABLED == "true" ]]; then
|
||||
if ! command -v kcov >/dev/null 2>&1; then
|
||||
log_warning "kcov not found - coverage will use alternative methods"
|
||||
fi
|
||||
fi
|
||||
|
||||
log_success "Dependency check completed"
|
||||
}
|
||||
|
||||
# Install ShellSpec if not available
|
||||
install_shellspec() {
|
||||
log_info "Installing ShellSpec testing framework..."
|
||||
|
||||
local shellspec_version="0.28.1"
|
||||
local install_dir="${HOME}/.local"
|
||||
|
||||
# Download and install ShellSpec (download -> verify SHA256 -> extract -> install)
|
||||
local tarball
|
||||
tarball="$(mktemp /tmp/shellspec-XXXXXX.tar.gz)"
|
||||
|
||||
# Pinned SHA256 checksum for ShellSpec 0.28.1
|
||||
# Source: https://github.com/shellspec/shellspec/archive/refs/tags/0.28.1.tar.gz
|
||||
local checksum="400d835466429a5fe6c77a62775a9173729d61dd43e05dfa893e8cf6cb511783"
|
||||
|
||||
# Ensure cleanup of the downloaded file
|
||||
# Use ${tarball:-} to handle unbound variable when trap fires after function returns
|
||||
cleanup() {
|
||||
rm -f "${tarball:-}"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
log_info "Downloading ShellSpec ${shellspec_version} to ${tarball}..."
|
||||
if ! curl -fsSL -o "$tarball" "https://github.com/shellspec/shellspec/archive/refs/tags/${shellspec_version}.tar.gz"; then
|
||||
log_error "Failed to download ShellSpec ${shellspec_version}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Compute SHA256 in a portable way
|
||||
local actual_sha
|
||||
if command -v sha256sum >/dev/null 2>&1; then
|
||||
actual_sha="$(sha256sum "$tarball" | awk '{print $1}')"
|
||||
elif command -v shasum >/dev/null 2>&1; then
|
||||
actual_sha="$(shasum -a 256 "$tarball" | awk '{print $1}')"
|
||||
else
|
||||
log_error "No SHA256 utility available (sha256sum or shasum required) to verify download"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$actual_sha" != "$checksum" ]]; then
|
||||
log_error "Checksum mismatch for ShellSpec ${shellspec_version} (expected ${checksum}, got ${actual_sha})"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log_info "Checksum verified for ShellSpec ${shellspec_version}, extracting..."
|
||||
if ! tar -xzf "$tarball" -C /tmp/; then
|
||||
log_error "Failed to extract ShellSpec archive"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! (cd "/tmp/shellspec-${shellspec_version}" && make install PREFIX="$install_dir"); then
|
||||
log_error "ShellSpec make install failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Add to PATH if not already there
|
||||
if [[ ":$PATH:" != *":${install_dir}/bin:"* ]]; then
|
||||
export PATH="${install_dir}/bin:$PATH"
|
||||
# Append to shell rc only in non-CI environments
|
||||
if [[ -z "${CI:-}" ]]; then
|
||||
if ! grep -qxF "export PATH=\"${install_dir}/bin:\$PATH\"" ~/.bashrc 2>/dev/null; then
|
||||
echo "export PATH=\"${install_dir}/bin:\$PATH\"" >>~/.bashrc
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if command -v shellspec >/dev/null 2>&1; then
|
||||
log_success "ShellSpec installed successfully"
|
||||
# Clear the trap now that we've succeeded to prevent unbound variable error on script exit
|
||||
trap - EXIT
|
||||
rm -f "$tarball"
|
||||
else
|
||||
log_error "Failed to install ShellSpec"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Run unit tests
|
||||
run_unit_tests() {
|
||||
local actions=("$@")
|
||||
local failed_tests=()
|
||||
local passed_tests=()
|
||||
|
||||
log_info "Running unit tests for ${#actions[@]} actions..."
|
||||
|
||||
# Create test results directory
|
||||
mkdir -p "${TEST_ROOT}/reports/unit"
|
||||
|
||||
for action in "${actions[@]}"; do
|
||||
local unit_test_dir="${TEST_ROOT}/unit/${action}"
|
||||
|
||||
if [[ -d $unit_test_dir ]]; then
|
||||
log_info "Running unit tests for: $action"
|
||||
|
||||
# Run ShellSpec tests
|
||||
local test_result=0
|
||||
local output_file="${TEST_ROOT}/reports/unit/${action}.txt"
|
||||
|
||||
# Run shellspec and capture both exit code and output
|
||||
# Note: ShellSpec returns non-zero exit codes for warnings (101) and other conditions
|
||||
# We need to check the actual output to determine if tests failed
|
||||
# Pass action name relative to --default-path (_tests/unit) for proper spec_helper loading
|
||||
(cd "$TEST_ROOT/.." && shellspec \
|
||||
--format documentation \
|
||||
"$action") >"$output_file" 2>&1 || true
|
||||
|
||||
# Parse the output to determine if tests actually failed
|
||||
# Look for the summary line which shows "X examples, Y failures"
|
||||
if grep -qE "[0-9]+ examples?, 0 failures?" "$output_file" && ! grep -q "Fatal error occurred" "$output_file"; then
|
||||
log_success "Unit tests passed: $action"
|
||||
passed_tests+=("$action")
|
||||
else
|
||||
# Check if there were actual failures (not just warnings)
|
||||
if grep -qE "[0-9]+ examples?, [1-9][0-9]* failures?" "$output_file"; then
|
||||
log_error "Unit tests failed: $action"
|
||||
failed_tests+=("$action")
|
||||
test_result=1
|
||||
else
|
||||
# No summary line found, treat as passed if no fatal errors
|
||||
if ! grep -q "Fatal error occurred" "$output_file"; then
|
||||
log_success "Unit tests passed: $action"
|
||||
passed_tests+=("$action")
|
||||
else
|
||||
log_error "Unit tests failed: $action"
|
||||
failed_tests+=("$action")
|
||||
test_result=1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Show summary if verbose or on failure
|
||||
if [[ $test_result -ne 0 || ${BASHOPTS:-} == *"xtrace"* || $- == *x* ]]; then
|
||||
echo "--- Test output for $action ---"
|
||||
cat "$output_file"
|
||||
echo "--- End test output ---"
|
||||
fi
|
||||
else
|
||||
log_warning "No unit tests found for: $action"
|
||||
fi
|
||||
done
|
||||
|
||||
# Report results
|
||||
log_info "Unit test results:"
|
||||
log_success " Passed: ${#passed_tests[@]} actions"
|
||||
if [[ ${#failed_tests[@]} -gt 0 ]]; then
|
||||
log_error " Failed: ${#failed_tests[@]} actions (${failed_tests[*]})"
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Run integration tests using nektos/act
|
||||
run_integration_tests() {
|
||||
local actions=("$@")
|
||||
local failed_tests=()
|
||||
local passed_tests=()
|
||||
|
||||
log_info "Running integration tests for ${#actions[@]} actions..."
|
||||
|
||||
# Create test results directory
|
||||
mkdir -p "${TEST_ROOT}/reports/integration"
|
||||
|
||||
for action in "${actions[@]}"; do
|
||||
local workflow_file="${TEST_ROOT}/integration/workflows/${action}-test.yml"
|
||||
|
||||
if [[ -f $workflow_file ]]; then
|
||||
log_info "Running integration test workflow for: $action"
|
||||
|
||||
# Run with act if available, otherwise skip
|
||||
if command -v act >/dev/null 2>&1; then
|
||||
local output_file="${TEST_ROOT}/reports/integration/${action}.txt"
|
||||
|
||||
# Create temp directory for artifacts
|
||||
local artifacts_dir
|
||||
artifacts_dir=$(mktemp -d) || exit 1
|
||||
|
||||
if act workflow_dispatch \
|
||||
-W "$workflow_file" \
|
||||
--container-architecture linux/amd64 \
|
||||
--artifact-server-path "$artifacts_dir" \
|
||||
-P ubuntu-latest=catthehacker/ubuntu:act-latest \
|
||||
>"$output_file" 2>&1; then
|
||||
|
||||
log_success "Integration tests passed: $action"
|
||||
passed_tests+=("$action")
|
||||
else
|
||||
log_error "Integration tests failed: $action"
|
||||
failed_tests+=("$action")
|
||||
|
||||
# Show output on failure
|
||||
echo "--- Integration test output for $action ---"
|
||||
cat "$output_file"
|
||||
echo "--- End integration test output ---"
|
||||
fi
|
||||
|
||||
# Clean up artifacts directory
|
||||
rm -rf "$artifacts_dir"
|
||||
else
|
||||
log_warning "Skipping integration test for $action (act not available)"
|
||||
fi
|
||||
else
|
||||
log_warning "No integration test workflow found for: $action"
|
||||
fi
|
||||
done
|
||||
|
||||
# Report results
|
||||
log_info "Integration test results:"
|
||||
log_success " Passed: ${#passed_tests[@]} actions"
|
||||
if [[ ${#failed_tests[@]} -gt 0 ]]; then
|
||||
log_error " Failed: ${#failed_tests[@]} actions (${failed_tests[*]})"
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Generate test coverage report
|
||||
generate_coverage_report() {
|
||||
if [[ $COVERAGE_ENABLED != "true" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
log_info "Generating coverage report..."
|
||||
|
||||
local coverage_dir="${TEST_ROOT}/coverage"
|
||||
mkdir -p "$coverage_dir"
|
||||
|
||||
# This is a simplified coverage implementation
|
||||
# In practice, you'd integrate with kcov or similar tools
|
||||
|
||||
# Count tested vs total actions (count directories with action.yml files, excluding hidden/internal dirs and node_modules)
|
||||
local project_root
|
||||
project_root="$(cd "${TEST_ROOT}/.." && pwd)"
|
||||
local total_actions
|
||||
total_actions=$(find "$project_root" -mindepth 2 -maxdepth 2 -type f -name "action.yml" 2>/dev/null | wc -l | tr -d ' ')
|
||||
|
||||
# Count actions that have unit tests (by checking if validation.spec.sh exists)
|
||||
local tested_actions
|
||||
tested_actions=$(find "${TEST_ROOT}/unit" -mindepth 2 -maxdepth 2 -type f -name "validation.spec.sh" 2>/dev/null | wc -l | tr -d ' ')
|
||||
|
||||
local coverage_percent
|
||||
if [[ $total_actions -gt 0 ]]; then
|
||||
coverage_percent=$(((tested_actions * 100) / total_actions))
|
||||
else
|
||||
coverage_percent=0
|
||||
fi
|
||||
|
||||
cat >"${coverage_dir}/summary.json" <<EOF
|
||||
{
|
||||
"total_actions": $total_actions,
|
||||
"tested_actions": $tested_actions,
|
||||
"coverage_percent": $coverage_percent,
|
||||
"generated_at": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
|
||||
}
|
||||
EOF
|
||||
|
||||
log_success "Coverage report generated: ${coverage_percent}% ($tested_actions/$total_actions actions)"
|
||||
}
|
||||
|
||||
# Generate test report
|
||||
generate_test_report() {
|
||||
log_info "Generating test report in format: $REPORT_FORMAT"
|
||||
|
||||
local report_dir="${TEST_ROOT}/reports"
|
||||
mkdir -p "$report_dir"
|
||||
|
||||
case "$REPORT_FORMAT" in
|
||||
"json")
|
||||
generate_json_report
|
||||
;;
|
||||
"junit")
|
||||
log_warning "JUnit report format not yet implemented, using JSON instead"
|
||||
generate_json_report
|
||||
;;
|
||||
"sarif")
|
||||
generate_sarif_report
|
||||
;;
|
||||
"console" | *)
|
||||
generate_console_report
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Generate JSON test report
|
||||
generate_json_report() {
|
||||
local report_file="${TEST_ROOT}/reports/test-results.json"
|
||||
|
||||
cat >"$report_file" <<EOF
|
||||
{
|
||||
"test_run": {
|
||||
"timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
|
||||
"type": "$TEST_TYPE",
|
||||
"action_filter": "$ACTION_FILTER",
|
||||
"parallel_jobs": $PARALLEL_JOBS,
|
||||
"coverage_enabled": $COVERAGE_ENABLED
|
||||
},
|
||||
"results": {
|
||||
"unit_tests": $(find "${TEST_ROOT}/reports/unit" -name "*.txt" 2>/dev/null | wc -l | tr -d ' '),
|
||||
"integration_tests": $(find "${TEST_ROOT}/reports/integration" -name "*.txt" 2>/dev/null | wc -l | tr -d ' ')
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
log_success "JSON report generated: $report_file"
|
||||
}
|
||||
|
||||
# Generate SARIF test report
|
||||
generate_sarif_report() {
|
||||
# Check for jq availability
|
||||
if ! command -v jq >/dev/null 2>&1; then
|
||||
log_warning "jq not found, skipping SARIF report generation"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local report_file="${TEST_ROOT}/reports/test-results.sarif"
|
||||
local run_id
|
||||
run_id="github-actions-test-$(date +%s)"
|
||||
local timestamp
|
||||
timestamp="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
|
||||
|
||||
# Initialize SARIF structure using jq to ensure proper escaping
|
||||
jq -n \
|
||||
--arg run_id "$run_id" \
|
||||
--arg timestamp "$timestamp" \
|
||||
--arg test_type "$TEST_TYPE" \
|
||||
'{
|
||||
"$schema": "https://json.schemastore.org/sarif-2.1.0.json",
|
||||
"version": "2.1.0",
|
||||
"runs": [
|
||||
{
|
||||
"automationDetails": {
|
||||
"id": $run_id
|
||||
},
|
||||
"tool": {
|
||||
"driver": {
|
||||
"name": "GitHub Actions Testing Framework",
|
||||
"version": "1.0.0",
|
||||
"informationUri": "https://github.com/ivuorinen/actions",
|
||||
"rules": []
|
||||
}
|
||||
},
|
||||
"results": [],
|
||||
"invocations": [
|
||||
{
|
||||
"executionSuccessful": true,
|
||||
"startTimeUtc": $timestamp,
|
||||
"arguments": ["--type", $test_type, "--format", "sarif"]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}' >"$report_file"
|
||||
|
||||
# Parse test results and add SARIF findings
|
||||
local results_array="[]"
|
||||
local rules_array="[]"
|
||||
|
||||
# Process unit test failures
|
||||
if [[ -d "${TEST_ROOT}/reports/unit" ]]; then
|
||||
for test_file in "${TEST_ROOT}/reports/unit"/*.txt; do
|
||||
if [[ -f "$test_file" ]]; then
|
||||
local action_name
|
||||
action_name=$(basename "$test_file" .txt)
|
||||
|
||||
# Check if test failed by looking for actual failures in the summary line
|
||||
if grep -qE "[0-9]+ examples?, [1-9][0-9]* failures?" "$test_file" || grep -q "Fatal error occurred" "$test_file"; then
|
||||
# Extract failure details
|
||||
local failure_message
|
||||
failure_message=$(grep -E "(Fatal error|failure|FAILED)" "$test_file" | head -1 || echo "Test failed")
|
||||
|
||||
# Add rule if not exists
|
||||
if ! echo "$rules_array" | jq -e '.[] | select(.id == "test-failure")' >/dev/null 2>&1; then
|
||||
rules_array=$(echo "$rules_array" | jq '. + [{
|
||||
"id": "test-failure",
|
||||
"name": "TestFailure",
|
||||
"shortDescription": {"text": "Test execution failed"},
|
||||
"fullDescription": {"text": "A unit or integration test failed during execution"},
|
||||
"defaultConfiguration": {"level": "error"}
|
||||
}]')
|
||||
fi
|
||||
|
||||
# Add result using jq --arg to safely escape dynamic strings
|
||||
results_array=$(echo "$results_array" | jq \
|
||||
--arg failure_msg "$failure_message" \
|
||||
--arg action_name "$action_name" \
|
||||
'. + [{
|
||||
"ruleId": "test-failure",
|
||||
"level": "error",
|
||||
"message": {"text": $failure_msg},
|
||||
"locations": [{
|
||||
"physicalLocation": {
|
||||
"artifactLocation": {"uri": ($action_name + "/action.yml")},
|
||||
"region": {"startLine": 1, "startColumn": 1}
|
||||
}
|
||||
}]
|
||||
}]')
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Process integration test failures similarly
|
||||
if [[ -d "${TEST_ROOT}/reports/integration" ]]; then
|
||||
for test_file in "${TEST_ROOT}/reports/integration"/*.txt; do
|
||||
if [[ -f "$test_file" ]]; then
|
||||
local action_name
|
||||
action_name=$(basename "$test_file" .txt)
|
||||
|
||||
if grep -qE "FAILED|ERROR|error:" "$test_file"; then
|
||||
local failure_message
|
||||
failure_message=$(grep -E "(FAILED|ERROR|error:)" "$test_file" | head -1 || echo "Integration test failed")
|
||||
|
||||
# Add integration rule if not exists
|
||||
if ! echo "$rules_array" | jq -e '.[] | select(.id == "integration-failure")' >/dev/null 2>&1; then
|
||||
rules_array=$(echo "$rules_array" | jq '. + [{
|
||||
"id": "integration-failure",
|
||||
"name": "IntegrationFailure",
|
||||
"shortDescription": {"text": "Integration test failed"},
|
||||
"fullDescription": {"text": "An integration test failed during workflow execution"},
|
||||
"defaultConfiguration": {"level": "warning"}
|
||||
}]')
|
||||
fi
|
||||
|
||||
# Add result using jq --arg to safely escape dynamic strings
|
||||
results_array=$(echo "$results_array" | jq \
|
||||
--arg failure_msg "$failure_message" \
|
||||
--arg action_name "$action_name" \
|
||||
'. + [{
|
||||
"ruleId": "integration-failure",
|
||||
"level": "warning",
|
||||
"message": {"text": $failure_msg},
|
||||
"locations": [{
|
||||
"physicalLocation": {
|
||||
"artifactLocation": {"uri": ($action_name + "/action.yml")},
|
||||
"region": {"startLine": 1, "startColumn": 1}
|
||||
}
|
||||
}]
|
||||
}]')
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Update SARIF file with results and rules
|
||||
local temp_file
|
||||
temp_file=$(mktemp)
|
||||
jq --argjson rules "$rules_array" --argjson results "$results_array" \
|
||||
'.runs[0].tool.driver.rules = $rules | .runs[0].results = $results' \
|
||||
"$report_file" >"$temp_file" && mv "$temp_file" "$report_file"
|
||||
|
||||
log_success "SARIF report generated: $report_file"
|
||||
}
|
||||
|
||||
# Generate console test report
|
||||
generate_console_report() {
|
||||
echo ""
|
||||
echo "========================================"
|
||||
echo " GitHub Actions Test Framework Report"
|
||||
echo "========================================"
|
||||
echo "Test Type: $TEST_TYPE"
|
||||
echo "Timestamp: $(date)"
|
||||
echo "Coverage Enabled: $COVERAGE_ENABLED"
|
||||
echo ""
|
||||
|
||||
if [[ -d "${TEST_ROOT}/reports/unit" ]]; then
|
||||
local unit_tests
|
||||
unit_tests=$(find "${TEST_ROOT}/reports/unit" -name "*.txt" 2>/dev/null | wc -l | tr -d ' ')
|
||||
printf "%-25s %4s\n" "Unit Tests Run:" "$unit_tests"
|
||||
fi
|
||||
|
||||
if [[ -d "${TEST_ROOT}/reports/integration" ]]; then
|
||||
local integration_tests
|
||||
integration_tests=$(find "${TEST_ROOT}/reports/integration" -name "*.txt" 2>/dev/null | wc -l | tr -d ' ')
|
||||
printf "%-25s %4s\n" "Integration Tests Run:" "$integration_tests"
|
||||
fi
|
||||
|
||||
if [[ -f "${TEST_ROOT}/coverage/summary.json" ]]; then
|
||||
local coverage
|
||||
coverage=$(jq -r '.coverage_percent' "${TEST_ROOT}/coverage/summary.json" 2>/dev/null || echo "N/A")
|
||||
if [[ "$coverage" =~ ^[0-9]+$ ]]; then
|
||||
printf "%-25s %4s%%\n" "Test Coverage:" "$coverage"
|
||||
else
|
||||
printf "%-25s %s\n" "Test Coverage:" "$coverage"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "========================================"
|
||||
}
|
||||
|
||||
# Main test execution function
|
||||
main() {
|
||||
log_info "Starting GitHub Actions Testing Framework"
|
||||
|
||||
# Parse arguments
|
||||
parse_args "$@"
|
||||
|
||||
# Initialize framework
|
||||
init_testing_framework
|
||||
|
||||
# Check dependencies
|
||||
check_dependencies
|
||||
|
||||
# Discover actions to test
|
||||
local actions=()
|
||||
while IFS= read -r action; do
|
||||
actions+=("$action")
|
||||
done < <(discover_actions)
|
||||
|
||||
if [[ ${#actions[@]} -eq 0 ]]; then
|
||||
log_error "No actions found to test"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run tests based on type
|
||||
local test_failed=false
|
||||
|
||||
case "$TEST_TYPE" in
|
||||
"unit")
|
||||
if ! run_unit_tests "${actions[@]}"; then
|
||||
test_failed=true
|
||||
fi
|
||||
;;
|
||||
"integration")
|
||||
if ! run_integration_tests "${actions[@]}"; then
|
||||
test_failed=true
|
||||
fi
|
||||
;;
|
||||
"e2e")
|
||||
log_warning "E2E tests not yet implemented"
|
||||
;;
|
||||
"all")
|
||||
if ! run_unit_tests "${actions[@]}"; then
|
||||
test_failed=true
|
||||
fi
|
||||
if ! run_integration_tests "${actions[@]}"; then
|
||||
test_failed=true
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
log_error "Unknown test type: $TEST_TYPE"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Generate coverage report
|
||||
generate_coverage_report
|
||||
|
||||
# Generate test report
|
||||
generate_test_report
|
||||
|
||||
# Final status
|
||||
if [[ $test_failed == "true" ]]; then
|
||||
log_error "Some tests failed"
|
||||
exit 1
|
||||
else
|
||||
log_success "All tests passed!"
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function if script is executed directly
|
||||
if [[ ${BASH_SOURCE[0]} == "${0}" ]]; then
|
||||
main "$@"
|
||||
fi
|
||||
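For orientation, a minimal invocation sketch for the test runner above, assuming it is run from the repository root; the node-setup action name comes from the script's own usage examples and is only illustrative:

  # Unit tests for a single action, with a JSON report
  ./_tests/run-tests.sh --type unit --format json node-setup

  # Full unit and integration run without coverage, using eight parallel jobs
  ./_tests/run-tests.sh --no-coverage --jobs 8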
_tests/shared/test_docker_image_regex.py (62 lines, new executable file)
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""Test docker image name regex fix for dots in validation_core.py."""

from pathlib import Path
import sys

# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent))

# pylint: disable=wrong-import-position
from validation_core import ValidationCore


def test_docker_image_names_with_dots():
    """Test that docker image names with dots are accepted."""
    validator = ValidationCore()

    # Valid docker image names with dots (should pass)
    valid_names = [
        "my.app",
        "app.with.dots",
        "registry.example.com/myapp",
        "docker.io/library/nginx",
        "ghcr.io/owner/repo",
        "gcr.io/project-id/image",
        "quay.io/organization/app",
        "my.registry.local/app.name",
        "registry.example.com/namespace/app.name",
        "harbor.example.com/project/image.name",
        "nexus.company.local/docker/app",
    ]

    print("Testing valid Docker image names with dots:")
    for name in valid_names:
        is_valid, error = validator.validate_docker_image_name(name)
        status = "✓" if is_valid else "✗"
        print(f" {status} {name:50s} {'PASS' if is_valid else f'FAIL: {error}'}")
        assert is_valid, f"Should accept: {name} (got error: {error})"

    # Invalid names (should fail)
    invalid_names = [
        "MyApp",  # Uppercase
        "my app",  # Space
        "-myapp",  # Leading dash
        "myapp-",  # Trailing dash
        "_myapp",  # Leading underscore
    ]

    print("\nTesting invalid Docker image names:")
    for name in invalid_names:
        is_valid, error = validator.validate_docker_image_name(name)
        status = "✓" if not is_valid else "✗"
        print(
            f" {status} {name:50s} {'PASS (rejected)' if not is_valid else 'FAIL (should reject)'}"
        )
        assert not is_valid, f"Should reject: {name}"

    print("\n✅ All tests passed!")


if __name__ == "__main__":
    test_docker_image_names_with_dots()
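The test above can be executed directly through its __main__ guard, and the test_ prefix also makes it discoverable by pytest; a minimal sketch, assuming execution from the repository root and that pytest is available for the second form:

  # Direct execution via the __main__ guard
  python3 _tests/shared/test_docker_image_regex.py

  # Or collected by pytest
  python3 -m pytest _tests/shared/test_docker_image_regex.py -q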
_tests/shared/validation_core.py (904 lines, new executable file)
@@ -0,0 +1,904 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Shared validation core module for GitHub Actions.
|
||||
|
||||
This module consolidates all validation logic to eliminate duplication between
|
||||
the framework validation and the centralized validator. It provides:
|
||||
|
||||
1. Standardized token patterns (resolved GitHub documentation discrepancies)
|
||||
2. Common validation functions
|
||||
3. Unified security validation
|
||||
4. Centralized YAML parsing utilities
|
||||
5. Command-line interface for ShellSpec test integration
|
||||
|
||||
This replaces inline Python code in ShellSpec tests and duplicate functions
|
||||
across multiple files.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
import re
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
import yaml # pylint: disable=import-error
|
||||
|
||||
|
||||
class ValidationCore:
|
||||
"""Core validation functionality with standardized patterns and functions."""
|
||||
|
||||
# Standardized token patterns - resolved based on GitHub documentation
|
||||
# Fine-grained tokens are 50-255 characters with underscores
|
||||
TOKEN_PATTERNS = {
|
||||
"classic": r"^gh[efpousr]_[a-zA-Z0-9]{36}$",
|
||||
"fine_grained": r"^github_pat_[A-Za-z0-9_]{50,255}$", # 50-255 chars with underscores
|
||||
"installation": r"^ghs_[a-zA-Z0-9]{36}$",
|
||||
"npm_classic": r"^npm_[a-zA-Z0-9]{40,}$", # NPM classic tokens
|
||||
}
|
||||
|
||||
# Injection detection pattern - characters commonly used in command injection
|
||||
INJECTION_CHARS_PATTERN = r"[;&|`$()]"
|
||||
|
||||
# Security injection patterns
|
||||
SECURITY_PATTERNS = [
|
||||
r";\s*(rm|del|format|shutdown|reboot)",
|
||||
r"&&\s*(rm|del|format|shutdown|reboot)",
|
||||
r"\|\s*(rm|del|format|shutdown|reboot)",
|
||||
r"`[^`]*`", # Command substitution
|
||||
r"\$\([^)]*\)", # Command substitution
|
||||
# Path traversal only dangerous when combined with commands
|
||||
r"\.\./.*;\s*(rm|del|format|shutdown|reboot)",
|
||||
r"\.\.\\+.*;\s*(rm|del|format|shutdown|reboot)", # Windows: ..\ or ..\\ patterns
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the validation core."""
|
||||
|
||||
def validate_github_token(self, token: str, *, required: bool = False) -> tuple[bool, str]:
|
||||
"""
|
||||
Validate GitHub token format using standardized regex patterns.
|
||||
|
||||
Args:
|
||||
token: The token to validate
|
||||
required: Whether the token is required
|
||||
|
||||
Returns:
|
||||
Tuple of (is_valid, error_message)
|
||||
"""
|
||||
if not token or token.strip() == "":
|
||||
if required:
|
||||
return False, "Token is required but not provided"
|
||||
return True, ""
|
||||
|
||||
# Allow GitHub Actions expressions
|
||||
if token == "${{ github.token }}" or (token.startswith("${{") and token.endswith("}}")):
|
||||
return True, ""
|
||||
|
||||
# Allow environment variable references (e.g., $GITHUB_TOKEN)
|
||||
if re.match(r"^\$[A-Za-z_][\w]*$", token):
|
||||
return True, ""
|
||||
|
||||
# Check against standardized token patterns
|
||||
for _token_type, pattern in self.TOKEN_PATTERNS.items():
|
||||
if re.match(pattern, token):
|
||||
return True, ""
|
||||
|
||||
return (
|
||||
False,
|
||||
"Invalid token format. Expected: gh[efpousr]_* (36 chars), "
|
||||
"github_pat_[A-Za-z0-9_]* (50-255 chars), ghs_* (36 chars), or npm_* (40+ chars)",
|
||||
)
|
||||
|
||||
def validate_namespace_with_lookahead(self, namespace: str) -> tuple[bool, str]:
|
||||
"""
|
||||
Validate namespace using lookahead pattern for .NET namespaces.
|
||||
|
||||
Args:
|
||||
namespace: The namespace to validate
|
||||
|
||||
Returns:
|
||||
Tuple of (is_valid, error_message)
|
||||
"""
|
||||
if not namespace or namespace.strip() == "":
|
||||
return False, "Namespace cannot be empty"
|
||||
|
||||
# Pattern with lookahead ensures hyphens are only allowed when followed by alphanumeric
|
||||
pattern = r"^[a-zA-Z0-9]([a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38}$"
|
||||
|
||||
if re.match(pattern, namespace):
|
||||
return True, ""
|
||||
return (
|
||||
False,
|
||||
"Invalid namespace format. Must be 1-39 characters, "
|
||||
"alphanumeric and hyphens, no trailing hyphens",
|
||||
)
|
||||
|
||||
def validate_security_patterns(
|
||||
self,
|
||||
input_value: str,
|
||||
input_name: str = "",
|
||||
) -> tuple[bool, str]:
|
||||
"""
|
||||
Check for common security injection patterns.
|
||||
|
||||
Args:
|
||||
input_value: The value to validate
|
||||
input_name: Name of the input (for context)
|
||||
|
||||
Returns:
|
||||
Tuple of (is_valid, error_message)
|
||||
"""
|
||||
# Allow empty values for most inputs (they're often optional)
|
||||
if not input_value or input_value.strip() == "":
|
||||
return True, ""
|
||||
|
||||
for pattern in self.SECURITY_PATTERNS:
|
||||
if re.search(pattern, input_value, re.IGNORECASE):
|
||||
return (
|
||||
False,
|
||||
f"Potential security injection pattern detected in {input_name or 'input'}",
|
||||
)
|
||||
|
||||
return True, ""
|
||||
|
||||
def validate_boolean(self, value: str, input_name: str) -> tuple[bool, str]:
|
||||
"""Validate boolean input with intelligent fallback for misclassified inputs."""
|
||||
# Handle empty values
|
||||
if not value:
|
||||
return True, ""
|
||||
|
||||
# Standard boolean values
|
||||
if value.lower() in ["true", "false"]:
|
||||
return True, ""
|
||||
|
||||
# Intelligent fallback for misclassified inputs
|
||||
# If input name suggests it should accept paths/directories, validate as such
|
||||
if any(
|
||||
keyword in input_name.lower()
|
||||
for keyword in ["directories", "directory", "path", "file"]
|
||||
):
|
||||
return self.validate_cache_directories(value)
|
||||
|
||||
return False, f"Input '{input_name}' must be 'true' or 'false'"
|
||||
|
||||
def validate_version_format(
|
||||
self,
|
||||
value: str,
|
||||
*,
|
||||
allow_v_prefix: bool = False,
|
||||
) -> tuple[bool, str]:
|
||||
"""Validate semantic version format."""
|
||||
if value.lower() == "latest":
|
||||
return True, ""
|
||||
if not allow_v_prefix and value.startswith("v"):
|
||||
return False, f"Version should not start with 'v': {value}"
|
||||
value = value.removeprefix("v") # Remove v prefix for validation
|
||||
# Split validation to reduce complexity
|
||||
# Base version: major.minor.patch (or simpler forms)
|
||||
base_pattern = r"^[\d]+(\.[\d]+)?(\.[\d]+)?$"
|
||||
# Version with prerelease/build: major.minor.patch-prerelease+build
|
||||
extended_pattern = r"^[\d]+(\.[\d]+)?(\.[\d]+)?[-+][0-9A-Za-z.-]+$"
|
||||
|
||||
if re.match(base_pattern, value) or re.match(extended_pattern, value):
|
||||
return True, ""
|
||||
return False, f"Invalid version format: {value}"
|
||||
|
||||
def validate_file_path(self, value: str, *, allow_traversal: bool = False) -> tuple[bool, str]:
|
||||
"""Validate file path format."""
|
||||
if not value:
|
||||
return True, ""
|
||||
|
||||
# Check for injection patterns
|
||||
if re.search(self.INJECTION_CHARS_PATTERN, value):
|
||||
return False, f"Potential injection detected in file path: {value}"
|
||||
|
||||
# Check for path traversal (unless explicitly allowed)
|
||||
if not allow_traversal and ("../" in value or "..\\" in value):
|
||||
return False, f"Path traversal not allowed: {value}"
|
||||
|
||||
# Check for absolute paths (often not allowed)
|
||||
if value.startswith("/") or (len(value) > 1 and value[1] == ":"):
|
||||
return False, f"Absolute paths not allowed: {value}"
|
||||
|
||||
return True, ""
|
||||
|
||||
def validate_docker_image_name(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate docker image name format."""
|
||||
if not value:
|
||||
return True, ""
|
||||
# Split validation into parts to reduce regex complexity
|
||||
# Valid format: lowercase alphanumeric with separators (., _, __, -) and optional namespace
|
||||
if not re.match(r"^[a-z0-9]", value):
|
||||
return False, f"Invalid docker image name format: {value}"
|
||||
if not re.match(r"^[a-z0-9._/-]+$", value):
|
||||
return False, f"Invalid docker image name format: {value}"
|
||||
# Check for invalid patterns
|
||||
if value.endswith((".", "_", "-", "/")):
|
||||
return False, f"Invalid docker image name format: {value}"
|
||||
if "//" in value or ".." in value:
|
||||
return False, f"Invalid docker image name format: {value}"
|
||||
return True, ""
|
||||
|
||||
def validate_docker_tag(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate Docker tag format."""
|
||||
if not value:
|
||||
return True, ""
|
||||
# Docker tags must be valid ASCII and may contain lowercase and uppercase letters,
|
||||
# digits, underscores, periods and dashes. Cannot start with period or dash.
|
||||
# Max length is 128 characters.
|
||||
if len(value) > 128:
|
||||
return False, f"Docker tag too long (max 128 characters): {value}"
|
||||
if not re.match(r"^[a-zA-Z0-9_][a-zA-Z0-9._-]*$", value):
|
||||
return False, f"Invalid docker tag format: {value}"
|
||||
return True, ""
|
||||
|
||||
def validate_php_extensions(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate PHP extensions format."""
|
||||
if not value:
|
||||
return True, ""
|
||||
if re.search(r"[;&|`$()@#]", value):
|
||||
return False, f"Potential injection detected in PHP extensions: {value}"
|
||||
if not re.match(r"^[a-zA-Z0-9_,\s]+$", value):
|
||||
return False, f"Invalid PHP extensions format: {value}"
|
||||
return True, ""
|
||||
|
||||
def validate_coverage_driver(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate coverage driver."""
|
||||
if value not in ["none", "xdebug", "pcov", "xdebug3"]:
|
||||
return False, "Invalid coverage driver. Must be 'none', 'xdebug', 'pcov', or 'xdebug3'"
|
||||
return True, ""
|
||||
|
||||
def validate_numeric_range(self, value: str, min_val: int, max_val: int) -> tuple[bool, str]:
|
||||
"""Validate numeric value within range."""
|
||||
try:
|
||||
num = int(value)
|
||||
if min_val <= num <= max_val:
|
||||
return True, ""
|
||||
return False, f"Value must be between {min_val} and {max_val}, got {num}"
|
||||
except ValueError:
|
||||
return False, f"Invalid numeric value: {value}"
|
||||
|
||||
def validate_php_version(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate PHP version format (allows X.Y and X.Y.Z)."""
|
||||
if not value:
|
||||
return True, ""
|
||||
# PHP versions can be X.Y or X.Y.Z format
|
||||
if re.match(r"^[\d]+\.[\d]+(\.[\d]+)?$", value):
|
||||
return True, ""
|
||||
return False, f"Invalid PHP version format: {value}"
|
||||
|
||||
def validate_composer_version(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate Composer version (1 or 2)."""
|
||||
if value in ["1", "2"]:
|
||||
return True, ""
|
||||
return False, f"Invalid Composer version. Must be '1' or '2', got '{value}'"
|
||||
|
||||
def validate_stability(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate Composer stability."""
|
||||
valid_stabilities = ["stable", "RC", "beta", "alpha", "dev"]
|
||||
if value in valid_stabilities:
|
||||
return True, ""
|
||||
return False, f"Invalid stability. Must be one of: {', '.join(valid_stabilities)}"
|
||||
|
||||
def validate_cache_directories(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate cache directories (comma-separated paths)."""
|
||||
if not value:
|
||||
return True, ""
|
||||
|
||||
# Split by comma and validate each directory
|
||||
directories = [d.strip() for d in value.split(",")]
|
||||
for directory in directories:
|
||||
if not directory:
|
||||
continue
|
||||
|
||||
# Basic path validation
|
||||
if re.search(self.INJECTION_CHARS_PATTERN, directory):
|
||||
return False, f"Potential injection detected in directory path: {directory}"
|
||||
|
||||
# Check for path traversal (both Unix and Windows)
|
||||
if re.search(r"\.\.[/\\]", directory):
|
||||
return False, f"Path traversal not allowed in directory: {directory}"
|
||||
|
||||
# Check for absolute paths
|
||||
if directory.startswith("/") or (len(directory) > 1 and directory[1] == ":"):
|
||||
return False, f"Absolute paths not allowed in directory: {directory}"
|
||||
|
||||
return True, ""
|
||||
|
||||
def validate_tools(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate Composer tools format (allows @ for stability flags like dev-master@dev)."""
|
||||
if not value:
|
||||
return True, ""
|
||||
|
||||
# Check for injection patterns (@ removed to allow Composer stability flags)
|
||||
if re.search(self.INJECTION_CHARS_PATTERN, value):
|
||||
return False, f"Potential injection detected in tools: {value}"
|
||||
|
||||
return True, ""
|
||||
|
||||
def validate_numeric_range_1_10(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate numeric value between 1 and 10."""
|
||||
return self.validate_numeric_range(value, 1, 10)
|
||||
|
||||
def validate_enhanced_business_logic(
|
||||
self,
|
||||
action_name: str,
|
||||
input_name: str,
|
||||
value: str,
|
||||
) -> tuple[bool | None, str]:
|
||||
"""
|
||||
Enhanced business logic validation for specific action/input combinations.
|
||||
Returns (None, "") if no enhanced validation applies, otherwise returns validation result.
|
||||
"""
|
||||
if not value: # Empty values are generally allowed, except for specific cases
|
||||
# Some inputs should not be empty even if they're optional
|
||||
if action_name == "php-composer" and input_name in ["composer-version"]:
|
||||
return False, f"Empty {input_name} is not allowed"
|
||||
return None, ""
|
||||
|
||||
# PHP Composer specific validations
|
||||
if action_name == "php-composer":
|
||||
return self._validate_php_composer_business_logic(input_name, value)
|
||||
|
||||
# Prettier-check specific validations
|
||||
if action_name == "prettier-check":
|
||||
return self._validate_prettier_check_business_logic(input_name, value)
|
||||
|
||||
# Add more action-specific validations here as needed
|
||||
|
||||
return None, "" # No enhanced validation applies
|
||||
|
||||
def _validate_composer_version(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate composer version input."""
|
||||
if value not in ["1", "2"]:
|
||||
return False, f"Composer version must be '1' or '2', got '{value}'"
|
||||
return True, ""
|
||||
|
||||
def _validate_stability(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate stability input."""
|
||||
valid_stabilities = ["stable", "RC", "beta", "alpha", "dev"]
|
||||
if value not in valid_stabilities:
|
||||
return (
|
||||
False,
|
||||
f"Invalid stability '{value}'. Must be one of: {', '.join(valid_stabilities)}",
|
||||
)
|
||||
return True, ""
|
||||
|
||||
def _validate_php_version(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate PHP version input."""
|
||||
if not re.match(r"^[\d]+\.[\d]+(\.[\d]+)?$", value):
|
||||
return False, f"Invalid PHP version format: {value}"
|
||||
|
||||
try:
|
||||
major, minor = value.split(".")[:2]
|
||||
major_num, minor_num = int(major), int(minor)
|
||||
|
||||
if major_num < 7:
|
||||
return False, f"PHP version {value} is too old (minimum 7.0)"
|
||||
|
||||
if major_num > 20:
|
||||
return False, f"Invalid PHP version: {value}"
|
||||
|
||||
if minor_num < 0 or minor_num > 99:
|
||||
return False, f"Invalid PHP version: {value}"
|
||||
|
||||
except (ValueError, IndexError):
|
||||
return False, f"Invalid PHP version format: {value}"
|
||||
return True, ""
|
||||
|
||||
def _validate_extensions(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate PHP extensions input."""
|
||||
if re.search(r"[@#$&*(){}\[\]|\\]", value):
|
||||
return False, f"Invalid characters in PHP extensions: {value}"
|
||||
return True, ""
|
||||
|
||||
def _validate_tools(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate tools input (@ allowed for Composer stability flags like dev-master@dev)."""
|
||||
if re.search(r"[#$&*(){}\[\]|\\]", value):
|
||||
return False, f"Invalid characters in tools specification: {value}"
|
||||
return True, ""
|
||||
|
||||
def _validate_args(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate args input."""
|
||||
if re.search(self.INJECTION_CHARS_PATTERN, value):
|
||||
return False, f"Potentially dangerous characters in args: {value}"
|
||||
return True, ""
|
||||
|
||||
def _validate_php_composer_business_logic(
|
||||
self,
|
||||
input_name: str,
|
||||
value: str,
|
||||
) -> tuple[bool | None, str]:
|
||||
"""Business logic validation specific to php-composer action."""
|
||||
validators = {
|
||||
"composer-version": self._validate_composer_version,
|
||||
"stability": self._validate_stability,
|
||||
"php": self._validate_php_version,
|
||||
"extensions": self._validate_extensions,
|
||||
"tools": self._validate_tools,
|
||||
"args": self._validate_args,
|
||||
}
|
||||
|
||||
if input_name in validators:
|
||||
is_valid, error_msg = validators[input_name](value)
|
||||
return is_valid, error_msg
|
||||
|
||||
return None, "" # No specific validation for this input
|
||||
|
||||
def _validate_file_pattern_security(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate file-pattern for security issues."""
|
||||
if ".." in value:
|
||||
return False, "Path traversal detected in file-pattern"
|
||||
if value.startswith("/"):
|
||||
return False, "Absolute path not allowed in file-pattern"
|
||||
if "$" in value:
|
||||
return False, "Shell expansion not allowed in file-pattern"
|
||||
return True, ""
|
||||
|
||||
def _validate_plugins_security(self, value: str) -> tuple[bool, str]:
|
||||
"""Validate plugins for security issues."""
|
||||
if re.search(self.INJECTION_CHARS_PATTERN, value):
|
||||
return False, "Potentially dangerous characters in plugins"
|
||||
if re.search(r"\$\{.*\}", value):
|
||||
return False, "Variable expansion not allowed in plugins"
|
||||
if re.search(r"\$\(.*\)", value):
|
||||
return False, "Command substitution not allowed in plugins"
|
||||
return True, ""
|
||||
|
||||
def _validate_prettier_check_business_logic(
|
||||
self,
|
||||
input_name: str,
|
||||
value: str,
|
||||
) -> tuple[bool | None, str]:
|
||||
"""Business logic validation specific to prettier-check action."""
|
||||
# Handle prettier-version specially (accepts "latest" or semantic version)
|
||||
if input_name == "prettier-version":
|
||||
if value == "latest":
|
||||
return True, ""
|
||||
# Otherwise validate as semantic version
|
||||
return None, "" # Let standard semantic version validation handle it
|
||||
|
||||
# Validate file-pattern for security issues
|
||||
if input_name == "file-pattern":
|
||||
return self._validate_file_pattern_security(value)
|
||||
|
||||
# Validate report-format enum
|
||||
if input_name == "report-format":
|
||||
if value == "":
|
||||
return False, "report-format cannot be empty"
|
||||
if value not in ["json", "sarif"]:
|
||||
return False, f"Invalid report-format: {value}"
|
||||
return True, ""
|
||||
|
||||
# Validate plugins for security issues
|
||||
if input_name == "plugins":
|
||||
return self._validate_plugins_security(value)
|
||||
|
||||
return None, "" # No specific validation for this input
|
||||
|
||||
|
||||
class ActionFileParser:
|
||||
"""Parser for GitHub Action YAML files."""
|
||||
|
||||
@staticmethod
|
||||
def load_action_file(action_file: str) -> dict[str, Any]:
|
||||
"""Load and parse an action.yml file."""
|
||||
try:
|
||||
with Path(action_file).open(encoding="utf-8") as f:
|
||||
return yaml.safe_load(f)
|
||||
except (OSError, yaml.YAMLError) as e:
|
||||
msg = f"Failed to load action file {action_file}: {e}"
|
||||
raise ValueError(msg) from e
|
||||
|
||||
@staticmethod
|
||||
def get_action_name(action_file: str) -> str:
|
||||
"""Get the action name from an action.yml file."""
|
||||
try:
|
||||
data = ActionFileParser.load_action_file(action_file)
|
||||
return data.get("name", "Unknown")
|
||||
except (OSError, ValueError, yaml.YAMLError, AttributeError):
|
||||
return "Unknown"
|
||||
|
||||
@staticmethod
|
||||
def get_action_inputs(action_file: str) -> list[str]:
|
||||
"""Get all input names from an action.yml file."""
|
||||
try:
|
||||
data = ActionFileParser.load_action_file(action_file)
|
||||
inputs = data.get("inputs", {})
|
||||
return list(inputs.keys())
|
||||
except (OSError, ValueError, yaml.YAMLError, AttributeError):
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def get_action_outputs(action_file: str) -> list[str]:
|
||||
"""Get all output names from an action.yml file."""
|
||||
try:
|
||||
data = ActionFileParser.load_action_file(action_file)
|
||||
outputs = data.get("outputs", {})
|
||||
return list(outputs.keys())
|
||||
except (OSError, ValueError, yaml.YAMLError, AttributeError):
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def get_action_runs_using(action_file: str) -> str:
|
||||
"""Get the runs.using value from an action.yml file."""
|
||||
try:
|
||||
data = ActionFileParser.load_action_file(action_file)
|
||||
runs = data.get("runs", {})
|
||||
return runs.get("using", "unknown")
|
||||
except (OSError, ValueError, yaml.YAMLError, AttributeError):
|
||||
return "unknown"
|
||||
|
||||
@staticmethod
|
||||
def _get_required_property(input_data: dict, property_name: str) -> str:
|
||||
"""Get the required/optional property."""
|
||||
is_required = input_data.get("required") in [True, "true"]
|
||||
if property_name == "required":
|
||||
return "required" if is_required else "optional"
|
||||
return "optional" if not is_required else "required"
|
||||
|
||||
@staticmethod
|
||||
def _get_default_property(input_data: dict) -> str:
|
||||
"""Get the default property."""
|
||||
default_value = input_data.get("default", "")
|
||||
return str(default_value) if default_value else "no-default"
|
||||
|
||||
@staticmethod
|
||||
def _get_description_property(input_data: dict) -> str:
|
||||
"""Get the description property."""
|
||||
description = input_data.get("description", "")
|
||||
return description if description else "no-description"
|
||||
|
||||
@staticmethod
|
||||
def _get_all_optional_property(inputs: dict) -> str:
|
||||
"""Get the all_optional property (list of required inputs)."""
|
||||
required_inputs = [k for k, v in inputs.items() if v.get("required") in [True, "true"]]
|
||||
return "none" if not required_inputs else ",".join(required_inputs)
|
||||
|
||||
@staticmethod
|
||||
def get_input_property(action_file: str, input_name: str, property_name: str) -> str:
|
||||
"""
|
||||
Get a property of an input from an action.yml file.
|
||||
|
||||
Args:
|
||||
action_file: Path to the action.yml file
|
||||
input_name: Name of the input to check
|
||||
property_name: Property to check (required, optional, default, description,
|
||||
all_optional)
|
||||
|
||||
Returns:
|
||||
- For 'required': 'required' or 'optional'
|
||||
- For 'optional': 'optional' or 'required'
|
||||
- For 'default': the default value or 'no-default'
|
||||
- For 'description': the description or 'no-description'
|
||||
- For 'all_optional': 'none' if no required inputs, else comma-separated list
|
||||
"""
|
||||
try:
|
||||
data = ActionFileParser.load_action_file(action_file)
|
||||
inputs = data.get("inputs", {})
|
||||
input_data = inputs.get(input_name, {})
|
||||
|
||||
property_handlers = {
|
||||
"required": lambda: ActionFileParser._get_required_property(
|
||||
input_data, property_name
|
||||
),
|
||||
"optional": lambda: ActionFileParser._get_required_property(
|
||||
input_data, property_name
|
||||
),
|
||||
"default": lambda: ActionFileParser._get_default_property(input_data),
|
||||
"description": lambda: ActionFileParser._get_description_property(input_data),
|
||||
"all_optional": lambda: ActionFileParser._get_all_optional_property(inputs),
|
||||
}
|
||||
|
||||
if property_name in property_handlers:
|
||||
return property_handlers[property_name]()
|
||||
|
||||
return f"unknown-property-{property_name}"
|
||||
|
||||
except (OSError, ValueError, yaml.YAMLError, AttributeError, KeyError) as e:
|
||||
return f"error: {e}"
|
||||
|
||||
|
||||
def resolve_action_file_path(action_dir: str) -> str:
|
||||
"""Resolve the path to the action.yml file."""
|
||||
action_dir_path = Path(action_dir)
|
||||
if not action_dir_path.is_absolute():
|
||||
# If relative, assume we're in _tests/shared and actions are at ../../
|
||||
script_dir = Path(__file__).resolve().parent
|
||||
project_root = script_dir.parent.parent
|
||||
return str(project_root / action_dir / "action.yml")
|
||||
return f"{action_dir}/action.yml"
|
||||
|
||||
|
||||
def _apply_validation_by_type(
|
||||
validator: ValidationCore,
|
||||
validation_type: str,
|
||||
input_value: str,
|
||||
input_name: str,
|
||||
required_inputs: list,
|
||||
) -> tuple[bool, str]:
|
||||
"""Apply validation based on the validation type."""
|
||||
validation_map = {
|
||||
"github_token": lambda: validator.validate_github_token(
|
||||
input_value, required=input_name in required_inputs
|
||||
),
|
||||
"namespace_with_lookahead": lambda: validator.validate_namespace_with_lookahead(
|
||||
input_value,
|
||||
),
|
||||
"boolean": lambda: validator.validate_boolean(input_value, input_name),
|
||||
"file_path": lambda: validator.validate_file_path(input_value),
|
||||
"docker_image_name": lambda: validator.validate_docker_image_name(input_value),
|
||||
"docker_tag": lambda: validator.validate_docker_tag(input_value),
|
||||
"php_extensions": lambda: validator.validate_php_extensions(input_value),
|
||||
"coverage_driver": lambda: validator.validate_coverage_driver(input_value),
|
||||
"php_version": lambda: validator.validate_php_version(input_value),
|
||||
"composer_version": lambda: validator.validate_composer_version(input_value),
|
||||
"stability": lambda: validator.validate_stability(input_value),
|
||||
"cache_directories": lambda: validator.validate_cache_directories(input_value),
|
||||
"tools": lambda: validator.validate_tools(input_value),
|
||||
"numeric_range_1_10": lambda: validator.validate_numeric_range_1_10(input_value),
|
||||
}
|
||||
|
||||
# Handle version formats
|
||||
if validation_type in ["semantic_version", "calver_version", "flexible_version"]:
|
||||
return validator.validate_version_format(input_value)
|
||||
|
||||
if validation_type == "terraform_version":
|
||||
return validator.validate_version_format(input_value, allow_v_prefix=True)
|
||||
|
||||
# Use validation map for other types
|
||||
if validation_type in validation_map:
|
||||
return validation_map[validation_type]()
|
||||
|
||||
return True, "" # Unknown validation type, assume valid
|
||||
|
||||
|
||||
def _load_and_validate_rules(
|
||||
rules_file: Path,
|
||||
input_name: str,
|
||||
input_value: str,
|
||||
) -> tuple[str | None, dict, list]:
|
||||
"""Load validation rules and perform basic validation."""
|
||||
try:
|
||||
with Path(rules_file).open(encoding="utf-8") as f:
|
||||
rules_data = yaml.safe_load(f)
|
||||
|
||||
        conventions = rules_data.get("conventions", {})
        overrides = rules_data.get("overrides", {})
        required_inputs = rules_data.get("required_inputs", [])

        # Check if input is required and empty
        if input_name in required_inputs and (not input_value or input_value.strip() == ""):
            return None, {}, []  # Will cause error in caller

        # Get validation type
        validation_type = overrides.get(input_name, conventions.get(input_name))
        return validation_type, rules_data, required_inputs

    except (OSError, yaml.YAMLError, KeyError, AttributeError):
        return None, {}, []


def validate_input(action_dir: str, input_name: str, input_value: str) -> tuple[bool | None, str]:
    """
    Validate an input value for a specific action.

    This is the main validation entry point that replaces the complex
    validation logic in the original framework.
    """
    validator = ValidationCore()

    # Always perform security validation first
    security_valid, security_error = validator.validate_security_patterns(input_value, input_name)
    if not security_valid:
        return False, security_error

    # Get action name for business logic and rules
    action_name = Path(action_dir).name

    # Check enhanced business logic first (takes precedence over general rules)
    enhanced_validation = validator.validate_enhanced_business_logic(
        action_name,
        input_name,
        input_value,
    )
    if enhanced_validation[0] is not None:  # If enhanced validation has an opinion
        return enhanced_validation

    # Load validation rules from action folder
    script_dir = Path(__file__).resolve().parent
    project_root = script_dir.parent.parent
    rules_file = project_root / action_name / "rules.yml"

    if rules_file.exists():
        validation_type, _rules_data, required_inputs = _load_and_validate_rules(
            rules_file,
            input_name,
            input_value,
        )

        # Check for required input error
        if input_name in required_inputs and (not input_value or input_value.strip() == ""):
            return False, f"Required input '{input_name}' cannot be empty"

        if validation_type:
            try:
                return _apply_validation_by_type(
                    validator,
                    validation_type,
                    input_value,
                    input_name,
                    required_inputs,
                )
            except (ValueError, AttributeError, KeyError, TypeError) as e:
                print(
                    f"Warning: Could not apply validation for {action_name}: {e}",
                    file=sys.stderr,
                )

    # If no specific validation found, the security check is sufficient
    return True, ""


def _handle_legacy_interface():
    """Handle legacy CLI interface for backward compatibility."""
    if len(sys.argv) == 5 and all(not arg.startswith("-") for arg in sys.argv[1:]):
        action_dir, input_name, input_value, expected_result = sys.argv[1:5]
        is_valid, error_msg = validate_input(action_dir, input_name, input_value)

        actual_result = "success" if is_valid else "failure"
        if actual_result == expected_result:
            sys.exit(0)
        else:
            print(f"Expected {expected_result}, got {actual_result}: {error_msg}", file=sys.stderr)
            sys.exit(1)
    return False  # Not legacy interface


def _create_argument_parser():
    """Create and configure the argument parser."""
    parser = argparse.ArgumentParser(
        description="Shared validation core for GitHub Actions",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Validate an input value
  python3 validation_core.py --validate action-dir input-name input-value

  # Get input property
  python3 validation_core.py --property action.yml input-name required

  # List inputs
  python3 validation_core.py --inputs action.yml

  # List outputs
  python3 validation_core.py --outputs action.yml

  # Get action name
  python3 validation_core.py --name action.yml
""",
    )

    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument(
        "--validate",
        nargs=3,
        metavar=("ACTION_DIR", "INPUT_NAME", "INPUT_VALUE"),
        help="Validate an input value",
    )
    mode_group.add_argument(
        "--property",
        nargs=3,
        metavar=("ACTION_FILE", "INPUT_NAME", "PROPERTY"),
        help="Get input property",
    )
    mode_group.add_argument("--inputs", metavar="ACTION_FILE", help="List action inputs")
    mode_group.add_argument("--outputs", metavar="ACTION_FILE", help="List action outputs")
    mode_group.add_argument("--name", metavar="ACTION_FILE", help="Get action name")
    mode_group.add_argument(
        "--runs-using",
        metavar="ACTION_FILE",
        help="Get action runs.using value",
    )
    mode_group.add_argument(
        "--validate-yaml",
        metavar="YAML_FILE",
        help="Validate YAML file syntax",
    )

    return parser


def _handle_validate_command(args):
    """Handle the validate command."""
    action_dir, input_name, input_value = args.validate
    is_valid, error_msg = validate_input(action_dir, input_name, input_value)
    if is_valid:
        sys.exit(0)
    else:
        print(f"INVALID: {error_msg}", file=sys.stderr)
        sys.exit(1)


def _handle_property_command(args):
    """Handle the property command."""
    action_file, input_name, property_name = args.property
    result = ActionFileParser.get_input_property(action_file, input_name, property_name)
    print(result)


def _handle_inputs_command(args):
    """Handle the inputs command."""
    inputs = ActionFileParser.get_action_inputs(args.inputs)
    for input_name in inputs:
        print(input_name)


def _handle_outputs_command(args):
    """Handle the outputs command."""
    outputs = ActionFileParser.get_action_outputs(args.outputs)
    for output_name in outputs:
        print(output_name)


def _handle_name_command(args):
    """Handle the name command."""
    name = ActionFileParser.get_action_name(args.name)
    print(name)


def _handle_runs_using_command(args):
    """Handle the runs-using command."""
    runs_using = ActionFileParser.get_action_runs_using(args.runs_using)
    print(runs_using)


def _handle_validate_yaml_command(args):
    """Handle the validate-yaml command."""
    try:
        with Path(args.validate_yaml).open(encoding="utf-8") as f:
            yaml.safe_load(f)
        sys.exit(0)
    except (OSError, yaml.YAMLError) as e:
        print(f"Invalid YAML: {e}", file=sys.stderr)
        sys.exit(1)


def _execute_command(args):
    """Execute the appropriate command based on arguments."""
    command_handlers = {
        "validate": _handle_validate_command,
        "property": _handle_property_command,
        "inputs": _handle_inputs_command,
        "outputs": _handle_outputs_command,
        "name": _handle_name_command,
        "runs_using": _handle_runs_using_command,
        "validate_yaml": _handle_validate_yaml_command,
    }

    for command, handler in command_handlers.items():
        if getattr(args, command, None):
            handler(args)
            return


def main():
    """Command-line interface for validation core."""
    # Handle legacy interface first
    _handle_legacy_interface()

    # Parse arguments and execute command
    parser = _create_argument_parser()
    args = parser.parse_args()

    try:
        _execute_command(args)
    except (ValueError, OSError, AttributeError) as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
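For reference, the CLI above can be exercised both through the flag-based interface and through the legacy four-argument form handled by `_handle_legacy_interface`. A minimal sketch, assuming the script is run from the directory that contains validation_core.py (its exact location in the repository is not shown in this hunk):

  # Flag-based interface: validate one input value for an action directory
  python3 validation_core.py --validate eslint-lint mode check

  # Legacy positional interface: action dir, input name, value, expected result
  python3 validation_core.py eslint-lint mode "check; rm -rf /" failure

  # Inspect an action.yml
  python3 validation_core.py --inputs eslint-lint/action.yml
  python3 validation_core.py --runs-using eslint-lint/action.yml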
_tests/unit/action-versioning/validation.spec.sh (new executable file, 142 lines)
@@ -0,0 +1,142 @@
#!/usr/bin/env shellspec
# Unit tests for action-versioning action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "action-versioning action"
  ACTION_DIR="action-versioning"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating major-version input"
    It "accepts valid year-based version (vYYYY)"
      When call validate_input_python "action-versioning" "major-version" "v2025"
      The status should be success
    End

    It "accepts valid semantic version (v1)"
      When call validate_input_python "action-versioning" "major-version" "v1"
      The status should be success
    End

    It "accepts valid semantic version (v2)"
      When call validate_input_python "action-versioning" "major-version" "v2"
      The status should be success
    End

    It "accepts year-based version from 2020"
      When call validate_input_python "action-versioning" "major-version" "v2020"
      The status should be success
    End

    It "accepts year-based version for 2030"
      When call validate_input_python "action-versioning" "major-version" "v2030"
      The status should be success
    End

    It "rejects version without v prefix"
      When call validate_input_python "action-versioning" "major-version" "2025"
      The status should be failure
    End

    It "rejects invalid version format"
      When call validate_input_python "action-versioning" "major-version" "invalid"
      The status should be failure
    End

    It "rejects empty version"
      When call validate_input_python "action-versioning" "major-version" ""
      The status should be failure
    End

    It "rejects version with command injection"
      When call validate_input_python "action-versioning" "major-version" "v2025; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating token input"
    It "accepts valid GitHub token (classic)"
      When call validate_input_python "action-versioning" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End

    It "accepts valid GitHub fine-grained token"
      When call validate_input_python "action-versioning" "token" "github_pat_1234567890123456789012345678901234567890123456789012345678901234567890a"
      The status should be success
    End

    It "accepts empty token (optional input)"
      When call validate_input_python "action-versioning" "token" ""
      The status should be success
    End

    It "rejects invalid token format"
      When call validate_input_python "action-versioning" "token" "invalid-token"
      The status should be failure
    End

    It "rejects token with command injection"
      When call validate_input_python "action-versioning" "token" "ghp_123456789012345678901234567890123456; rm -rf /"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should equal "Action Versioning"
    End

    It "defines expected inputs"
      When call get_action_inputs "$ACTION_FILE"
      The output should include "major-version"
      The output should include "token"
    End

    It "defines expected outputs"
      When call get_action_outputs "$ACTION_FILE"
      The output should include "updated"
      The output should include "commit-sha"
      The output should include "needs-annual-bump"
    End
  End

  Context "when testing input requirements"
    It "requires major-version input"
      When call is_input_required "$ACTION_FILE" "major-version"
      The status should be success
    End

    It "has token as optional input"
      When call is_input_required "$ACTION_FILE" "token"
      The status should be failure
    End
  End

  Context "when testing security validations"
    It "validates against path traversal in major-version"
      When call validate_input_python "action-versioning" "major-version" "v../../etc"
      The status should be failure
    End

    It "validates against shell metacharacters in major-version"
      When call validate_input_python "action-versioning" "major-version" "v2025|echo"
      The status should be failure
    End

    It "validates against command substitution in major-version"
      When call validate_input_python "action-versioning" "major-version" "v\$(whoami)"
      The status should be failure
    End

    It "validates against path traversal in token"
      When call validate_input_python "action-versioning" "token" "../../../etc/passwd"
      The status should be failure
    End
  End
End
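These specs depend on helper functions loaded through spec_helper.sh (`validate_input_python`, `validate_action_yml_quiet`, `get_action_name`, `get_action_inputs`, `get_action_outputs`, `is_input_required`). The helpers themselves are not part of this hunk; as a minimal sketch, assuming the helper simply wraps the Python CLI shown above, `validate_input_python` could look roughly like this (hypothetical wiring, not the repository's actual implementation):

  # Hypothetical helper sketch: forwards to the shared validation core.
  validate_input_python() {
    # $1 = action directory, $2 = input name, $3 = input value
    python3 validation_core.py --validate "$1" "$2" "$3"
  }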
_tests/unit/ansible-lint-fix/validation.spec.sh (new executable file, 150 lines)
@@ -0,0 +1,150 @@
#!/usr/bin/env shellspec
# Unit tests for ansible-lint-fix action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "ansible-lint-fix action"
  ACTION_DIR="ansible-lint-fix"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating token input"
    It "accepts all GitHub token formats"
      When call validate_input_python "ansible-lint-fix" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End
    It "accepts organization token"
      When call validate_input_python "ansible-lint-fix" "token" "gho_123456789012345678901234567890123456"
      The status should be success
    End
    It "accepts user token"
      When call validate_input_python "ansible-lint-fix" "token" "ghu_123456789012345678901234567890123456"
      The status should be success
    End
    It "accepts server token"
      When call validate_input_python "ansible-lint-fix" "token" "ghs_123456789012345678901234567890123456"
      The status should be success
    End
    It "accepts refresh token"
      When call validate_input_python "ansible-lint-fix" "token" "ghr_123456789012345678901234567890123456"
      The status should be success
    End
  End

  Context "when validating email input"
    It "accepts valid email"
      When call validate_input_python "ansible-lint-fix" "email" "test@example.com"
      The status should be success
    End
    It "rejects invalid email without @"
      When call validate_input_python "ansible-lint-fix" "email" "testexample.com"
      The status should be failure
    End
    It "rejects invalid email without domain"
      When call validate_input_python "ansible-lint-fix" "email" "test@"
      The status should be failure
    End
  End

  Context "when validating username input"
    It "accepts valid username"
      When call validate_input_python "ansible-lint-fix" "username" "github-actions"
      The status should be success
    End
    It "rejects semicolon injection"
      When call validate_input_python "ansible-lint-fix" "username" "user;rm -rf /"
      The status should be failure
    End
    It "rejects ampersand injection"
      When call validate_input_python "ansible-lint-fix" "username" "user&&malicious"
      The status should be failure
    End
    It "rejects pipe injection"
      When call validate_input_python "ansible-lint-fix" "username" "user|dangerous"
      The status should be failure
    End
    It "rejects overly long username"
      When call validate_input_python "ansible-lint-fix" "username" "this-username-is-definitely-too-long-for-github-maximum-length-limit"
      The status should be failure
    End
  End

  Context "when validating max-retries input"
    It "accepts valid retry count"
      When call validate_input_python "ansible-lint-fix" "max-retries" "5"
      The status should be success
    End
    It "rejects zero retries"
      When call validate_input_python "ansible-lint-fix" "max-retries" "0"
      The status should be failure
    End
    It "rejects negative retries"
      When call validate_input_python "ansible-lint-fix" "max-retries" "-1"
      The status should be failure
    End
    It "rejects retries above limit"
      When call validate_input_python "ansible-lint-fix" "max-retries" "15"
      The status should be failure
    End
    It "rejects non-numeric retries"
      When call validate_input_python "ansible-lint-fix" "max-retries" "invalid"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should equal "Ansible Lint and Fix"
    End

    It "defines expected inputs"
      inputs=$(get_action_inputs "$ACTION_FILE")
      When call echo "$inputs"
      The output should include "token"
      The output should include "username"
      The output should include "email"
      The output should include "max-retries"
    End

    It "defines expected outputs"
      outputs=$(get_action_outputs "$ACTION_FILE")
      When call echo "$outputs"
      The output should include "files_changed"
      The output should include "lint_status"
      The output should include "sarif_path"
    End
  End

  Context "when validating security"
    It "rejects command injection in token"
      When call validate_input_python "ansible-lint-fix" "token" "ghp_123;rm -rf /"
      The status should be failure
    End

    It "rejects command injection in email"
      When call validate_input_python "ansible-lint-fix" "email" "user@domain.com;rm -rf /"
      The status should be failure
    End

    It "validates all inputs for injection patterns"
      # Username injection testing already covered above
      When call validate_input_python "ansible-lint-fix" "max-retries" "3;malicious"
      The status should be failure
    End
  End

  Context "when testing outputs"
    It "produces all expected outputs consistently"
      When call test_action_outputs "$ACTION_DIR" "token" "ghp_123456789012345678901234567890123456" "username" "github-actions" "email" "test@example.com" "max-retries" "3"
      The status should be success
      The stderr should include "Testing action outputs for: ansible-lint-fix"
      The stderr should include "Output test passed for: ansible-lint-fix"
    End
  End
End
_tests/unit/biome-lint/validation.spec.sh (new executable file, 230 lines)
@@ -0,0 +1,230 @@
#!/usr/bin/env shellspec
# Unit tests for biome-lint action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "biome-lint action"
  ACTION_DIR="biome-lint"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating mode input"
    It "accepts check mode"
      When call validate_input_python "biome-lint" "mode" "check"
      The status should be success
    End

    It "accepts fix mode"
      When call validate_input_python "biome-lint" "mode" "fix"
      The status should be success
    End

    It "accepts empty mode (uses default)"
      When call validate_input_python "biome-lint" "mode" ""
      The status should be success
    End

    It "rejects invalid mode"
      When call validate_input_python "biome-lint" "mode" "invalid"
      The status should be failure
    End

    It "rejects mode with command injection"
      When call validate_input_python "biome-lint" "mode" "check; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating token input"
    It "accepts valid GitHub token (classic)"
      When call validate_input_python "biome-lint" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End

    It "accepts valid GitHub fine-grained token"
      When call validate_input_python "biome-lint" "token" "github_pat_1234567890123456789012345678901234567890123456789012345678901234567890a"
      The status should be success
    End

    It "accepts empty token (optional)"
      When call validate_input_python "biome-lint" "token" ""
      The status should be success
    End

    It "rejects invalid token format"
      When call validate_input_python "biome-lint" "token" "invalid-token"
      The status should be failure
    End

    It "rejects token with command injection"
      When call validate_input_python "biome-lint" "token" "ghp_123456789012345678901234567890123456; echo"
      The status should be failure
    End
  End

  Context "when validating username input"
    It "accepts valid username"
      When call validate_input_python "biome-lint" "username" "github-actions"
      The status should be success
    End

    It "accepts username with hyphens"
      When call validate_input_python "biome-lint" "username" "my-bot-user"
      The status should be success
    End

    It "accepts empty username (uses default)"
      When call validate_input_python "biome-lint" "username" ""
      The status should be success
    End

    It "rejects username with command injection"
      When call validate_input_python "biome-lint" "username" "user; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating email input"
    It "accepts valid email"
      When call validate_input_python "biome-lint" "email" "github-actions@github.com"
      The status should be success
    End

    It "accepts email with plus sign"
      When call validate_input_python "biome-lint" "email" "user+bot@example.com"
      The status should be success
    End

    It "accepts email with subdomain"
      When call validate_input_python "biome-lint" "email" "bot@ci.example.com"
      The status should be success
    End

    It "accepts empty email (uses default)"
      When call validate_input_python "biome-lint" "email" ""
      The status should be success
    End

    It "rejects invalid email format"
      When call validate_input_python "biome-lint" "email" "not-an-email"
      The status should be failure
    End

    It "rejects email with command injection"
      When call validate_input_python "biome-lint" "email" "user@example.com; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating max-retries input"
    It "accepts valid retry count (default)"
      When call validate_input_python "biome-lint" "max-retries" "3"
      The status should be success
    End

    It "accepts retry count of 1"
      When call validate_input_python "biome-lint" "max-retries" "1"
      The status should be success
    End

    It "accepts retry count of 10"
      When call validate_input_python "biome-lint" "max-retries" "10"
      The status should be success
    End

    It "accepts empty max-retries (uses default)"
      When call validate_input_python "biome-lint" "max-retries" ""
      The status should be success
    End

    It "rejects negative retry count"
      When call validate_input_python "biome-lint" "max-retries" "-1"
      The status should be failure
    End

    It "rejects non-numeric retry count"
      When call validate_input_python "biome-lint" "max-retries" "abc"
      The status should be failure
    End

    It "rejects retry count with command injection"
      When call validate_input_python "biome-lint" "max-retries" "3; echo"
      The status should be failure
    End
  End

  Context "when validating fail-on-error input"
    It "accepts true"
      When call validate_input_python "biome-lint" "fail-on-error" "true"
      The status should be success
    End

    It "accepts false"
      When call validate_input_python "biome-lint" "fail-on-error" "false"
      The status should be success
    End

    It "accepts empty (uses default)"
      When call validate_input_python "biome-lint" "fail-on-error" ""
      The status should be success
    End

    It "rejects invalid boolean value"
      When call validate_input_python "biome-lint" "fail-on-error" "maybe"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should equal "Biome Lint"
    End

    It "defines expected inputs"
      When call get_action_inputs "$ACTION_FILE"
      The output should include "mode"
      The output should include "token"
      The output should include "username"
      The output should include "email"
      The output should include "max-retries"
      The output should include "fail-on-error"
    End

    It "defines expected outputs"
      When call get_action_outputs "$ACTION_FILE"
      The output should include "status"
      The output should include "errors_count"
      The output should include "warnings_count"
      The output should include "files_changed"
    End
  End

  Context "when testing input requirements"
    It "has all inputs as optional (with defaults)"
      When call is_input_required "$ACTION_FILE" "mode"
      The status should be failure
    End
  End

  Context "when testing security validations"
    It "validates against path traversal"
      When call validate_input_python "biome-lint" "username" "../../../etc"
      The status should be failure
    End

    It "validates against shell metacharacters"
      When call validate_input_python "biome-lint" "email" "user@example.com|echo"
      The status should be failure
    End

    It "validates against command substitution"
      When call validate_input_python "biome-lint" "username" "\$(whoami)"
      The status should be failure
    End
  End
End
_tests/unit/codeql-analysis/validation.spec.sh (new executable file, 377 lines)
@@ -0,0 +1,377 @@
#!/usr/bin/env bash

Describe "codeql-analysis validation"
  Include "_tests/unit/spec_helper.sh"

  Describe "language validation"
    It "validates javascript language"
      When call validate_input_python "codeql-analysis" "language" "javascript"
      The status should be success
    End

    It "validates typescript language"
      When call validate_input_python "codeql-analysis" "language" "typescript"
      The status should be success
    End

    It "validates python language"
      When call validate_input_python "codeql-analysis" "language" "python"
      The status should be success
    End

    It "validates java language"
      When call validate_input_python "codeql-analysis" "language" "java"
      The status should be success
    End

    It "validates csharp language"
      When call validate_input_python "codeql-analysis" "language" "csharp"
      The status should be success
    End

    It "validates cpp language"
      When call validate_input_python "codeql-analysis" "language" "cpp"
      The status should be success
    End

    It "validates c language"
      When call validate_input_python "codeql-analysis" "language" "c"
      The status should be success
    End

    It "validates go language"
      When call validate_input_python "codeql-analysis" "language" "go"
      The status should be success
    End

    It "validates ruby language"
      When call validate_input_python "codeql-analysis" "language" "ruby"
      The status should be success
    End

    It "validates swift language"
      When call validate_input_python "codeql-analysis" "language" "swift"
      The status should be success
    End

    It "validates kotlin language"
      When call validate_input_python "codeql-analysis" "language" "kotlin"
      The status should be success
    End

    It "validates actions language"
      When call validate_input_python "codeql-analysis" "language" "actions"
      The status should be success
    End

    It "validates case insensitive languages"
      When call validate_input_python "codeql-analysis" "language" "JavaScript"
      The status should be success
    End

    It "rejects invalid language"
      When call validate_input_python "codeql-analysis" "language" "invalid-lang"
      The status should be failure
    End

    It "rejects empty language"
      When call validate_input_python "codeql-analysis" "language" ""
      The status should be failure
    End

    It "rejects unsupported language"
      When call validate_input_python "codeql-analysis" "language" "rust"
      The status should be failure
    End
  End

  Describe "queries validation"
    It "validates security-extended queries"
      When call validate_input_python "codeql-analysis" "queries" "security-extended"
      The status should be success
    End

    It "validates security-and-quality queries"
      When call validate_input_python "codeql-analysis" "queries" "security-and-quality"
      The status should be success
    End

    It "validates code-scanning queries"
      When call validate_input_python "codeql-analysis" "queries" "code-scanning"
      The status should be success
    End

    It "validates default queries"
      When call validate_input_python "codeql-analysis" "queries" "default"
      The status should be success
    End

    It "validates case insensitive queries"
      When call validate_input_python "codeql-analysis" "queries" "Security-Extended"
      The status should be success
    End

    It "validates custom query file with .ql extension"
      When call validate_input_python "codeql-analysis" "queries" "custom-queries.ql"
      The status should be success
    End

    It "validates custom query suite with .qls extension"
      When call validate_input_python "codeql-analysis" "queries" "my-suite.qls"
      The status should be success
    End

    It "validates custom query file with path"
      When call validate_input_python "codeql-analysis" "queries" ".github/codeql/custom.ql"
      The status should be success
    End

    It "rejects invalid query suite"
      When call validate_input_python "codeql-analysis" "queries" "invalid-suite"
      The status should be failure
    End

    It "rejects empty queries"
      When call validate_input_python "codeql-analysis" "queries" ""
      The status should be failure
    End
  End

  Describe "category validation"
    It "validates proper category format"
      When call validate_input_python "codeql-analysis" "category" "/language:javascript"
      The status should be success
    End

    It "validates custom category"
      When call validate_input_python "codeql-analysis" "category" "/custom/analysis"
      The status should be success
    End

    It "validates category with underscores"
      When call validate_input_python "codeql-analysis" "category" "/my_custom_category"
      The status should be success
    End

    It "validates category with hyphens"
      When call validate_input_python "codeql-analysis" "category" "/my-custom-category"
      The status should be success
    End

    It "validates category with colons"
      When call validate_input_python "codeql-analysis" "category" "/language:python:custom"
      The status should be success
    End

    It "validates empty category (optional)"
      When call validate_input_python "codeql-analysis" "category" ""
      The status should be success
    End

    It "rejects category without leading slash"
      When call validate_input_python "codeql-analysis" "category" "language:javascript"
      The status should be failure
    End

    It "rejects category with invalid characters"
      When call validate_input_python "codeql-analysis" "category" "/language@javascript"
      The status should be failure
    End

    It "rejects category with spaces"
      When call validate_input_python "codeql-analysis" "category" "/language javascript"
      The status should be failure
    End
  End

  Describe "config-file validation"
    It "validates valid config file path"
      When call validate_input_python "codeql-analysis" "config-file" ".github/codeql/config.yml"
      The status should be success
    End

    It "validates relative config file path"
      When call validate_input_python "codeql-analysis" "config-file" "codeql-config.yml"
      The status should be success
    End

    It "validates empty config file (optional)"
      When call validate_input_python "codeql-analysis" "config-file" ""
      The status should be success
    End

    It "rejects absolute path"
      When call validate_input_python "codeql-analysis" "config-file" "/etc/config.yml"
      The status should be failure
    End

    It "rejects path traversal"
      When call validate_input_python "codeql-analysis" "config-file" "../config.yml"
      The status should be failure
    End
  End

  Describe "checkout-ref validation"
    It "validates main branch"
      When call validate_input_python "codeql-analysis" "checkout-ref" "main"
      The status should be success
    End

    It "validates feature branch"
      When call validate_input_python "codeql-analysis" "checkout-ref" "feature/security-updates"
      The status should be success
    End

    It "validates commit SHA"
      When call validate_input_python "codeql-analysis" "checkout-ref" "abc123def456"
      The status should be success
    End

    It "validates tag"
      When call validate_input_python "codeql-analysis" "checkout-ref" "v1.2.3"
      The status should be success
    End

    It "validates empty checkout-ref (optional)"
      When call validate_input_python "codeql-analysis" "checkout-ref" ""
      The status should be success
    End
  End

  Describe "token validation"
    It "validates classic GitHub token"
      When call validate_input_python "codeql-analysis" "token" "ghp_1234567890abcdef1234567890abcdef1234"
      The status should be success
    End

    It "validates fine-grained token"
      When call validate_input_python "codeql-analysis" "token" "github_pat_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
      The status should be success
    End

    It "validates installation token"
      When call validate_input_python "codeql-analysis" "token" "ghs_1234567890abcdef1234567890abcdef1234"
      The status should be success
    End

    It "rejects invalid token format"
      When call validate_input_python "codeql-analysis" "token" "invalid-token"
      The status should be failure
    End

    It "rejects empty token"
      When call validate_input_python "codeql-analysis" "token" ""
      The status should be failure
    End
  End

  Describe "working-directory validation"
    It "validates current directory"
      When call validate_input_python "codeql-analysis" "working-directory" "."
      The status should be success
    End

    It "validates relative directory"
      When call validate_input_python "codeql-analysis" "working-directory" "src"
      The status should be success
    End

    It "validates nested directory"
      When call validate_input_python "codeql-analysis" "working-directory" "backend/src"
      The status should be success
    End

    It "rejects absolute path"
      When call validate_input_python "codeql-analysis" "working-directory" "/home/user/project"
      The status should be failure
    End

    It "rejects path traversal"
      When call validate_input_python "codeql-analysis" "working-directory" "../other-project"
      The status should be failure
    End
  End

  Describe "upload-results validation"
    It "validates true value"
      When call validate_input_python "codeql-analysis" "upload-results" "true"
      The status should be success
    End

    It "validates false value"
      When call validate_input_python "codeql-analysis" "upload-results" "false"
      The status should be success
    End

    It "rejects uppercase TRUE"
      When call validate_input_python "codeql-analysis" "upload-results" "TRUE"
      The status should be failure
    End

    It "rejects uppercase FALSE"
      When call validate_input_python "codeql-analysis" "upload-results" "FALSE"
      The status should be failure
    End

    It "rejects invalid boolean"
      When call validate_input_python "codeql-analysis" "upload-results" "yes"
      The status should be failure
    End

    It "rejects empty value"
      When call validate_input_python "codeql-analysis" "upload-results" ""
      The status should be failure
    End
  End

  Describe "complete action validation"
    It "validates all required inputs with minimal config"
      # Set up environment for the validation
      export INPUT_ACTION_TYPE="codeql-analysis"
      export INPUT_LANGUAGE="javascript"

      When call uv run validate-inputs/validator.py
      The status should be success
      The stderr should include "All input validation checks passed"
    End

    It "validates all inputs with full config"
      # Set up environment for the validation
      export INPUT_ACTION_TYPE="codeql-analysis"
      export INPUT_LANGUAGE="python"
      export INPUT_QUERIES="security-extended"
      export INPUT_CONFIG_FILE=".github/codeql/config.yml"
      export INPUT_CATEGORY="/custom/python-analysis"
      export INPUT_CHECKOUT_REF="main"
      export INPUT_TOKEN="ghp_1234567890abcdef1234567890abcdef1234"
      export INPUT_WORKING_DIRECTORY="backend"
      export INPUT_UPLOAD_RESULTS="true"

      When call uv run validate-inputs/validator.py
      The status should be success
      The stderr should include "All input validation checks passed"
    End

    It "fails validation with missing required language"
      # Set up environment for the validation
      export INPUT_ACTION_TYPE="codeql-analysis"
      unset INPUT_LANGUAGE

      When call uv run validate-inputs/validator.py
      The status should be failure
      The stderr should include "Required input 'language' is missing"
    End

    It "fails validation with invalid language and queries"
      # Set up environment for the validation
      export INPUT_ACTION_TYPE="codeql-analysis"
      export INPUT_LANGUAGE="invalid-lang"
      export INPUT_QUERIES="invalid-suite"

      When call uv run validate-inputs/validator.py
      The status should be failure
      The stderr should include "Unsupported CodeQL language"
      The stderr should include "Invalid CodeQL query suite"
    End
  End
End
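The "complete action validation" examples above drive the validator the same way composite actions do: every input arrives as an INPUT_* environment variable, and failures are reported on stderr with a non-zero exit code. A minimal sketch for reproducing the failing case locally, assuming uv is installed and the repository root is the working directory:

  # Expected to fail: unsupported language plus invalid query suite
  export INPUT_ACTION_TYPE="codeql-analysis"
  export INPUT_LANGUAGE="invalid-lang"
  export INPUT_QUERIES="invalid-suite"
  uv run validate-inputs/validator.py || echo "validation failed as expected"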
_tests/unit/compress-images/validation.spec.sh (new executable file, 52 lines)
@@ -0,0 +1,52 @@
#!/usr/bin/env shellspec
# Unit tests for compress-images action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "compress-images action"
  ACTION_DIR="compress-images"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating inputs"
    It "accepts valid quality setting"
      # pick one of the defined quality inputs
      inputs="$(get_action_inputs "$ACTION_FILE")"
      QUALITY_INPUT=$(echo "$inputs" | grep -E '^(image-quality|png-quality)$' | head -n1)
      [ -z "$QUALITY_INPUT" ] && Skip "No quality input found in action.yml"
      When call validate_input_python "compress-images" "$QUALITY_INPUT" "80"
      The status should be success
    End
    It "rejects invalid quality"
      # pick one of the defined quality inputs
      inputs="$(get_action_inputs "$ACTION_FILE")"
      QUALITY_INPUT=$(echo "$inputs" | grep -E '^(image-quality|png-quality)$' | head -n1)
      [ -z "$QUALITY_INPUT" ] && Skip "No quality input found in action.yml"
      When call validate_input_python "compress-images" "$QUALITY_INPUT" "150"
      The status should be failure
    End
    It "accepts valid path pattern"
      # use the defined path-filter input
      PATH_INPUT="ignore-paths"
      When call validate_input_python "compress-images" "$PATH_INPUT" "assets/**/*.{jpg,png}"
      The status should be success
    End
    It "rejects injection in path"
      # use the defined path-filter input
      PATH_INPUT="ignore-paths"
      When call validate_input_python "compress-images" "$PATH_INPUT" "images;rm -rf /tmp"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End
    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should match pattern "*Compress*"
    End
  End
End
_tests/unit/csharp-build/validation.spec.sh (new executable file, 81 lines)
@@ -0,0 +1,81 @@
#!/usr/bin/env shellspec
# Unit tests for csharp-build action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "csharp-build action"
  ACTION_DIR="csharp-build"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating dotnet-version input"
    It "accepts valid dotnet version"
      When call validate_input_python "csharp-build" "dotnet-version" "8.0"
      The status should be success
    End
    It "accepts dotnet 6 LTS"
      When call validate_input_python "csharp-build" "dotnet-version" "6.0"
      The status should be success
    End
    It "rejects invalid version"
      When call validate_input_python "csharp-build" "dotnet-version" "invalid"
      The status should be failure
    End
  End

  Context "when validating max-retries input"
    It "accepts valid max-retries"
      When call validate_input_python "csharp-build" "max-retries" "3"
      The status should be success
    End
    It "accepts minimum retries"
      When call validate_input_python "csharp-build" "max-retries" "1"
      The status should be success
    End
    It "rejects zero retries"
      When call validate_input_python "csharp-build" "max-retries" "0"
      The status should be failure
    End
    It "rejects non-numeric retries"
      When call validate_input_python "csharp-build" "max-retries" "invalid"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should match pattern "*C#*"
    End

    It "defines expected inputs"
      inputs=$(get_action_inputs "$ACTION_FILE")
      When call echo "$inputs"
      The output should include "dotnet-version"
      The output should include "max-retries"
    End

    It "defines expected outputs"
      outputs=$(get_action_outputs "$ACTION_FILE")
      When call echo "$outputs"
      The output should include "build_status"
      The output should include "test_status"
      The output should include "dotnet_version"
      The output should include "artifacts_path"
      The output should include "test_results_path"
    End
  End

  Context "when testing outputs"
    It "produces all expected outputs consistently"
      When call test_action_outputs "$ACTION_DIR" "dotnet-version" "8.0" "max-retries" "3"
      The status should be success
      The stderr should include "Testing action outputs for: csharp-build"
      The stderr should include "Output test passed for: csharp-build"
    End
  End
End
_tests/unit/csharp-lint-check/validation.spec.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/usr/bin/env shellspec
# Unit tests for csharp-lint-check action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "csharp-lint-check action"
  ACTION_DIR="csharp-lint-check"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating inputs"
    It "accepts valid dotnet version"
      When call validate_input_python "csharp-lint-check" "dotnet-version" "8.0"
      The status should be success
    End
    It "accepts valid dotnet version format"
      When call validate_input_python "csharp-lint-check" "dotnet-version" "8.0.100"
      The status should be success
    End
    It "rejects injection"
      When call validate_input_python "csharp-lint-check" "dotnet-version" "8.0;malicious"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End
    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should match pattern "*C#*"
    End
  End
End
_tests/unit/csharp-publish/validation.spec.sh (new executable file, 52 lines)
@@ -0,0 +1,52 @@
#!/usr/bin/env shellspec
# Unit tests for csharp-publish action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "csharp-publish action"
  ACTION_DIR="csharp-publish"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating inputs"
    It "accepts valid dotnet version"
      When call validate_input_python "csharp-publish" "dotnet-version" "8.0"
      The status should be success
    End
    It "accepts valid namespace"
      When call validate_input_python "csharp-publish" "namespace" "ivuorinen"
      The status should be success
    End
    It "accepts namespace with hyphens in middle"
      When call validate_input_python "csharp-publish" "namespace" "my-org-name"
      The status should be success
    End
    It "rejects namespace ending with hyphen"
      When call validate_input_python "csharp-publish" "namespace" "invalid-"
      The status should be failure
    End
    It "accepts valid GitHub token"
      When call validate_input_python "csharp-publish" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End
    It "rejects injection in namespace"
      When call validate_input_python "csharp-publish" "namespace" "invalid;malicious"
      The status should be failure
    End
    It "rejects injection in token"
      When call validate_input_python "csharp-publish" "token" "token;rm -rf /"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End
    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should match pattern "*C#*"
    End
  End
End
_tests/unit/docker-build/validation.spec.sh (new executable file, 218 lines)
@@ -0,0 +1,218 @@
#!/usr/bin/env shellspec
# Unit tests for docker-build action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "docker-build action"
  ACTION_DIR="docker-build"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating image-name input"
    It "accepts valid image name"
      When call validate_input_python "docker-build" "image-name" "myapp"
      The status should be success
    End
    It "accepts image name with registry prefix"
      When call validate_input_python "docker-build" "image-name" "registry.example.com/myapp"
      The status should be success
    End
    It "rejects command injection in image name"
      When call validate_input_python "docker-build" "image-name" "app; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating tag input"
    It "accepts valid tag format"
      When call validate_input_python "docker-build" "tag" "v1.0.0"
      The status should be success
    End
    It "accepts semantic version tag"
      When call validate_input_python "docker-build" "tag" "1.2.3"
      The status should be success
    End
    It "accepts latest tag"
      When call validate_input_python "docker-build" "tag" "latest"
      The status should be success
    End
    It "rejects invalid tag format"
      When call validate_input_python "docker-build" "tag" "invalid_tag!"
      The status should be failure
    End
  End

  Context "when validating architectures input"
    It "accepts valid architectures list"
      When call validate_input_python "docker-build" "architectures" "linux/amd64,linux/arm64"
      The status should be success
    End
    It "accepts single architecture"
      When call validate_input_python "docker-build" "architectures" "linux/amd64"
      The status should be success
    End
    It "accepts ARM variants"
      When call validate_input_python "docker-build" "architectures" "linux/arm/v7,linux/arm/v6"
      The status should be success
    End
  End

  Context "when validating dockerfile input"
    It "accepts valid dockerfile path"
      When call validate_input_python "docker-build" "dockerfile" "Dockerfile"
      The status should be success
    End
    It "accepts custom dockerfile path"
      When call validate_input_python "docker-build" "dockerfile" "docker/Dockerfile.prod"
      The status should be success
    End
    It "rejects malicious dockerfile path"
      When call validate_input_python "docker-build" "dockerfile" "../../../etc/passwd"
      The status should be failure
    End
  End

  Context "when validating context input"
    It "accepts valid build context"
      When call validate_input_python "docker-build" "context" "."
      The status should be success
    End
    It "accepts relative context path"
      When call validate_input_python "docker-build" "context" "src/app"
      The status should be success
    End
    It "accepts path traversal in context (no validation in action)"
      When call validate_input_python "docker-build" "context" "../../../etc"
      The status should be success
    End
  End

  Context "when validating build-args input"
    It "accepts valid build args format"
      When call validate_input_python "docker-build" "build-args" "NODE_ENV=production,VERSION=1.0.0"
      The status should be success
    End
    It "accepts empty build args"
      When call validate_input_python "docker-build" "build-args" ""
      The status should be success
    End
    It "rejects malicious build args"
      When call validate_input_python "docker-build" "build-args" "ARG=\$(rm -rf /)"
      The status should be failure
    End
  End

  Context "when validating cache inputs"
    It "accepts valid cache mode"
      When call validate_input_python "docker-build" "cache-mode" "max"
      The status should be success
    End
    It "accepts min cache mode"
      When call validate_input_python "docker-build" "cache-mode" "min"
      The status should be success
    End
    It "accepts inline cache mode"
      When call validate_input_python "docker-build" "cache-mode" "inline"
      The status should be success
    End
    It "rejects invalid cache mode"
      When call validate_input_python "docker-build" "cache-mode" "invalid"
      The status should be failure
    End
    It "accepts valid cache-from format"
      When call validate_input_python "docker-build" "cache-from" "type=registry,ref=myapp:cache"
      The status should be success
    End
  End

  Context "when validating security features"
    It "accepts scan-image boolean"
      When call validate_input_python "docker-build" "scan-image" "true"
      The status should be success
    End
    It "accepts sign-image boolean"
      When call validate_input_python "docker-build" "sign-image" "false"
      The status should be success
    End
    It "accepts valid SBOM format"
      When call validate_input_python "docker-build" "sbom-format" "spdx-json"
      The status should be success
    End
    It "accepts cyclonedx SBOM format"
      When call validate_input_python "docker-build" "sbom-format" "cyclonedx-json"
      The status should be success
    End
    It "rejects invalid SBOM format"
      When call validate_input_python "docker-build" "sbom-format" "invalid-format"
      The status should be failure
    End
  End

  Context "when validating performance options"
    It "accepts valid parallel builds number"
      When call validate_input_python "docker-build" "parallel-builds" "4"
      The status should be success
    End
    It "accepts auto parallel builds"
      When call validate_input_python "docker-build" "parallel-builds" "0"
      The status should be success
    End
    It "rejects negative parallel builds"
      When call validate_input_python "docker-build" "parallel-builds" "-1"
      The status should be failure
    End
    It "rejects non-numeric parallel builds"
      When call validate_input_python "docker-build" "parallel-builds" "not-a-number"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      When call get_action_name "$ACTION_FILE"
      The output should match pattern "*Docker*"
    End

    It "defines all required inputs"
      When call get_action_inputs "$ACTION_FILE"
      The output should include "tag"
    End

    It "defines all expected outputs"
      When call get_action_outputs "$ACTION_FILE"
      The output should include "image-digest"
      The output should include "metadata"
      The output should include "platforms"
      The output should include "build-time"
    End
  End

  Context "when validating security"
    It "rejects injection in all Docker inputs"
      When call validate_input_python "docker-build" "tag" "v1.0.0;rm -rf /"
      The status should be failure
    End

    It "validates buildx version safely"
      When call validate_input_python "docker-build" "buildx-version" "0.12.0"
      The status should be success
    End

    It "rejects malicious buildx version"
      When call validate_input_python "docker-build" "buildx-version" "0.12;malicious"
      The status should be failure
    End
  End

  Context "when testing outputs"
    It "produces all expected outputs consistently"
      When call test_action_outputs "$ACTION_DIR" "tag" "v1.0.0" "dockerfile" "Dockerfile"
      The status should be success
      The stderr should include "Testing action outputs for: docker-build"
      The stderr should include "Output test passed for: docker-build"
    End
  End
End
_tests/unit/docker-publish/validation.spec.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/usr/bin/env shellspec
# Unit tests for docker-publish action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "docker-publish action"
  ACTION_DIR="docker-publish"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating inputs"
    It "accepts valid registry"
      When call validate_input_python "docker-publish" "registry" "dockerhub"
      The status should be success
    End
    It "accepts github registry"
      When call validate_input_python "docker-publish" "registry" "github"
      The status should be success
    End
    It "accepts both registry"
      When call validate_input_python "docker-publish" "registry" "both"
      The status should be success
    End
    It "rejects empty registry input"
      When call validate_input_python "docker-publish" "registry" ""
      The status should be failure
    End
    It "accepts boolean values for nightly"
      When call validate_input_python "docker-publish" "nightly" "true"
      The status should be success
    End
    It "accepts valid platforms format"
      When call validate_input_python "docker-publish" "platforms" "linux/amd64,linux/arm64"
      The status should be success
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End
    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should match pattern "*Docker*"
    End
  End
End
527
_tests/unit/eslint-lint/validation.spec.sh
Executable file
527
_tests/unit/eslint-lint/validation.spec.sh
Executable file
@@ -0,0 +1,527 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for eslint-lint action validation and logic
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "eslint-lint action"
|
||||
ACTION_DIR="eslint-lint"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating mode input"
|
||||
It "accepts check mode"
|
||||
When call validate_input_python "eslint-lint" "mode" "check"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts fix mode"
|
||||
When call validate_input_python "eslint-lint" "mode" "fix"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty mode (uses default)"
|
||||
When call validate_input_python "eslint-lint" "mode" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid mode"
|
||||
When call validate_input_python "eslint-lint" "mode" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects mode with command injection"
|
||||
When call validate_input_python "eslint-lint" "mode" "check; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating working-directory input"
|
||||
It "accepts default directory"
|
||||
When call validate_input_python "eslint-lint" "working-directory" "."
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid subdirectory"
|
||||
When call validate_input_python "eslint-lint" "working-directory" "src"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty working-directory (uses default)"
|
||||
When call validate_input_python "eslint-lint" "working-directory" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects path traversal"
|
||||
When call validate_input_python "eslint-lint" "working-directory" "../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects directory with command injection"
|
||||
When call validate_input_python "eslint-lint" "working-directory" "src; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating eslint-version input"
|
||||
It "accepts latest version"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "latest"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts semantic version"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "8.57.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts major.minor version"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "8.57"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts major version"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "8"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts version with pre-release"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "9.0.0-beta.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty version (uses default)"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid version format"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects version with command injection"
|
||||
When call validate_input_python "eslint-lint" "eslint-version" "8.57.0; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating config-file input"
|
||||
It "accepts default config file"
|
||||
When call validate_input_python "eslint-lint" "config-file" ".eslintrc"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts custom config file"
|
||||
When call validate_input_python "eslint-lint" "config-file" ".eslintrc.js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts config file in subdirectory"
|
||||
When call validate_input_python "eslint-lint" "config-file" "config/eslint.config.js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty config-file (uses default)"
|
||||
When call validate_input_python "eslint-lint" "config-file" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects config file with path traversal"
|
||||
When call validate_input_python "eslint-lint" "config-file" "../../../.eslintrc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects config file with command injection"
|
||||
When call validate_input_python "eslint-lint" "config-file" ".eslintrc; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating ignore-file input"
|
||||
It "accepts default ignore file"
|
||||
When call validate_input_python "eslint-lint" "ignore-file" ".eslintignore"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts custom ignore file"
|
||||
When call validate_input_python "eslint-lint" "ignore-file" ".eslintignore.custom"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty ignore-file (uses default)"
|
||||
When call validate_input_python "eslint-lint" "ignore-file" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects ignore file with path traversal"
|
||||
When call validate_input_python "eslint-lint" "ignore-file" "../../../.eslintignore"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects ignore file with command injection"
|
||||
When call validate_input_python "eslint-lint" "ignore-file" ".eslintignore; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating file-extensions input"
|
||||
It "accepts default extensions"
|
||||
When call validate_input_python "eslint-lint" "file-extensions" ".js,.jsx,.ts,.tsx"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts single extension"
|
||||
When call validate_input_python "eslint-lint" "file-extensions" ".js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts multiple extensions"
|
||||
When call validate_input_python "eslint-lint" "file-extensions" ".js,.ts,.mjs"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty file-extensions (uses default)"
|
||||
When call validate_input_python "eslint-lint" "file-extensions" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects extensions without leading dot"
|
||||
When call validate_input_python "eslint-lint" "file-extensions" "js,jsx"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects extensions with command injection"
|
||||
When call validate_input_python "eslint-lint" "file-extensions" ".js; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating cache input"
|
||||
It "accepts true"
|
||||
When call validate_input_python "eslint-lint" "cache" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false"
|
||||
When call validate_input_python "eslint-lint" "cache" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty cache (uses default)"
|
||||
When call validate_input_python "eslint-lint" "cache" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid boolean value"
|
||||
When call validate_input_python "eslint-lint" "cache" "maybe"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating max-warnings input"
|
||||
It "accepts default value 0"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" "0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts positive integer"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" "10"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts large number"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" "1000"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty max-warnings (uses default)"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects negative number"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" "-1"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects non-numeric value"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" "abc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects max-warnings with command injection"
|
||||
When call validate_input_python "eslint-lint" "max-warnings" "0; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating fail-on-error input"
|
||||
It "accepts true"
|
||||
When call validate_input_python "eslint-lint" "fail-on-error" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false"
|
||||
When call validate_input_python "eslint-lint" "fail-on-error" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty fail-on-error (uses default)"
|
||||
When call validate_input_python "eslint-lint" "fail-on-error" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid boolean value"
|
||||
When call validate_input_python "eslint-lint" "fail-on-error" "yes"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating report-format input"
|
||||
It "accepts stylish format"
|
||||
When call validate_input_python "eslint-lint" "report-format" "stylish"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts json format"
|
||||
When call validate_input_python "eslint-lint" "report-format" "json"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts sarif format"
|
||||
When call validate_input_python "eslint-lint" "report-format" "sarif"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty report-format (uses default)"
|
||||
When call validate_input_python "eslint-lint" "report-format" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid format"
|
||||
When call validate_input_python "eslint-lint" "report-format" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects format with command injection"
|
||||
When call validate_input_python "eslint-lint" "report-format" "json; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating max-retries input"
|
||||
It "accepts default value 3"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "3"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts retry count of 1"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts retry count of 10"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "10"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty max-retries (uses default)"
|
||||
When call validate_input_python "eslint-lint" "max-retries" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects zero retries"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "0"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects negative retry count"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "-1"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects retry count above 10"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "11"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects non-numeric retry count"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "abc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects retry count with command injection"
|
||||
When call validate_input_python "eslint-lint" "max-retries" "3; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating token input"
|
||||
It "accepts valid GitHub token (classic)"
|
||||
When call validate_input_python "eslint-lint" "token" "ghp_123456789012345678901234567890123456"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid GitHub fine-grained token"
|
||||
When call validate_input_python "eslint-lint" "token" "github_pat_1234567890123456789012345678901234567890123456789012345678901234567890a"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty token (optional)"
|
||||
When call validate_input_python "eslint-lint" "token" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "eslint-lint" "token" "invalid-token"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects token with command injection"
|
||||
When call validate_input_python "eslint-lint" "token" "ghp_123456789012345678901234567890123456; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating username input"
|
||||
It "accepts valid username"
|
||||
When call validate_input_python "eslint-lint" "username" "github-actions"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts username with hyphens"
|
||||
When call validate_input_python "eslint-lint" "username" "my-bot-user"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts alphanumeric username"
|
||||
When call validate_input_python "eslint-lint" "username" "user123"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty username (uses default)"
|
||||
When call validate_input_python "eslint-lint" "username" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects username with command injection"
|
||||
When call validate_input_python "eslint-lint" "username" "user; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects username with special characters"
|
||||
When call validate_input_python "eslint-lint" "username" "user@bot"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating email input"
|
||||
It "accepts valid email"
|
||||
When call validate_input_python "eslint-lint" "email" "github-actions@github.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with plus sign"
|
||||
When call validate_input_python "eslint-lint" "email" "user+bot@example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with subdomain"
|
||||
When call validate_input_python "eslint-lint" "email" "bot@ci.example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty email (uses default)"
|
||||
When call validate_input_python "eslint-lint" "email" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid email format"
|
||||
When call validate_input_python "eslint-lint" "email" "not-an-email"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email with command injection"
|
||||
When call validate_input_python "eslint-lint" "email" "user@example.com; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "ESLint Lint"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "mode"
|
||||
The output should include "working-directory"
|
||||
The output should include "eslint-version"
|
||||
The output should include "config-file"
|
||||
The output should include "ignore-file"
|
||||
The output should include "file-extensions"
|
||||
The output should include "cache"
|
||||
The output should include "max-warnings"
|
||||
The output should include "fail-on-error"
|
||||
The output should include "report-format"
|
||||
The output should include "max-retries"
|
||||
The output should include "token"
|
||||
The output should include "username"
|
||||
The output should include "email"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "status"
|
||||
The output should include "error-count"
|
||||
The output should include "warning-count"
|
||||
The output should include "sarif-file"
|
||||
The output should include "files-checked"
|
||||
The output should include "files-changed"
|
||||
The output should include "errors-fixed"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input requirements"
|
||||
It "has all inputs as optional (with defaults)"
|
||||
When call is_input_required "$ACTION_FILE" "mode"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against path traversal in working-directory"
|
||||
When call validate_input_python "eslint-lint" "working-directory" "../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in mode"
|
||||
When call validate_input_python "eslint-lint" "mode" "check|echo"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command substitution in config-file"
|
||||
When call validate_input_python "eslint-lint" "config-file" "\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against path traversal in token"
|
||||
When call validate_input_python "eslint-lint" "token" "../../../etc/passwd"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in username"
|
||||
When call validate_input_python "eslint-lint" "username" "user&whoami"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command injection in email"
|
||||
When call validate_input_python "eslint-lint" "email" "user@example.com\`whoami\`"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
173
_tests/unit/go-build/validation.spec.sh
Executable file
173
_tests/unit/go-build/validation.spec.sh
Executable file
@@ -0,0 +1,173 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for go-build action validation and logic
|
||||
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "go-build action"
|
||||
ACTION_DIR="go-build"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating go-version input"
|
||||
It "accepts valid Go version"
|
||||
When call validate_input_python "go-build" "go-version" "1.21.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts Go version with v prefix"
|
||||
When call validate_input_python "go-build" "go-version" "v1.21.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts newer Go version"
|
||||
When call validate_input_python "go-build" "go-version" "1.22.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts prerelease Go version"
|
||||
When call validate_input_python "go-build" "go-version" "1.21.0-rc1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid Go version format"
|
||||
When call validate_input_python "go-build" "go-version" "invalid-version"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects Go version with command injection"
|
||||
When call validate_input_python "go-build" "go-version" "1.21; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating destination input"
|
||||
It "accepts valid relative path"
|
||||
When call validate_input_python "go-build" "destination" "./bin"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts nested directory path"
|
||||
When call validate_input_python "go-build" "destination" "build/output"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts simple directory name"
|
||||
When call validate_input_python "go-build" "destination" "dist"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects path traversal in destination"
|
||||
When call validate_input_python "go-build" "destination" "../bin"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects absolute path"
|
||||
When call validate_input_python "go-build" "destination" "/usr/bin"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects destination with command injection"
|
||||
When call validate_input_python "go-build" "destination" "./bin; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating max-retries input"
|
||||
It "accepts valid retry count"
|
||||
When call validate_input_python "go-build" "max-retries" "3"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts minimum retry count"
|
||||
When call validate_input_python "go-build" "max-retries" "1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts maximum retry count"
|
||||
When call validate_input_python "go-build" "max-retries" "10"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects retry count below minimum"
|
||||
When call validate_input_python "go-build" "max-retries" "0"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects retry count above maximum"
|
||||
When call validate_input_python "go-build" "max-retries" "15"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects non-numeric retry count"
|
||||
When call validate_input_python "go-build" "max-retries" "many"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects decimal retry count"
|
||||
When call validate_input_python "go-build" "max-retries" "3.5"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "Go Build"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "go-version"
|
||||
The output should include "destination"
|
||||
The output should include "max-retries"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "build_status"
|
||||
The output should include "test_status"
|
||||
The output should include "go_version"
|
||||
The output should include "binary_path"
|
||||
The output should include "coverage_path"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input defaults"
|
||||
It "has default destination"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "destination" "default"
|
||||
The output should equal "./bin"
|
||||
End
|
||||
|
||||
It "has default max-retries"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "max-retries" "default"
|
||||
The output should equal "3"
|
||||
End
|
||||
|
||||
It "has all inputs as optional"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "" "all_optional"
|
||||
The output should equal "none"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against shell injection in go-version"
|
||||
When call validate_input_python "go-build" "go-version" "1.21.0|echo test"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell injection in destination"
|
||||
When call validate_input_python "go-build" "destination" "bin\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell injection in max-retries"
|
||||
When call validate_input_python "go-build" "max-retries" "3;echo test"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
255
_tests/unit/go-lint/validation.spec.sh
Executable file
255
_tests/unit/go-lint/validation.spec.sh
Executable file
@@ -0,0 +1,255 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for go-lint action validation and logic
|
||||
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "go-lint action"
|
||||
ACTION_DIR="go-lint"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating working-directory input"
|
||||
It "accepts current directory"
|
||||
When call validate_input_python "go-lint" "working-directory" "."
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts relative directory path"
|
||||
When call validate_input_python "go-lint" "working-directory" "src/main"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects path traversal"
|
||||
When call validate_input_python "go-lint" "working-directory" "../src"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects absolute path"
|
||||
When call validate_input_python "go-lint" "working-directory" "/usr/src"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating golangci-lint-version input"
|
||||
It "accepts latest version"
|
||||
When call validate_input_python "go-lint" "golangci-lint-version" "latest"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts semantic version"
|
||||
When call validate_input_python "go-lint" "golangci-lint-version" "1.55.2"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts semantic version with v prefix"
|
||||
When call validate_input_python "go-lint" "golangci-lint-version" "v1.55.2"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid version format"
|
||||
When call validate_input_python "go-lint" "golangci-lint-version" "invalid-version"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating go-version input"
|
||||
It "accepts stable version"
|
||||
When call validate_input_python "go-lint" "go-version" "stable"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts major.minor version"
|
||||
When call validate_input_python "go-lint" "go-version" "1.21"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts full semantic version"
|
||||
When call validate_input_python "go-lint" "go-version" "1.21.5"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid Go version"
|
||||
When call validate_input_python "go-lint" "go-version" "go1.21"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating config-file input"
|
||||
It "accepts default config file"
|
||||
When call validate_input_python "go-lint" "config-file" ".golangci.yml"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts custom config file path"
|
||||
When call validate_input_python "go-lint" "config-file" "configs/golangci.yaml"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects path traversal in config file"
|
||||
When call validate_input_python "go-lint" "config-file" "../configs/golangci.yml"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating timeout input"
|
||||
It "accepts timeout in minutes"
|
||||
When call validate_input_python "go-lint" "timeout" "5m"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts timeout in seconds"
|
||||
When call validate_input_python "go-lint" "timeout" "300s"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts timeout in hours"
|
||||
When call validate_input_python "go-lint" "timeout" "1h"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects timeout without unit"
|
||||
When call validate_input_python "go-lint" "timeout" "300"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects invalid timeout format"
|
||||
When call validate_input_python "go-lint" "timeout" "5 minutes"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating boolean inputs"
|
||||
It "accepts true for cache"
|
||||
When call validate_input_python "go-lint" "cache" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false for cache"
|
||||
When call validate_input_python "go-lint" "cache" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid boolean for fail-on-error"
|
||||
When call validate_input_python "go-lint" "fail-on-error" "maybe"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts true for only-new-issues"
|
||||
When call validate_input_python "go-lint" "only-new-issues" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false for disable-all"
|
||||
When call validate_input_python "go-lint" "disable-all" "false"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating report-format input"
|
||||
It "accepts sarif format"
|
||||
When call validate_input_python "go-lint" "report-format" "sarif"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts json format"
|
||||
When call validate_input_python "go-lint" "report-format" "json"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts github-actions format"
|
||||
When call validate_input_python "go-lint" "report-format" "github-actions"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid report format"
|
||||
When call validate_input_python "go-lint" "report-format" "invalid-format"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating max-retries input"
|
||||
It "accepts valid retry count"
|
||||
When call validate_input_python "go-lint" "max-retries" "3"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts minimum retry count"
|
||||
When call validate_input_python "go-lint" "max-retries" "1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts maximum retry count"
|
||||
When call validate_input_python "go-lint" "max-retries" "10"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects retry count below minimum"
|
||||
When call validate_input_python "go-lint" "max-retries" "0"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects retry count above maximum"
|
||||
When call validate_input_python "go-lint" "max-retries" "15"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating linter lists"
|
||||
It "accepts valid enable-linters list"
|
||||
When call validate_input_python "go-lint" "enable-linters" "gosec,govet,staticcheck"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts single linter in enable-linters"
|
||||
When call validate_input_python "go-lint" "enable-linters" "gosec"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid disable-linters list"
|
||||
When call validate_input_python "go-lint" "disable-linters" "exhaustivestruct,interfacer"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid linter list format"
|
||||
When call validate_input_python "go-lint" "enable-linters" "gosec, govet"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "Go Lint Check"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "error-count"
|
||||
The output should include "sarif-file"
|
||||
The output should include "cache-hit"
|
||||
The output should include "analyzed-files"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against command injection in working-directory"
|
||||
When call validate_input_python "go-lint" "working-directory" "src; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command injection in config-file"
|
||||
When call validate_input_python "go-lint" "config-file" "config.yml\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell expansion in enable-linters"
|
||||
When call validate_input_python "go-lint" "enable-linters" "gosec,\$(echo malicious)"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
297
_tests/unit/language-version-detect/validation.spec.sh
Executable file
297
_tests/unit/language-version-detect/validation.spec.sh
Executable file
@@ -0,0 +1,297 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for language-version-detect action validation and logic
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "language-version-detect action"
|
||||
ACTION_DIR="language-version-detect"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating language input"
|
||||
It "accepts php language"
|
||||
When call validate_input_python "language-version-detect" "language" "php"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts python language"
|
||||
When call validate_input_python "language-version-detect" "language" "python"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts go language"
|
||||
When call validate_input_python "language-version-detect" "language" "go"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts dotnet language"
|
||||
When call validate_input_python "language-version-detect" "language" "dotnet"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid language"
|
||||
When call validate_input_python "language-version-detect" "language" "javascript"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects empty language (required)"
|
||||
When call validate_input_python "language-version-detect" "language" ""
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects language with command injection"
|
||||
When call validate_input_python "language-version-detect" "language" "php; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects language with shell metacharacters"
|
||||
When call validate_input_python "language-version-detect" "language" "php|echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating default-version input for PHP"
|
||||
It "accepts valid PHP version 8.4"
|
||||
When call validate_input_python "language-version-detect" "default-version" "8.4"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid PHP version 8.3"
|
||||
When call validate_input_python "language-version-detect" "default-version" "8.3"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid PHP version 7.4"
|
||||
When call validate_input_python "language-version-detect" "default-version" "7.4"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid PHP version with patch 8.3.1"
|
||||
When call validate_input_python "language-version-detect" "default-version" "8.3.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty default-version (uses language default)"
|
||||
When call validate_input_python "language-version-detect" "default-version" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid PHP version format"
|
||||
When call validate_input_python "language-version-detect" "default-version" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating default-version input for Python"
|
||||
It "accepts valid Python version 3.12"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.12"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Python version 3.11"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.11"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Python version 3.10"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.10"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Python version with patch 3.12.1"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.12.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Python version 3.9"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.9"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Python version 3.8"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.8"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating default-version input for Go"
|
||||
It "accepts valid Go version 1.21"
|
||||
When call validate_input_python "language-version-detect" "default-version" "1.21"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Go version 1.20"
|
||||
When call validate_input_python "language-version-detect" "default-version" "1.20"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Go version with patch 1.21.5"
|
||||
When call validate_input_python "language-version-detect" "default-version" "1.21.5"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid Go version 1.22"
|
||||
When call validate_input_python "language-version-detect" "default-version" "1.22"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating default-version input for .NET"
|
||||
It "accepts valid .NET version 7.0"
|
||||
When call validate_input_python "language-version-detect" "default-version" "7.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid .NET version 8.0"
|
||||
When call validate_input_python "language-version-detect" "default-version" "8.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid .NET version 6.0"
|
||||
When call validate_input_python "language-version-detect" "default-version" "6.0"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid .NET version with patch 7.0.1"
|
||||
When call validate_input_python "language-version-detect" "default-version" "7.0.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid .NET major version 7"
|
||||
When call validate_input_python "language-version-detect" "default-version" "7"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating default-version input edge cases"
|
||||
It "rejects version with v prefix"
|
||||
When call validate_input_python "language-version-detect" "default-version" "v3.12"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects version with command injection"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.12; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects version with shell metacharacters"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.12|echo"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects version with command substitution"
|
||||
When call validate_input_python "language-version-detect" "default-version" "\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects alphabetic version"
|
||||
When call validate_input_python "language-version-detect" "default-version" "latest"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating token input"
|
||||
It "accepts valid GitHub token (classic)"
|
||||
When call validate_input_python "language-version-detect" "token" "ghp_123456789012345678901234567890123456"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid GitHub fine-grained token"
|
||||
When call validate_input_python "language-version-detect" "token" "github_pat_1234567890123456789012345678901234567890123456789012345678901234567890a"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty token (optional)"
|
||||
When call validate_input_python "language-version-detect" "token" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "language-version-detect" "token" "invalid-token"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects token with command injection"
|
||||
When call validate_input_python "language-version-detect" "token" "ghp_123456789012345678901234567890123456; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "Language Version Detect"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "language"
|
||||
The output should include "default-version"
|
||||
The output should include "token"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "detected-version"
|
||||
The output should include "package-manager"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input requirements"
|
||||
It "requires language input"
|
||||
When call is_input_required "$ACTION_FILE" "language"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has default-version as optional input"
|
||||
When call is_input_required "$ACTION_FILE" "default-version"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "has token as optional input"
|
||||
When call is_input_required "$ACTION_FILE" "token"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against path traversal in language"
|
||||
When call validate_input_python "language-version-detect" "language" "../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in language"
|
||||
When call validate_input_python "language-version-detect" "language" "php&whoami"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command substitution in language"
|
||||
When call validate_input_python "language-version-detect" "language" "php\`whoami\`"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against path traversal in default-version"
|
||||
When call validate_input_python "language-version-detect" "default-version" "../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in default-version"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.12&echo"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command substitution in default-version"
|
||||
When call validate_input_python "language-version-detect" "default-version" "3.12\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against path traversal in token"
|
||||
When call validate_input_python "language-version-detect" "token" "../../../etc/passwd"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
216
_tests/unit/npm-publish/validation.spec.sh
Executable file
216
_tests/unit/npm-publish/validation.spec.sh
Executable file
@@ -0,0 +1,216 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for npm-publish action validation and logic
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "npm-publish action"
|
||||
ACTION_DIR="npm-publish"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating registry-url input"
|
||||
It "accepts valid https registry URL"
|
||||
When call validate_input_python "npm-publish" "registry-url" "https://registry.npmjs.org/"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts https registry URL without trailing slash"
|
||||
When call validate_input_python "npm-publish" "registry-url" "https://registry.npmjs.org"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts http registry URL"
|
||||
When call validate_input_python "npm-publish" "registry-url" "http://localhost:4873"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts registry URL with path"
|
||||
When call validate_input_python "npm-publish" "registry-url" "https://npm.example.com/registry/"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects non-http(s) URL"
|
||||
When call validate_input_python "npm-publish" "registry-url" "ftp://registry.example.com"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects invalid URL format"
|
||||
When call validate_input_python "npm-publish" "registry-url" "not-a-url"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating npm_token input"
|
||||
It "accepts valid GitHub token format (exact length)"
|
||||
When call validate_input_python "npm-publish" "npm_token" "ghp_123456789012345678901234567890123456"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid NPM classic token format"
|
||||
When call validate_input_python "npm-publish" "npm_token" "npm_1234567890123456789012345678901234567890"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts GitHub fine-grained token (exact length)"
|
||||
When call validate_input_python "npm-publish" "npm_token" "github_pat_1234567890123456789012345678901234567890123456789012345678901234567890a"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "npm-publish" "npm_token" "invalid-token-format"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects empty token"
|
||||
When call validate_input_python "npm-publish" "npm_token" ""
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects token with command injection"
|
||||
When call validate_input_python "npm-publish" "npm_token" "ghp_123456789012345678901234567890123456; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating scope input"
|
||||
It "accepts valid npm scope"
|
||||
When call validate_input_python "npm-publish" "scope" "@myorg"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts scope with hyphens"
|
||||
When call validate_input_python "npm-publish" "scope" "@my-organization"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts scope with numbers"
|
||||
When call validate_input_python "npm-publish" "scope" "@myorg123"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects scope without @ prefix"
|
||||
When call validate_input_python "npm-publish" "scope" "myorg"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects scope with invalid characters"
|
||||
When call validate_input_python "npm-publish" "scope" "@my_org!"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects scope with command injection"
|
||||
When call validate_input_python "npm-publish" "scope" "@myorg; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating access input"
|
||||
It "accepts public access"
|
||||
When call validate_input_python "npm-publish" "access" "public"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts restricted access"
|
||||
When call validate_input_python "npm-publish" "access" "restricted"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts private access (no specific validation)"
|
||||
When call validate_input_python "npm-publish" "access" "private"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty access"
|
||||
When call validate_input_python "npm-publish" "access" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating provenance input"
|
||||
It "accepts true for provenance"
|
||||
When call validate_input_python "npm-publish" "provenance" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false for provenance"
|
||||
When call validate_input_python "npm-publish" "provenance" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts any value for provenance (no specific validation)"
|
||||
When call validate_input_python "npm-publish" "provenance" "maybe"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating dry-run input"
|
||||
It "accepts true for dry-run"
|
||||
When call validate_input_python "npm-publish" "dry-run" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false for dry-run"
|
||||
When call validate_input_python "npm-publish" "dry-run" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts any value for dry-run (no specific validation)"
|
||||
When call validate_input_python "npm-publish" "dry-run" "yes"
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "Publish to NPM"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "npm_token"
|
||||
The output should include "registry-url"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "registry-url"
|
||||
The output should include "scope"
|
||||
The output should include "package-version"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input requirements"
|
||||
It "requires npm_token input"
|
||||
inputs=$(get_action_inputs "$ACTION_FILE")
|
||||
When call echo "$inputs"
|
||||
The output should include "npm_token"
|
||||
End
|
||||
|
||||
It "has registry-url as optional with default"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "registry-url" "default"
|
||||
The output should include "registry.npmjs.org"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against path traversal in all inputs"
|
||||
When call validate_input_python "npm-publish" "scope" "@../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters"
|
||||
When call validate_input_python "npm-publish" "registry-url" "https://registry.npmjs.org|echo"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command substitution"
|
||||
When call validate_input_python "npm-publish" "scope" "@\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
458
_tests/unit/php-tests/validation.spec.sh
Executable file
458
_tests/unit/php-tests/validation.spec.sh
Executable file
@@ -0,0 +1,458 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for php-tests action validation and logic
|
||||
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "php-tests action"
|
||||
ACTION_DIR="php-tests"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating token input"
|
||||
It "accepts GitHub token expression"
|
||||
When call validate_input_python "php-tests" "token" "\${{ github.token }}"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts GitHub fine-grained token"
|
||||
When call validate_input_python "php-tests" "token" "ghp_abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts GitHub app token"
|
||||
When call validate_input_python "php-tests" "token" "ghs_abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts GitHub enterprise token"
|
||||
When call validate_input_python "php-tests" "token" "ghe_abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "php-tests" "token" "invalid-token"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects token with command injection"
|
||||
When call validate_input_python "php-tests" "token" "ghp_token; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts empty token (uses default)"
|
||||
When call validate_input_python "php-tests" "token" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating username input"
|
||||
It "accepts valid GitHub username"
|
||||
When call validate_input_python "php-tests" "username" "github-actions"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts username with hyphens"
|
||||
When call validate_input_python "php-tests" "username" "user-name"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts username with numbers"
|
||||
When call validate_input_python "php-tests" "username" "user123"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts single character username"
|
||||
When call validate_input_python "php-tests" "username" "a"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts maximum length username"
|
||||
When call validate_input_python "php-tests" "username" "abcdefghijklmnopqrstuvwxyz0123456789abc"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects username too long"
|
||||
When call validate_input_python "php-tests" "username" "abcdefghijklmnopqrstuvwxyz0123456789abcd"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects username with command injection semicolon"
|
||||
When call validate_input_python "php-tests" "username" "user; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects username with command injection ampersand"
|
||||
When call validate_input_python "php-tests" "username" "user && rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects username with command injection pipe"
|
||||
When call validate_input_python "php-tests" "username" "user | rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts empty username (uses default)"
|
||||
When call validate_input_python "php-tests" "username" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating email input"
|
||||
It "accepts valid email"
|
||||
When call validate_input_python "php-tests" "email" "user@example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with subdomain"
|
||||
When call validate_input_python "php-tests" "email" "user@mail.example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with plus sign"
|
||||
When call validate_input_python "php-tests" "email" "user+tag@example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with numbers"
|
||||
When call validate_input_python "php-tests" "email" "user123@example123.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with hyphens"
|
||||
When call validate_input_python "php-tests" "email" "user-name@example-domain.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects email without at symbol"
|
||||
When call validate_input_python "php-tests" "email" "userexample.com"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email without domain"
|
||||
When call validate_input_python "php-tests" "email" "user@"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email without username"
|
||||
When call validate_input_python "php-tests" "email" "@example.com"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email without dot in domain"
|
||||
When call validate_input_python "php-tests" "email" "user@example"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email with spaces"
|
||||
When call validate_input_python "php-tests" "email" "user @example.com"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts empty email (uses default)"
|
||||
When call validate_input_python "php-tests" "email" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "PHP Tests"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "token"
|
||||
The output should include "username"
|
||||
The output should include "email"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "test-status"
|
||||
The output should include "tests-run"
|
||||
The output should include "tests-passed"
|
||||
The output should include "framework"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input requirements"
|
||||
It "has all inputs as optional"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "" "all_optional"
|
||||
The output should equal "none"
|
||||
End
|
||||
|
||||
It "has empty default token (runtime fallback)"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "token" "default"
|
||||
The output should equal "no-default"
|
||||
End
|
||||
|
||||
It "has correct default username"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "username" "default"
|
||||
The output should equal "github-actions"
|
||||
End
|
||||
|
||||
It "has correct default email"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "email" "default"
|
||||
The output should equal "github-actions@github.com"
|
||||
End
|
||||
End
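# Illustration only (not part of the original spec): the assertions above query the shared
# validator's property mode directly, e.g.
#
#   uv run "_tests/shared/validation_core.py" --property "php-tests/action.yml" "username" "default"
#
# which prints the input's default ("github-actions" here), "no-default" when an input has
# no default value, and "none" for the ("", "all_optional") query when no input is required.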
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against command injection in username"
|
||||
When call validate_input_python "php-tests" "username" "user\`whoami\`"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in email"
|
||||
When call validate_input_python "php-tests" "email" "user@example.com; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against variable expansion in token"
|
||||
When call validate_input_python "php-tests" "token" "\${MALICIOUS_VAR}"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against backtick injection in username"
|
||||
When call validate_input_python "php-tests" "username" "user\`echo malicious\`"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing PHP-specific validations"
|
||||
It "validates username length boundaries"
|
||||
When call validate_input_python "php-tests" "username" "$(awk 'BEGIN{for(i=1;i<=40;i++)printf "a"}')"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates email format for Git commits"
|
||||
When call validate_input_python "php-tests" "email" "noreply@github.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "validates default values are secure"
|
||||
When call validate_input_python "php-tests" "username" "github-actions"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "validates default email is secure"
|
||||
When call validate_input_python "php-tests" "email" "github-actions@github.com"
|
||||
The status should be success
|
||||
End

# Helper function that replicates the PHPUnit output parsing logic from action.yml
parse_phpunit_output() {
  local phpunit_output="$1"
  local phpunit_exit_code="$2"

  local tests_run="0"
  local tests_passed="0"

  # Pattern 1: "OK (N test(s), M assertions)" - success case (handles both singular and plural)
  if echo "$phpunit_output" | grep -qE 'OK \([0-9]+ tests?,'; then
    tests_run=$(echo "$phpunit_output" | grep -oE 'OK \([0-9]+ tests?,' | grep -oE '[0-9]+' | head -1)
    tests_passed="$tests_run"
  # Pattern 2: "Tests: N" line - failure/error/skipped case
  elif echo "$phpunit_output" | grep -qE '^Tests:'; then
    tests_run=$(echo "$phpunit_output" | grep -E '^Tests:' | grep -oE '[0-9]+' | head -1)

    # Calculate passed from failures and errors
    failures=$(echo "$phpunit_output" | grep -oE 'Failures: [0-9]+' | grep -oE '[0-9]+' | head -1 || echo "0")
    errors=$(echo "$phpunit_output" | grep -oE 'Errors: [0-9]+' | grep -oE '[0-9]+' | head -1 || echo "0")
    tests_passed=$((tests_run - failures - errors))

    # Ensure non-negative
    if [ "$tests_passed" -lt 0 ]; then
      tests_passed="0"
    fi
  fi

  # Determine status
  local status
  if [ "$phpunit_exit_code" -eq 0 ]; then
    status="success"
  else
    status="failure"
  fi

  # Output as KEY=VALUE format
  echo "tests_run=$tests_run"
  echo "tests_passed=$tests_passed"
  echo "status=$status"
}

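# Illustration only (not part of the original spec): the helper above is exercised by
# feeding it captured PHPUnit output plus the exit code, roughly like
#
#   output="$(vendor/bin/phpunit 2>&1)"; exit_code=$?   # hypothetical invocation path
#   parse_phpunit_output "$output" "$exit_code"
#
# For output containing "Tests: 5, Assertions: 10, Failures: 2." and a non-zero exit
# code it prints one KEY=VALUE pair per line: tests_run=5, tests_passed=3, status=failure.
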
Context "when parsing PHPUnit output"
|
||||
# Success cases
|
||||
It "parses single successful test"
|
||||
output="OK (1 test, 2 assertions)"
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=1"
|
||||
The line 2 of output should equal "tests_passed=1"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "parses multiple successful tests"
|
||||
output="OK (5 tests, 10 assertions)"
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=5"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "parses successful tests with plural form"
|
||||
output="OK (25 tests, 50 assertions)"
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=25"
|
||||
The line 2 of output should equal "tests_passed=25"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
# Failure cases
|
||||
It "parses test failures"
|
||||
output="FAILURES!
|
||||
Tests: 5, Assertions: 10, Failures: 2."
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=3"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
It "parses test errors"
|
||||
output="ERRORS!
|
||||
Tests: 5, Assertions: 10, Errors: 1."
|
||||
When call parse_phpunit_output "$output" 2
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=4"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
It "parses mixed failures and errors"
|
||||
output="FAILURES!
|
||||
Tests: 10, Assertions: 20, Failures: 2, Errors: 1."
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 1 of output should equal "tests_run=10"
|
||||
The line 2 of output should equal "tests_passed=7"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
It "handles all tests failing"
|
||||
output="FAILURES!
|
||||
Tests: 5, Assertions: 10, Failures: 5."
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=0"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
It "prevents negative passed count"
|
||||
output="ERRORS!
|
||||
Tests: 2, Assertions: 4, Failures: 1, Errors: 2."
|
||||
When call parse_phpunit_output "$output" 2
|
||||
The line 1 of output should equal "tests_run=2"
|
||||
The line 2 of output should equal "tests_passed=0"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
# Skipped tests
|
||||
It "parses skipped tests with success"
|
||||
output="OK, but some tests were skipped!
|
||||
Tests: 5, Assertions: 8, Skipped: 2."
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=5"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
# Edge cases
|
||||
It "handles no parseable output (fallback)"
|
||||
output="Some random output without test info"
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 1 of output should equal "tests_run=0"
|
||||
The line 2 of output should equal "tests_passed=0"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
It "handles empty output"
|
||||
output=""
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=0"
|
||||
The line 2 of output should equal "tests_passed=0"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "handles PHPUnit 10+ format with singular test"
|
||||
output="OK (1 test, 3 assertions)"
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=1"
|
||||
The line 2 of output should equal "tests_passed=1"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "handles verbose output with noise"
|
||||
output="PHPUnit 10.5.0 by Sebastian Bergmann and contributors.
|
||||
Runtime: PHP 8.3.0
|
||||
|
||||
..... 5 / 5 (100%)
|
||||
|
||||
Time: 00:00.123, Memory: 10.00 MB
|
||||
|
||||
OK (5 tests, 10 assertions)"
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=5"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "handles failure output with full details"
|
||||
output="PHPUnit 10.5.0 by Sebastian Bergmann and contributors.
|
||||
|
||||
..F.. 5 / 5 (100%)
|
||||
|
||||
Time: 00:00.234, Memory: 12.00 MB
|
||||
|
||||
FAILURES!
|
||||
Tests: 5, Assertions: 10, Failures: 1."
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=4"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
# Status determination tests
|
||||
It "marks as success when exit code is 0"
|
||||
output="OK (3 tests, 6 assertions)"
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "marks as failure when exit code is non-zero"
|
||||
output="OK (3 tests, 6 assertions)"
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
|
||||
It "handles skipped tests without OK prefix"
|
||||
output="Tests: 5, Assertions: 8, Skipped: 2."
|
||||
When call parse_phpunit_output "$output" 0
|
||||
The line 1 of output should equal "tests_run=5"
|
||||
The line 2 of output should equal "tests_passed=5"
|
||||
The line 3 of output should equal "status=success"
|
||||
End
|
||||
|
||||
It "handles risky tests output"
|
||||
output="FAILURES!
|
||||
Tests: 8, Assertions: 15, Failures: 1, Risky: 2."
|
||||
When call parse_phpunit_output "$output" 1
|
||||
The line 1 of output should equal "tests_run=8"
|
||||
The line 2 of output should equal "tests_passed=7"
|
||||
The line 3 of output should equal "status=failure"
|
||||
End
|
||||
End
|
||||
End
|
||||
End
|
||||
90
_tests/unit/pr-lint/validation.spec.sh
Executable file
@@ -0,0 +1,90 @@
#!/usr/bin/env shellspec
# Unit tests for pr-lint action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "pr-lint action"
  ACTION_DIR="pr-lint"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating token input"
    It "accepts valid GitHub token"
      When call validate_input_python "pr-lint" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End
    It "rejects injection in token"
      When call validate_input_python "pr-lint" "token" "token; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating username input"
    It "accepts valid username"
      When call validate_input_python "pr-lint" "username" "github-actions"
      The status should be success
    End
    It "rejects injection in username"
      When call validate_input_python "pr-lint" "username" "user; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating email input"
    It "accepts valid email"
      When call validate_input_python "pr-lint" "email" "test@example.com"
      The status should be success
    End
    It "rejects invalid email format"
      When call validate_input_python "pr-lint" "email" "invalid-email"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should equal "PR Lint"
    End

    It "defines required inputs"
      inputs=$(get_action_inputs "$ACTION_FILE")
      When call echo "$inputs"
      The output should include "token"
      The output should include "username"
      The output should include "email"
    End

    It "defines expected outputs"
      outputs=$(get_action_outputs "$ACTION_FILE")
      When call echo "$outputs"
      The output should include "validation_status"
      The output should include "errors_found"
    End
  End

  Context "when validating security"
    It "validates token format"
      When call validate_input_python "pr-lint" "token" "invalid-token;rm -rf /"
      The status should be failure
    End

    It "validates email format"
      When call validate_input_python "pr-lint" "email" "invalid@email"
      The status should be failure
    End
  End

  Context "when testing outputs"
    It "produces all expected outputs"
      When call test_action_outputs "$ACTION_DIR" "token" "ghp_test" "username" "test" "email" "test@example.com"
      The status should be success
      The stderr should include "Testing action outputs for: pr-lint"
      The stderr should include "Output test passed for: pr-lint"
    End
  End
End
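# Illustration only (not part of the original spec): test_action_outputs takes the action
# directory followed by alternating input-name/value pairs, e.g.
#
#   test_action_outputs "pr-lint" "token" "ghp_test" "username" "test" "email" "test@example.com"
#
# Per the assertions above it logs "Testing action outputs for: <action>" and
# "Output test passed for: <action>" to stderr; how the inputs are wired into the action
# (presumably via INPUT_* environment variables, as in spec_helper.sh) is an assumption here.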
172
_tests/unit/pre-commit/validation.spec.sh
Executable file
@@ -0,0 +1,172 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for pre-commit action validation and logic
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "pre-commit action"
|
||||
ACTION_DIR="pre-commit"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating pre-commit-config input"
|
||||
It "accepts default config file"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" ".pre-commit-config.yaml"
|
||||
The status should be success
|
||||
End
|
||||
It "accepts yml extension"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" ".pre-commit-config.yml"
|
||||
The status should be success
|
||||
End
|
||||
# NOTE: Test framework uses default validation for 'pre-commit-config' input
|
||||
# Default validation only checks for injection patterns (';', '&&', '$()')
|
||||
It "rejects path traversal"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" "../config.yaml"
|
||||
The status should be failure
|
||||
End
|
||||
It "rejects absolute paths"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" "/etc/passwd"
|
||||
The status should be failure
|
||||
End
|
||||
It "accepts non-yaml extensions (framework default validation)"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" "config.txt"
|
||||
The status should be success
|
||||
End
|
||||
It "rejects injection patterns"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" "config.yaml; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating base-branch input"
|
||||
It "accepts valid branch name"
|
||||
When call validate_input_python "pre-commit" "base-branch" "main"
|
||||
The status should be success
|
||||
End
|
||||
It "accepts feature branch"
|
||||
When call validate_input_python "pre-commit" "base-branch" "feature/test-branch"
|
||||
The status should be success
|
||||
End
|
||||
It "accepts branch with numbers"
|
||||
When call validate_input_python "pre-commit" "base-branch" "release-2024.1"
|
||||
The status should be success
|
||||
End
|
||||
It "rejects injection in branch"
|
||||
When call validate_input_python "pre-commit" "base-branch" "branch; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
# NOTE: Test framework uses default validation for 'base-branch'
|
||||
# Default validation only checks for injection patterns (';', '&&', '$()')
|
||||
It "accepts branch with tilde (framework default validation)"
|
||||
When call validate_input_python "pre-commit" "base-branch" "branch~1"
|
||||
The status should be success
|
||||
End
|
||||
It "accepts branch starting with dot (framework default validation)"
|
||||
When call validate_input_python "pre-commit" "base-branch" ".hidden-branch"
|
||||
The status should be success
|
||||
End
|
||||
It "rejects injection patterns in branch"
|
||||
When call validate_input_python "pre-commit" "base-branch" "branch && rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating token input"
|
||||
It "accepts valid GitHub token"
|
||||
When call validate_input_python "pre-commit" "token" "ghp_123456789012345678901234567890123456"
|
||||
The status should be success
|
||||
End
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "pre-commit" "token" "invalid-token-format"
|
||||
The status should be failure
|
||||
End
|
||||
It "rejects injection in token"
|
||||
When call validate_input_python "pre-commit" "token" "token; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating commit_user input"
|
||||
It "accepts valid user"
|
||||
When call validate_input_python "pre-commit" "commit_user" "GitHub Actions"
|
||||
The status should be success
|
||||
End
|
||||
It "rejects injection in user"
|
||||
When call validate_input_python "pre-commit" "commit_user" "user; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating commit_email input"
|
||||
It "accepts valid email"
|
||||
When call validate_input_python "pre-commit" "commit_email" "test@example.com"
|
||||
The status should be success
|
||||
End
|
||||
It "accepts github-actions email"
|
||||
When call validate_input_python "pre-commit" "commit_email" "github-actions@github.com"
|
||||
The status should be success
|
||||
End
|
||||
It "rejects invalid email format"
|
||||
When call validate_input_python "pre-commit" "commit_email" "invalid-email"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "pre-commit"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
inputs=$(get_action_inputs "$ACTION_FILE")
|
||||
When call echo "$inputs"
|
||||
The output should include "pre-commit-config"
|
||||
The output should include "base-branch"
|
||||
The output should include "token"
|
||||
The output should include "commit_user"
|
||||
The output should include "commit_email"
|
||||
End
|
||||
|
||||
It "has all inputs as optional"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "" "all_optional"
|
||||
The output should equal "none"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
outputs=$(get_action_outputs "$ACTION_FILE")
|
||||
When call echo "$outputs"
|
||||
The output should include "hooks_passed"
|
||||
The output should include "files_changed"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating security"
|
||||
It "rejects path traversal"
|
||||
When call validate_input_python "pre-commit" "pre-commit-config" "../../malicious.yaml"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates branch name security"
|
||||
When call validate_input_python "pre-commit" "base-branch" "main && rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates email format"
|
||||
When call validate_input_python "pre-commit" "commit_email" "invalid@email"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing outputs"
|
||||
It "produces all expected outputs"
|
||||
When call test_action_outputs "$ACTION_DIR" "pre-commit-config" ".pre-commit-config.yaml" "token" "ghp_test" "commit_user" "test" "commit_email" "test@example.com"
|
||||
The status should be success
|
||||
The stderr should include "Testing action outputs for: pre-commit"
|
||||
The stderr should include "Output test passed for: pre-commit"
|
||||
End
|
||||
End
|
||||
End
|
||||
515
_tests/unit/prettier-lint/validation.spec.sh
Executable file
@@ -0,0 +1,515 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for prettier-lint action validation and logic
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "prettier-lint action"
|
||||
ACTION_DIR="prettier-lint"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating mode input"
|
||||
It "accepts check mode"
|
||||
When call validate_input_python "prettier-lint" "mode" "check"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts fix mode"
|
||||
When call validate_input_python "prettier-lint" "mode" "fix"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty mode (uses default)"
|
||||
When call validate_input_python "prettier-lint" "mode" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid mode"
|
||||
When call validate_input_python "prettier-lint" "mode" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects mode with command injection"
|
||||
When call validate_input_python "prettier-lint" "mode" "check; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating working-directory input"
|
||||
It "accepts default directory"
|
||||
When call validate_input_python "prettier-lint" "working-directory" "."
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid subdirectory"
|
||||
When call validate_input_python "prettier-lint" "working-directory" "src"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty working-directory (uses default)"
|
||||
When call validate_input_python "prettier-lint" "working-directory" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects path traversal"
|
||||
When call validate_input_python "prettier-lint" "working-directory" "../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects directory with command injection"
|
||||
When call validate_input_python "prettier-lint" "working-directory" "src; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating prettier-version input"
|
||||
It "accepts latest version"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "latest"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts semantic version"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "3.2.5"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts major.minor version"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "3.2"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts major version"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "3"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts version with pre-release"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "3.0.0-alpha.1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty version (uses default)"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid version format"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects version with command injection"
|
||||
When call validate_input_python "prettier-lint" "prettier-version" "3.2.5; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating config-file input"
|
||||
It "accepts default config file"
|
||||
When call validate_input_python "prettier-lint" "config-file" ".prettierrc"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts custom config file"
|
||||
When call validate_input_python "prettier-lint" "config-file" ".prettierrc.js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts config file in subdirectory"
|
||||
When call validate_input_python "prettier-lint" "config-file" "config/prettier.config.js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty config-file (uses default)"
|
||||
When call validate_input_python "prettier-lint" "config-file" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects config file with path traversal"
|
||||
When call validate_input_python "prettier-lint" "config-file" "../../../.prettierrc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects config file with command injection"
|
||||
When call validate_input_python "prettier-lint" "config-file" ".prettierrc; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating ignore-file input"
|
||||
It "accepts default ignore file"
|
||||
When call validate_input_python "prettier-lint" "ignore-file" ".prettierignore"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts custom ignore file"
|
||||
When call validate_input_python "prettier-lint" "ignore-file" ".prettierignore.custom"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty ignore-file (uses default)"
|
||||
When call validate_input_python "prettier-lint" "ignore-file" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects ignore file with path traversal"
|
||||
When call validate_input_python "prettier-lint" "ignore-file" "../../../.prettierignore"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects ignore file with command injection"
|
||||
When call validate_input_python "prettier-lint" "ignore-file" ".prettierignore; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating file-pattern input"
|
||||
It "accepts default pattern"
|
||||
When call validate_input_python "prettier-lint" "file-pattern" "**/*.{js,jsx,ts,tsx,css,scss,json,md,yaml,yml}"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts simple pattern"
|
||||
When call validate_input_python "prettier-lint" "file-pattern" "**/*.js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts multiple patterns"
|
||||
When call validate_input_python "prettier-lint" "file-pattern" "**/*.{js,ts}"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts specific directory pattern"
|
||||
When call validate_input_python "prettier-lint" "file-pattern" "src/**/*.js"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty file-pattern (uses default)"
|
||||
When call validate_input_python "prettier-lint" "file-pattern" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects pattern with command injection"
|
||||
When call validate_input_python "prettier-lint" "file-pattern" "**/*.js; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating cache input"
|
||||
It "accepts true"
|
||||
When call validate_input_python "prettier-lint" "cache" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false"
|
||||
When call validate_input_python "prettier-lint" "cache" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty cache (uses default)"
|
||||
When call validate_input_python "prettier-lint" "cache" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid boolean value"
|
||||
When call validate_input_python "prettier-lint" "cache" "maybe"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating fail-on-error input"
|
||||
It "accepts true"
|
||||
When call validate_input_python "prettier-lint" "fail-on-error" "true"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts false"
|
||||
When call validate_input_python "prettier-lint" "fail-on-error" "false"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty fail-on-error (uses default)"
|
||||
When call validate_input_python "prettier-lint" "fail-on-error" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid boolean value"
|
||||
When call validate_input_python "prettier-lint" "fail-on-error" "yes"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating report-format input"
|
||||
It "accepts json format"
|
||||
When call validate_input_python "prettier-lint" "report-format" "json"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts sarif format"
|
||||
When call validate_input_python "prettier-lint" "report-format" "sarif"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty report-format (uses default)"
|
||||
When call validate_input_python "prettier-lint" "report-format" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid format"
|
||||
When call validate_input_python "prettier-lint" "report-format" "invalid"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects format with command injection"
|
||||
When call validate_input_python "prettier-lint" "report-format" "json; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating max-retries input"
|
||||
It "accepts default value 3"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "3"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts retry count of 1"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "1"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts retry count of 10"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "10"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty max-retries (uses default)"
|
||||
When call validate_input_python "prettier-lint" "max-retries" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects zero retries"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "0"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects negative retry count"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "-1"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects retry count above 10"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "11"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects non-numeric retry count"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "abc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects retry count with command injection"
|
||||
When call validate_input_python "prettier-lint" "max-retries" "3; echo"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating plugins input"
|
||||
It "accepts empty plugins (optional)"
|
||||
When call validate_input_python "prettier-lint" "plugins" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts single plugin"
|
||||
When call validate_input_python "prettier-lint" "plugins" "prettier-plugin-tailwindcss"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts multiple plugins"
|
||||
When call validate_input_python "prettier-lint" "plugins" "prettier-plugin-tailwindcss,prettier-plugin-organize-imports"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts scoped plugin"
|
||||
When call validate_input_python "prettier-lint" "plugins" "@trivago/prettier-plugin-sort-imports"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects plugins with command injection"
|
||||
When call validate_input_python "prettier-lint" "plugins" "prettier-plugin-tailwindcss; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating token input"
|
||||
It "accepts valid GitHub token (classic)"
|
||||
When call validate_input_python "prettier-lint" "token" "ghp_123456789012345678901234567890123456"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts valid GitHub fine-grained token"
|
||||
When call validate_input_python "prettier-lint" "token" "github_pat_1234567890123456789012345678901234567890123456789012345678901234567890a"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty token (optional)"
|
||||
When call validate_input_python "prettier-lint" "token" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "prettier-lint" "token" "invalid-token"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects token with command injection"
|
||||
When call validate_input_python "prettier-lint" "token" "ghp_123456789012345678901234567890123456; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating username input"
|
||||
It "accepts valid username"
|
||||
When call validate_input_python "prettier-lint" "username" "github-actions"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts username with hyphens"
|
||||
When call validate_input_python "prettier-lint" "username" "my-bot-user"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts alphanumeric username"
|
||||
When call validate_input_python "prettier-lint" "username" "user123"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty username (uses default)"
|
||||
When call validate_input_python "prettier-lint" "username" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects username with command injection"
|
||||
When call validate_input_python "prettier-lint" "username" "user; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects username with special characters"
|
||||
When call validate_input_python "prettier-lint" "username" "user@bot"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating email input"
|
||||
It "accepts valid email"
|
||||
When call validate_input_python "prettier-lint" "email" "github-actions@github.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with plus sign"
|
||||
When call validate_input_python "prettier-lint" "email" "user+bot@example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with subdomain"
|
||||
When call validate_input_python "prettier-lint" "email" "bot@ci.example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts empty email (uses default)"
|
||||
When call validate_input_python "prettier-lint" "email" ""
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid email format"
|
||||
When call validate_input_python "prettier-lint" "email" "not-an-email"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email with command injection"
|
||||
When call validate_input_python "prettier-lint" "email" "user@example.com; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "Prettier Lint"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "mode"
|
||||
The output should include "working-directory"
|
||||
The output should include "prettier-version"
|
||||
The output should include "config-file"
|
||||
The output should include "ignore-file"
|
||||
The output should include "file-pattern"
|
||||
The output should include "cache"
|
||||
The output should include "fail-on-error"
|
||||
The output should include "report-format"
|
||||
The output should include "max-retries"
|
||||
The output should include "plugins"
|
||||
The output should include "token"
|
||||
The output should include "username"
|
||||
The output should include "email"
|
||||
End
|
||||
|
||||
It "defines expected outputs"
|
||||
When call get_action_outputs "$ACTION_FILE"
|
||||
The output should include "status"
|
||||
The output should include "files-checked"
|
||||
The output should include "unformatted-files"
|
||||
The output should include "sarif-file"
|
||||
The output should include "files-changed"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input requirements"
|
||||
It "has all inputs as optional (with defaults)"
|
||||
When call is_input_required "$ACTION_FILE" "mode"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against path traversal in working-directory"
|
||||
When call validate_input_python "prettier-lint" "working-directory" "../../../etc"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in mode"
|
||||
When call validate_input_python "prettier-lint" "mode" "check|echo"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command substitution in config-file"
|
||||
When call validate_input_python "prettier-lint" "config-file" "\$(whoami)"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against path traversal in token"
|
||||
When call validate_input_python "prettier-lint" "token" "../../../etc/passwd"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in username"
|
||||
When call validate_input_python "prettier-lint" "username" "user&whoami"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command injection in email"
|
||||
When call validate_input_python "prettier-lint" "email" "user@example.com\`whoami\`"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against command injection in plugins"
|
||||
When call validate_input_python "prettier-lint" "plugins" "plugin1,plugin2; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
149
_tests/unit/python-lint-fix/validation.spec.sh
Executable file
@@ -0,0 +1,149 @@
|
||||
#!/usr/bin/env shellspec
|
||||
# Unit tests for python-lint-fix action validation and logic
|
||||
|
||||
# Framework is automatically loaded via spec_helper.sh
|
||||
|
||||
Describe "python-lint-fix action"
|
||||
ACTION_DIR="python-lint-fix"
|
||||
ACTION_FILE="$ACTION_DIR/action.yml"
|
||||
|
||||
Context "when validating token input"
|
||||
It "accepts GitHub token expression"
|
||||
When call validate_input_python "python-lint-fix" "token" "\${{ github.token }}"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts GitHub fine-grained token"
|
||||
When call validate_input_python "python-lint-fix" "token" "ghp_abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts GitHub app token"
|
||||
When call validate_input_python "python-lint-fix" "token" "ghs_abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects invalid token format"
|
||||
When call validate_input_python "python-lint-fix" "token" "invalid-token"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects token with command injection"
|
||||
When call validate_input_python "python-lint-fix" "token" "ghp_token; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts empty token (uses default)"
|
||||
When call validate_input_python "python-lint-fix" "token" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating username input"
|
||||
It "accepts valid GitHub username"
|
||||
When call validate_input_python "python-lint-fix" "username" "github-actions"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts username with hyphens"
|
||||
When call validate_input_python "python-lint-fix" "username" "user-name"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts username with numbers"
|
||||
When call validate_input_python "python-lint-fix" "username" "user123"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects username too long"
|
||||
When call validate_input_python "python-lint-fix" "username" "$(awk 'BEGIN{for(i=1;i<=40;i++)printf "a"}')"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects username with command injection"
|
||||
When call validate_input_python "python-lint-fix" "username" "user; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts empty username (uses default)"
|
||||
When call validate_input_python "python-lint-fix" "username" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when validating email input"
|
||||
It "accepts valid email"
|
||||
When call validate_input_python "python-lint-fix" "email" "user@example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "accepts email with subdomain"
|
||||
When call validate_input_python "python-lint-fix" "email" "user@mail.example.com"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "rejects email without at symbol"
|
||||
When call validate_input_python "python-lint-fix" "email" "userexample.com"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email without domain"
|
||||
When call validate_input_python "python-lint-fix" "email" "user@"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "rejects email with spaces"
|
||||
When call validate_input_python "python-lint-fix" "email" "user @example.com"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "accepts empty email (uses default)"
|
||||
When call uv run "_tests/shared/validation_core.py" --validate "python-lint-fix" "email" ""
|
||||
The status should be success
|
||||
End
|
||||
End
|
||||
|
||||
Context "when checking action.yml structure"
|
||||
It "has valid YAML syntax"
|
||||
When call validate_action_yml_quiet "$ACTION_FILE"
|
||||
The status should be success
|
||||
End
|
||||
|
||||
It "has correct action name"
|
||||
name=$(get_action_name "$ACTION_FILE")
|
||||
When call echo "$name"
|
||||
The output should equal "Python Lint and Fix"
|
||||
End
|
||||
|
||||
It "defines expected inputs"
|
||||
When call get_action_inputs "$ACTION_FILE"
|
||||
The output should include "token"
|
||||
The output should include "username"
|
||||
The output should include "email"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing input requirements"
|
||||
It "has all inputs as optional"
|
||||
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "" "all_optional"
|
||||
The output should equal "none"
|
||||
End
|
||||
End
|
||||
|
||||
Context "when testing security validations"
|
||||
It "validates against command injection in username"
|
||||
When call validate_input_python "python-lint-fix" "username" "user\`whoami\`"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against shell metacharacters in email"
|
||||
When call validate_input_python "python-lint-fix" "email" "user@example.com; rm -rf /"
|
||||
The status should be failure
|
||||
End
|
||||
|
||||
It "validates against variable expansion in token"
|
||||
When call validate_input_python "python-lint-fix" "token" "\${MALICIOUS_VAR}"
|
||||
The status should be failure
|
||||
End
|
||||
End
|
||||
End
|
||||
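# Illustration only (not part of the original specs): the email test above bypasses the
# validate_input_python wrapper and calls the shared validator directly:
#
#   uv run "_tests/shared/validation_core.py" --validate "python-lint-fix" "email" ""
#
# which suggests validate_input_python delegates to that script's --validate mode and maps
# its exit code to success/failure; treat the wrapper's exact behaviour as an assumption.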
125
_tests/unit/release-monthly/validation.spec.sh
Executable file
@@ -0,0 +1,125 @@
#!/usr/bin/env shellspec
# Unit tests for release-monthly action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "release-monthly action"
  ACTION_DIR="release-monthly"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating token input"
    # NOTE: Test framework uses strict GitHub token format validation
    It "accepts valid GitHub token with correct format"
      When call validate_input_python "release-monthly" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End
    It "rejects empty token"
      When call validate_input_python "release-monthly" "token" ""
      The status should be failure
    End
    It "rejects injection in token"
      When call validate_input_python "release-monthly" "token" "token; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating dry-run input"
    It "accepts true value"
      When call validate_input_python "release-monthly" "dry-run" "true"
      The status should be success
    End
    It "accepts false value"
      When call validate_input_python "release-monthly" "dry-run" "false"
      The status should be success
    End
    # NOTE: Convention-based validation applies boolean validation to 'dry-run'
    # Boolean validator rejects non-boolean values
    It "rejects invalid boolean value"
      When call validate_input_python "release-monthly" "dry-run" "maybe"
      The status should be failure
    End
    It "rejects injection in dry-run"
      When call validate_input_python "release-monthly" "dry-run" "true; rm -rf /"
      The status should be failure
    End
  End

  Context "when validating prefix input"
    # NOTE: prefix has default: '' so empty values are accepted
    It "accepts empty prefix (has empty default)"
      When call validate_input_python "release-monthly" "prefix" ""
      The status should be success
    End
    It "accepts valid prefix"
      When call validate_input_python "release-monthly" "prefix" "v"
      The status should be success
    End
    It "accepts alphanumeric prefix"
      When call validate_input_python "release-monthly" "prefix" "release-v1.0-"
      The status should be success
    End
    # NOTE: Test framework uses default validation for 'prefix'
    # Default validation only checks injection patterns, not character restrictions
    It "accepts special characters in prefix (framework default validation)"
      When call validate_input_python "release-monthly" "prefix" "invalid@prefix"
      The status should be success
    End
    It "accepts spaces in prefix (framework default validation)"
      When call validate_input_python "release-monthly" "prefix" "invalid prefix"
      The status should be success
    End
    It "rejects injection in prefix"
      When call validate_input_python "release-monthly" "prefix" "prefix; rm -rf /"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should equal "Do Monthly Release"
    End

    It "defines required inputs"
      inputs=$(get_action_inputs "$ACTION_FILE")
      When call echo "$inputs"
      The output should include "token"
      The output should include "dry-run"
      The output should include "prefix"
    End

    It "defines expected outputs"
      outputs=$(get_action_outputs "$ACTION_FILE")
      When call echo "$outputs"
      The output should include "release-tag"
      The output should include "release-url"
      The output should include "previous-tag"
    End
  End

  Context "when validating security"
    It "validates token is required"
      When call validate_input_python "release-monthly" "token" ""
      The status should be failure
    End

    It "validates prefix format"
      When call validate_input_python "release-monthly" "prefix" "invalid;prefix"
      The status should be failure
    End
  End

  Context "when testing outputs"
    It "produces all expected outputs"
      When call test_action_outputs "$ACTION_DIR" "token" "ghp_test" "dry-run" "true" "prefix" "v"
      The status should be success
      The stderr should include "Testing action outputs for: release-monthly"
      The stderr should include "Output test passed for: release-monthly"
    End
  End
End
116
_tests/unit/security-scan/validation.spec.sh
Executable file
@@ -0,0 +1,116 @@
#!/usr/bin/env shellspec
# Unit tests for security-scan action validation and logic
# Framework is automatically loaded via spec_helper.sh

Describe "security-scan action"
  ACTION_DIR="security-scan"
  ACTION_FILE="$ACTION_DIR/action.yml"

  Context "when validating token input"
    It "accepts valid GitHub token"
      When call validate_input_python "security-scan" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End

    It "rejects injection in token"
      When call validate_input_python "security-scan" "token" "token; rm -rf /"
      The status should be failure
    End

    It "accepts empty token (optional)"
      When call validate_input_python "security-scan" "token" ""
      The status should be success
    End
  End

  Context "when validating actionlint-enabled input"
    It "accepts true value"
      When call validate_input_python "security-scan" "actionlint-enabled" "true"
      The status should be success
    End

    It "accepts false value"
      When call validate_input_python "security-scan" "actionlint-enabled" "false"
      The status should be success
    End

    It "rejects non-boolean value"
      When call validate_input_python "security-scan" "actionlint-enabled" "maybe"
      The status should be failure
    End
  End

  Context "when checking action.yml structure"
    It "has valid YAML syntax"
      When call validate_action_yml_quiet "$ACTION_FILE"
      The status should be success
    End

    It "has correct action name"
      name=$(get_action_name "$ACTION_FILE")
      When call echo "$name"
      The output should equal "Security Scan"
    End

    It "defines all expected inputs"
      inputs=$(get_action_inputs "$ACTION_FILE")
      When call echo "$inputs"
      The output should include "gitleaks-license"
      The output should include "gitleaks-config"
      The output should include "trivy-severity"
      The output should include "trivy-scanners"
      The output should include "trivy-timeout"
      The output should include "actionlint-enabled"
      The output should include "token"
    End

    It "defines all expected outputs"
      outputs=$(get_action_outputs "$ACTION_FILE")
      When call echo "$outputs"
      The output should include "has_trivy_results"
      The output should include "has_gitleaks_results"
      The output should include "total_issues"
      The output should include "critical_issues"
    End

    It "uses composite run type"
      run_type=$(get_action_runs_using "$ACTION_FILE")
      When call echo "$run_type"
      The output should equal "composite"
    End
  End

  Context "when validating inputs per conventions"
    It "validates token against github_token convention"
      When call validate_input_python "security-scan" "token" "ghp_123456789012345678901234567890123456"
      The status should be success
    End

    It "validates actionlint-enabled as boolean"
      When call validate_input_python "security-scan" "actionlint-enabled" "true"
      The status should be success
    End

    It "rejects invalid boolean for actionlint-enabled"
      When call validate_input_python "security-scan" "actionlint-enabled" "1"
      The status should be failure
    End
  End

  Context "when testing optional inputs"
    It "accepts empty gitleaks-license"
      When call validate_input_python "security-scan" "gitleaks-license" ""
      The status should be success
    End

    It "accepts empty token"
      When call validate_input_python "security-scan" "token" ""
      The status should be success
    End

    It "accepts valid gitleaks-license value"
      When call validate_input_python "security-scan" "gitleaks-license" "license-key-123"
      The status should be success
    End
  End
End
552
_tests/unit/spec_helper.sh
Executable file
552
_tests/unit/spec_helper.sh
Executable file
@@ -0,0 +1,552 @@
#!/usr/bin/env bash
# ShellSpec spec helper for GitHub Actions Testing Framework
# This file is automatically loaded by ShellSpec for all tests

set -euo pipefail

# Get the project root directory (where .shellspec is located)
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"

# Test framework directories
TEST_ROOT="${PROJECT_ROOT}/_tests"
FRAMEWORK_DIR="${TEST_ROOT}/framework"
FIXTURES_DIR="${FRAMEWORK_DIR}/fixtures"
MOCKS_DIR="${FRAMEWORK_DIR}/mocks"

# Export directories for use by test cases
export FIXTURES_DIR MOCKS_DIR
# Only create TEMP_DIR if not already set (framework setup.sh will create it)
if [ -z "${TEMP_DIR:-}" ]; then
TEMP_DIR=$(mktemp -d) || exit 1
fi

# Load framework utilities
# shellcheck source=_tests/framework/setup.sh
source "${FRAMEWORK_DIR}/setup.sh"
# shellcheck source=_tests/framework/utils.sh
source "${FRAMEWORK_DIR}/utils.sh"

# Initialize testing framework
init_testing_framework

# ShellSpec specific setup
spec_helper_configure() {
# Configure ShellSpec behavior

# Set up environment variables for tests
export GITHUB_ACTIONS=true
export GITHUB_WORKSPACE="${PROJECT_ROOT}"
export GITHUB_REPOSITORY="ivuorinen/actions"
export GITHUB_SHA="test-sha"
export GITHUB_REF="refs/heads/main"
export GITHUB_TOKEN="test-token"

# Temporary directory already created by mktemp above

# Set up default GITHUB_OUTPUT if not already set
if [[ -z ${GITHUB_OUTPUT:-} ]]; then
export GITHUB_OUTPUT="${TEMP_DIR}/default-github-output"
touch "$GITHUB_OUTPUT"
fi

# Quiet logging during ShellSpec runs to avoid output interference
if [[ -z ${SHELLSPEC_VERSION:-} ]]; then
log_info "ShellSpec helper configured - framework loaded"
fi
}

# Run configuration
spec_helper_configure

# Helper functions specifically for ShellSpec tests

# Set up default input values for testing a single input
# This prevents validation failures when testing one input at a time
setup_default_inputs() {
local action_name="$1"
local input_name="$2"

case "$action_name" in
"github-release")
[[ "$input_name" != "version" ]] && export INPUT_VERSION="1.0.0"
;;
"docker-build" | "docker-publish" | "docker-publish-gh" | "docker-publish-hub")
[[ "$input_name" != "image-name" ]] && export INPUT_IMAGE_NAME="test-image"
[[ "$input_name" != "tag" ]] && export INPUT_TAG="latest"
[[ "$action_name" == "docker-publish" && "$input_name" != "registry" ]] && export INPUT_REGISTRY="dockerhub"
;;
"npm-publish")
[[ "$input_name" != "npm_token" ]] && export INPUT_NPM_TOKEN="ghp_123456789012345678901234567890123456"
;;
"csharp-publish")
[[ "$input_name" != "token" ]] && export INPUT_TOKEN="ghp_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
[[ "$input_name" != "version" ]] && export INPUT_VERSION="1.0.0"
[[ "$input_name" != "namespace" ]] && export INPUT_NAMESPACE="test-namespace"
;;
"php-composer")
[[ "$input_name" != "php" ]] && export INPUT_PHP="8.1"
;;
"php-tests" | "php-laravel-phpunit")
[[ "$input_name" != "php-version" ]] && export INPUT_PHP_VERSION="8.1"
;;
"go-build" | "go-lint")
[[ "$input_name" != "go-version" ]] && export INPUT_GO_VERSION="1.21"
;;
"common-retry")
[[ "$input_name" != "command" ]] && export INPUT_COMMAND="echo test"
;;
"dotnet-version-detect")
[[ "$input_name" != "default-version" ]] && export INPUT_DEFAULT_VERSION="8.0"
;;
"python-version-detect" | "python-version-detect-v2")
[[ "$input_name" != "default-version" ]] && export INPUT_DEFAULT_VERSION="3.11"
;;
"php-version-detect")
[[ "$input_name" != "default-version" ]] && export INPUT_DEFAULT_VERSION="8.1"
;;
"go-version-detect")
[[ "$input_name" != "default-version" ]] && export INPUT_DEFAULT_VERSION="1.22"
;;
"validate-inputs")
[[ "$input_name" != "action-type" && "$input_name" != "action" && "$input_name" != "rules-file" && "$input_name" != "fail-on-error" ]] && export INPUT_ACTION_TYPE="test-action"
;;
"codeql-analysis")
[[ "$input_name" != "language" ]] && export INPUT_LANGUAGE="javascript"
[[ "$input_name" != "token" ]] && export INPUT_TOKEN="ghp_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
;;
"version-validator")
[[ "$input_name" != "version" ]] && export INPUT_VERSION="1.0.0"
;;
"release-monthly")
[[ "$input_name" != "token" ]] && export INPUT_TOKEN="ghp_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
;;
esac
}

# Clean up default input values after testing
cleanup_default_inputs() {
local action_name="$1"
local input_name="$2"

case "$action_name" in
"github-release")
[[ "$input_name" != "version" ]] && unset INPUT_VERSION
;;
"docker-build" | "docker-publish" | "docker-publish-gh" | "docker-publish-hub")
[[ "$input_name" != "image-name" ]] && unset INPUT_IMAGE_NAME
[[ "$input_name" != "tag" ]] && unset INPUT_TAG
[[ "$action_name" == "docker-publish" && "$input_name" != "registry" ]] && unset INPUT_REGISTRY
;;
"npm-publish")
[[ "$input_name" != "npm_token" ]] && unset INPUT_NPM_TOKEN
;;
"csharp-publish")
[[ "$input_name" != "token" ]] && unset INPUT_TOKEN
[[ "$input_name" != "version" ]] && unset INPUT_VERSION
[[ "$input_name" != "namespace" ]] && unset INPUT_NAMESPACE
;;
"php-composer")
[[ "$input_name" != "php" ]] && unset INPUT_PHP
;;
"php-tests" | "php-laravel-phpunit")
[[ "$input_name" != "php-version" ]] && unset INPUT_PHP_VERSION
;;
"go-build" | "go-lint")
[[ "$input_name" != "go-version" ]] && unset INPUT_GO_VERSION
;;
"common-retry")
[[ "$input_name" != "command" ]] && unset INPUT_COMMAND
;;
"dotnet-version-detect")
[[ "$input_name" != "default-version" ]] && unset INPUT_DEFAULT_VERSION
;;
"python-version-detect" | "python-version-detect-v2")
[[ "$input_name" != "default-version" ]] && unset INPUT_DEFAULT_VERSION
;;
"php-version-detect")
[[ "$input_name" != "default-version" ]] && unset INPUT_DEFAULT_VERSION
;;
"go-version-detect")
[[ "$input_name" != "default-version" ]] && unset INPUT_DEFAULT_VERSION
;;
"validate-inputs")
[[ "$input_name" != "action-type" && "$input_name" != "action" && "$input_name" != "rules-file" && "$input_name" != "fail-on-error" ]] && unset INPUT_ACTION_TYPE
;;
"codeql-analysis")
[[ "$input_name" != "language" ]] && unset INPUT_LANGUAGE
[[ "$input_name" != "token" ]] && unset INPUT_TOKEN
;;
"version-validator")
[[ "$input_name" != "version" ]] && unset INPUT_VERSION
;;
"release-monthly")
[[ "$input_name" != "token" ]] && unset INPUT_TOKEN
;;
esac
}

# Enhanced test validation for ShellSpec
shellspec_validate_action_output() {
local expected_key="$1"
local expected_value="$2"
local output_file="${3:-$GITHUB_OUTPUT}"

if [[ ! -f $output_file ]]; then
echo "Output file not found: $output_file" >&2
return 1
fi

if grep -Fq "${expected_key}=${expected_value}" "$output_file"; then
return 0
else
echo "Expected output not found: $expected_key=$expected_value" >&2
echo "Actual outputs:" >&2
cat "$output_file" >&2
return 1
fi
}

# Mock action execution for ShellSpec tests
shellspec_mock_action_run() {
local action_dir="$1"
shift

# Set up inputs as environment variables
while [[ $# -gt 1 ]]; do
local key="$1"
local value="$2"
# Convert dashes to underscores for environment variable names
local env_key="${key//-/_}"
export "INPUT_$(echo "$env_key" | tr '[:lower:]' '[:upper:]')"="$value"
shift 2
done

# For testing, we'll simulate action outputs based on the action type
local action_name
action_name=$(basename "$action_dir")

case "$action_name" in
"node-setup")
echo "node-version=18.0.0" >>"$GITHUB_OUTPUT"
echo "package-manager=npm" >>"$GITHUB_OUTPUT"
echo "cache-hit=false" >>"$GITHUB_OUTPUT"
;;
"docker-build")
echo "image-digest=sha256:abc123" >>"$GITHUB_OUTPUT"
echo "build-time=45" >>"$GITHUB_OUTPUT"
echo "platforms=linux/amd64" >>"$GITHUB_OUTPUT"
;;
"common-file-check")
echo "found=true" >>"$GITHUB_OUTPUT"
;;
"common-retry")
echo "success=true" >>"$GITHUB_OUTPUT"
echo "attempts=1" >>"$GITHUB_OUTPUT"
echo "exit-code=0" >>"$GITHUB_OUTPUT"
echo "duration=5" >>"$GITHUB_OUTPUT"
;;
"compress-images")
echo "images_compressed=true" >>"$GITHUB_OUTPUT"
printf "compression_report=## Compression Results\n- 3 images compressed\n- 25%% size reduction\n" >>"$GITHUB_OUTPUT"
;;
"csharp-build")
echo "build_status=success" >>"$GITHUB_OUTPUT"
echo "test_status=success" >>"$GITHUB_OUTPUT"
echo "dotnet_version=7.0" >>"$GITHUB_OUTPUT"
echo "artifacts_path=**/bin/Release/**/*" >>"$GITHUB_OUTPUT"
echo "test_results_path=**/*.trx" >>"$GITHUB_OUTPUT"
;;
"csharp-lint-check")
echo "lint_status=success" >>"$GITHUB_OUTPUT"
echo "errors_count=0" >>"$GITHUB_OUTPUT"
echo "warnings_count=0" >>"$GITHUB_OUTPUT"
;;
"csharp-publish")
echo "publish_status=success" >>"$GITHUB_OUTPUT"
echo "package_version=1.2.3" >>"$GITHUB_OUTPUT"
echo "package_url=https://github.com/ivuorinen/packages/nuget" >>"$GITHUB_OUTPUT"
;;
"docker-publish")
echo "registry=github,dockerhub" >>"$GITHUB_OUTPUT"
echo "tags=latest,v1.2.3" >>"$GITHUB_OUTPUT"
echo "build-time=120" >>"$GITHUB_OUTPUT"
echo 'platform-matrix={"linux/amd64":"success","linux/arm64":"success"}' >>"$GITHUB_OUTPUT"
echo 'scan-results={"vulnerabilities":0}' >>"$GITHUB_OUTPUT"
;;
"docker-publish-gh")
echo "image-name=ghcr.io/ivuorinen/test" >>"$GITHUB_OUTPUT"
echo "digest=sha256:abc123def456" >>"$GITHUB_OUTPUT"
echo "tags=ghcr.io/ivuorinen/test:latest,ghcr.io/ivuorinen/test:v1.2.3" >>"$GITHUB_OUTPUT"
echo "provenance=true" >>"$GITHUB_OUTPUT"
echo "sbom=ghcr.io/ivuorinen/test.sbom" >>"$GITHUB_OUTPUT"
echo 'scan-results={"vulnerabilities":0,"critical":0}' >>"$GITHUB_OUTPUT"
echo 'platform-matrix={"linux/amd64":"success","linux/arm64":"success"}' >>"$GITHUB_OUTPUT"
echo "build-time=180" >>"$GITHUB_OUTPUT"
;;
"docker-publish-hub")
echo "image-name=ivuorinen/test-app" >>"$GITHUB_OUTPUT"
echo "digest=sha256:hub123def456" >>"$GITHUB_OUTPUT"
echo "tags=ivuorinen/test-app:latest,ivuorinen/test-app:v1.2.3" >>"$GITHUB_OUTPUT"
echo "repo-url=https://hub.docker.com/r/ivuorinen/test-app" >>"$GITHUB_OUTPUT"
echo 'scan-results={"vulnerabilities":2,"critical":0}' >>"$GITHUB_OUTPUT"
echo 'platform-matrix={"linux/amd64":"success","linux/arm64":"success"}' >>"$GITHUB_OUTPUT"
echo "build-time=240" >>"$GITHUB_OUTPUT"
echo "signature=signed" >>"$GITHUB_OUTPUT"
;;
"dotnet-version-detect")
echo "dotnet-version=7.0.403" >>"$GITHUB_OUTPUT"
;;
"eslint-check")
echo "error-count=0" >>"$GITHUB_OUTPUT"
echo "warning-count=3" >>"$GITHUB_OUTPUT"
echo "sarif-file=reports/eslint.sarif" >>"$GITHUB_OUTPUT"
echo "files-checked=15" >>"$GITHUB_OUTPUT"
;;
"eslint-fix")
echo "fixed-count=5" >>"$GITHUB_OUTPUT"
echo "files-fixed=3" >>"$GITHUB_OUTPUT"
echo "error-count=0" >>"$GITHUB_OUTPUT"
echo "warning-count=0" >>"$GITHUB_OUTPUT"
;;
"github-release")
echo "release-id=123456789" >>"$GITHUB_OUTPUT"
echo "release-url=https://github.com/ivuorinen/test/releases/tag/v1.2.3" >>"$GITHUB_OUTPUT"
echo "asset-urls=https://github.com/ivuorinen/test/releases/download/v1.2.3/app.tar.gz" >>"$GITHUB_OUTPUT"
echo "tag-name=v1.2.3" >>"$GITHUB_OUTPUT"
;;
"go-build")
echo "build_status=success" >>"$GITHUB_OUTPUT"
echo "test_status=success" >>"$GITHUB_OUTPUT"
echo "go_version=1.21.5" >>"$GITHUB_OUTPUT"
echo "binary_path=./bin" >>"$GITHUB_OUTPUT"
echo "coverage_path=coverage.out" >>"$GITHUB_OUTPUT"
;;
"go-lint")
echo "lint_status=success" >>"$GITHUB_OUTPUT"
echo "issues_count=0" >>"$GITHUB_OUTPUT"
echo "files_checked=25" >>"$GITHUB_OUTPUT"
echo "golangci_version=1.55.2" >>"$GITHUB_OUTPUT"
;;
"go-version-detect")
echo "go-version=1.21" >>"$GITHUB_OUTPUT"
;;
"npm-publish")
echo "publish-status=success" >>"$GITHUB_OUTPUT"
echo "package-version=1.2.3" >>"$GITHUB_OUTPUT"
echo "registry-url=https://registry.npmjs.org" >>"$GITHUB_OUTPUT"
echo "package-url=https://www.npmjs.com/package/test-package" >>"$GITHUB_OUTPUT"
;;
"php-composer")
echo "composer-version=2.6.5" >>"$GITHUB_OUTPUT"
echo "install-status=success" >>"$GITHUB_OUTPUT"
echo "dependencies-count=15" >>"$GITHUB_OUTPUT"
echo "php-version=8.2.0" >>"$GITHUB_OUTPUT"
echo "lock-file-updated=false" >>"$GITHUB_OUTPUT"
;;
*)
# Generic mock outputs
echo "status=success" >>"$GITHUB_OUTPUT"
;;
esac
}

# Use centralized Python validation system for input validation testing
shellspec_test_input_validation() {
local action_dir="$1"
local input_name="$2"
local test_value="$3"
local expected_result="${4:-success}"

# Get the action name from the directory
local action_name
action_name=$(basename "$action_dir")

# Set up environment for Python validation
local temp_output_file
temp_output_file=$(mktemp)

# Capture original INPUT_ACTION_TYPE state to restore after test
local original_action_type_set=false
local original_action_type_value=""
if [[ -n "${INPUT_ACTION_TYPE+x}" ]]; then
original_action_type_set=true
original_action_type_value="$INPUT_ACTION_TYPE"
fi

# Set environment variables for the validation script
# Only set INPUT_ACTION_TYPE if we're not testing the action input
if [[ "$input_name" != "action" ]]; then
export INPUT_ACTION_TYPE="$action_name"
fi

# Set default values for commonly required inputs to avoid validation failures
# when testing only one input at a time
setup_default_inputs "$action_name" "$input_name"

# Convert input name to uppercase and replace dashes with underscores
local input_var_name
input_var_name="INPUT_${input_name//-/_}"
input_var_name="$(echo "$input_var_name" | tr '[:lower:]' '[:upper:]')"
export "$input_var_name"="$test_value"
export GITHUB_OUTPUT="$temp_output_file"

# Run the Python validation script and capture exit code
local exit_code
if python3 "${PROJECT_ROOT}/validate-inputs/validator.py" >/dev/null 2>&1; then
exit_code=0
else
exit_code=1
fi

# Determine the actual result based on exit code
local actual_result
if [[ $exit_code -eq 0 ]]; then
actual_result="success"
else
actual_result="failure"
fi

# Clean up
rm -f "$temp_output_file" 2>/dev/null || true
unset "$input_var_name"

# Clean up default inputs
cleanup_default_inputs "$action_name" "$input_name"

# Restore original INPUT_ACTION_TYPE state
if [[ "$original_action_type_set" == "true" ]]; then
export INPUT_ACTION_TYPE="$original_action_type_value"
else
unset INPUT_ACTION_TYPE
fi

# Return based on expected result
if [[ $actual_result == "$expected_result" ]]; then
return 0
else
return 1
fi
}

# Test environment setup that works with ShellSpec
shellspec_setup_test_env() {
local test_name="${1:-shellspec-test}"

# Create unique temporary directory for this test
export SHELLSPEC_TEST_TEMP_DIR="${TEMP_DIR}/${test_name}-$$"
mkdir -p "$SHELLSPEC_TEST_TEMP_DIR"

# Create fake GitHub workspace
export SHELLSPEC_TEST_WORKSPACE="${SHELLSPEC_TEST_TEMP_DIR}/workspace"
mkdir -p "$SHELLSPEC_TEST_WORKSPACE"

# Setup fake GitHub outputs
export GITHUB_OUTPUT="${SHELLSPEC_TEST_TEMP_DIR}/github-output"
export GITHUB_ENV="${SHELLSPEC_TEST_TEMP_DIR}/github-env"
export GITHUB_PATH="${SHELLSPEC_TEST_TEMP_DIR}/github-path"
export GITHUB_STEP_SUMMARY="${SHELLSPEC_TEST_TEMP_DIR}/github-step-summary"

# Initialize output files
touch "$GITHUB_OUTPUT" "$GITHUB_ENV" "$GITHUB_PATH" "$GITHUB_STEP_SUMMARY"

# Change to test workspace
cd "$SHELLSPEC_TEST_WORKSPACE"
}

# Test environment cleanup for ShellSpec
shellspec_cleanup_test_env() {
local test_name="${1:-shellspec-test}"

if [[ -n ${SHELLSPEC_TEST_TEMP_DIR:-} && -d $SHELLSPEC_TEST_TEMP_DIR ]]; then
rm -rf "$SHELLSPEC_TEST_TEMP_DIR"
fi

# Return to project root
cd "$PROJECT_ROOT"
}

# Export functions for use in specs
export -f shellspec_validate_action_output shellspec_mock_action_run
export -f shellspec_setup_test_env shellspec_cleanup_test_env shellspec_test_input_validation

# Create alias for backward compatibility (override framework version)
test_input_validation() {
shellspec_test_input_validation "$@"
}

# Export all framework functions for backward compatibility
export -f setup_test_env cleanup_test_env create_mock_repo
export -f create_mock_node_repo
export -f validate_action_output check_required_tools
export -f log_info log_success log_warning log_error
export -f validate_action_yml get_action_inputs get_action_outputs get_action_name
export -f test_action_outputs test_external_usage test_input_validation

# Quiet wrapper for validate_action_yml in tests
validate_action_yml_quiet() {
validate_action_yml "$1" "true"
}

# =============================================================================
# VALIDATION TEST HELPERS
# =============================================================================
# Note: These helpers return validation results but cannot use ShellSpec commands
# They must be called from within ShellSpec It blocks

# Modern Python-based validation function for direct testing
validate_input_python() {
local action_type="$1"
local input_name="$2"
local input_value="$3"

# Set up environment variables for Python validator
export INPUT_ACTION_TYPE="$action_type"
export VALIDATOR_QUIET="1" # Suppress success messages for tests

# Set default values for commonly required inputs to avoid validation failures
# when testing only one input at a time
setup_default_inputs "$action_type" "$input_name"

# Set the target input
local input_var_name="INPUT_${input_name//-/_}"
input_var_name="$(echo "$input_var_name" | tr '[:lower:]' '[:upper:]')"
export "$input_var_name"="$input_value"

# Set up GitHub output file
local temp_output
temp_output=$(mktemp)
export GITHUB_OUTPUT="$temp_output"

# Call Python validator directly

if [[ "${SHELLSPEC_DEBUG:-}" == "1" ]]; then
echo "DEBUG: Testing $action_type $input_name=$input_value"
echo "DEBUG: Environment variables:"
env | grep "^INPUT_" | sort
fi

# Run validator and output everything to stdout for ShellSpec
uv run "${PROJECT_ROOT}/validate-inputs/validator.py" 2>&1
local exit_code=$?

# Clean up target input
unset INPUT_ACTION_TYPE "$input_var_name" GITHUB_OUTPUT VALIDATOR_QUIET
rm -f "$temp_output" 2>/dev/null || true

# Clean up default inputs
cleanup_default_inputs "$action_type" "$input_name"

# Return the exit code for ShellSpec to check
return $exit_code
}

# Export all new simplified helpers (functions are moved above)
export -f validate_action_yml_quiet validate_input_python

# Removed EXIT trap setup to avoid conflicts with ShellSpec
# ShellSpec handles its own cleanup, and our framework cleanup is handled in setup.sh

# Quiet logging during ShellSpec runs
if [[ -z ${SHELLSPEC_VERSION:-} ]]; then
log_success "ShellSpec spec helper loaded successfully"
fi
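# Example (illustrative sketch only, assuming the helpers above are loaded via
# spec_helper.sh): a spec could drive the mock runner and assert on the fake
# GITHUB_OUTPUT roughly like this:
#   shellspec_setup_test_env "node-setup-demo"
#   shellspec_mock_action_run "node-setup" "node-version" "18"
#   shellspec_validate_action_output "package-manager" "npm"
#   shellspec_cleanup_test_env "node-setup-demo"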
139
_tests/unit/stale/validation.spec.sh
Executable file
139
_tests/unit/stale/validation.spec.sh
Executable file
@@ -0,0 +1,139 @@
#!/usr/bin/env shellspec
# Unit tests for stale action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "stale action"
ACTION_DIR="stale"
ACTION_FILE="$ACTION_DIR/action.yml"

Context "when validating token input"
It "accepts GitHub token expression"
When call validate_input_python "stale" "token" "\${{ github.token }}"
The status should be success
End

It "accepts GitHub fine-grained token"
When call validate_input_python "stale" "token" "ghp_abcdefghijklmnopqrstuvwxyz1234567890"
The status should be success
End

It "rejects invalid token format"
When call validate_input_python "stale" "token" "invalid-token"
The status should be failure
End

It "rejects token with command injection"
When call validate_input_python "stale" "token" "ghp_token; rm -rf /"
The status should be failure
End

It "accepts empty token (uses default)"
When call validate_input_python "stale" "token" ""
The status should be success
End
End

Context "when validating days-before-stale input"
It "accepts valid day count"
When call validate_input_python "stale" "days-before-stale" "30"
The status should be success
End

It "accepts minimum days"
When call validate_input_python "stale" "days-before-stale" "1"
The status should be success
End

It "accepts reasonable maximum days"
When call validate_input_python "stale" "days-before-stale" "365"
The status should be success
End

It "rejects zero days"
When call validate_input_python "stale" "days-before-stale" "0"
The status should be failure
End

It "rejects negative days"
When call validate_input_python "stale" "days-before-stale" "-1"
The status should be failure
End

It "rejects non-numeric days"
When call validate_input_python "stale" "days-before-stale" "many"
The status should be failure
End
End

Context "when validating days-before-close input"
It "accepts valid day count"
When call validate_input_python "stale" "days-before-close" "7"
The status should be success
End

It "accepts minimum days"
When call validate_input_python "stale" "days-before-close" "1"
The status should be success
End

It "accepts reasonable maximum days"
When call validate_input_python "stale" "days-before-close" "365"
The status should be success
End

It "rejects zero days"
When call validate_input_python "stale" "days-before-close" "0"
The status should be failure
End

It "rejects negative days"
When call validate_input_python "stale" "days-before-close" "-1"
The status should be failure
End
End

Context "when checking action.yml structure"
It "has valid YAML syntax"
When call validate_action_yml_quiet "$ACTION_FILE"
The status should be success
End

It "has correct action name"
name=$(get_action_name "$ACTION_FILE")
When call echo "$name"
The output should equal "Stale"
End

It "defines expected inputs"
When call get_action_inputs "$ACTION_FILE"
The output should include "token"
The output should include "days-before-stale"
The output should include "days-before-close"
End
End

Context "when testing input requirements"
It "has all inputs as optional"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "" "all_optional"
The output should equal "none"
End
End

Context "when testing security validations"
It "validates against command injection in token"
When call validate_input_python "stale" "token" "ghp_token\`whoami\`"
The status should be failure
End

It "validates against variable expansion in days"
When call validate_input_python "stale" "days-before-stale" "30\${HOME}"
The status should be failure
End

It "validates against shell metacharacters in days"
When call validate_input_python "stale" "days-before-close" "7; rm -rf /"
The status should be failure
End
End
End
111
_tests/unit/sync-labels/validation.spec.sh
Executable file
111
_tests/unit/sync-labels/validation.spec.sh
Executable file
@@ -0,0 +1,111 @@
#!/usr/bin/env shellspec
# Unit tests for sync-labels action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "sync-labels action"
ACTION_DIR="sync-labels"
ACTION_FILE="$ACTION_DIR/action.yml"

Context "when validating token input"
It "accepts GitHub token expression"
When call uv run "_tests/shared/validation_core.py" --validate "sync-labels" "token" "\${{ github.token }}"
The status should be success
End

It "accepts classic GitHub token"
When call uv run "_tests/shared/validation_core.py" --validate "sync-labels" "token" "ghp_abcdefghijklmnopqrstuvwxyz1234567890"
The status should be success
End

It "accepts fine-grained GitHub token"
When call uv run "_tests/shared/validation_core.py" --validate "sync-labels" "token" "github_pat_11ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
The status should be success
End

It "rejects invalid token format"
When call validate_input_python "sync-labels" "token" "invalid-token"
The status should be failure
End

It "rejects token with command injection"
When call validate_input_python "sync-labels" "token" "ghp_token; rm -rf /"
The status should be failure
End
End

Context "when validating config-file input"
It "accepts valid config file"
When call uv run "_tests/shared/validation_core.py" --validate "sync-labels" "labels" ".github/labels.yml"
The status should be success
End

It "accepts config file with json extension"
When call uv run "_tests/shared/validation_core.py" --validate "sync-labels" "labels" ".github/labels.json"
The status should be success
End

It "rejects path traversal in config file"
When call validate_input_python "sync-labels" "labels" "../../../etc/passwd"
The status should be failure
End

It "rejects absolute path in config file"
When call validate_input_python "sync-labels" "labels" "/etc/passwd"
The status should be failure
End

It "rejects config file with command injection"
When call validate_input_python "sync-labels" "labels" "labels.yml; rm -rf /"
The status should be failure
End
End

Context "when checking action.yml structure"
It "has valid YAML syntax"
When call validate_action_yml_quiet "$ACTION_FILE"
The status should be success
End

It "has correct action name"
name=$(get_action_name "$ACTION_FILE")
When call echo "$name"
The output should equal "Sync labels"
End

It "defines expected inputs"
When call get_action_inputs "$ACTION_FILE"
The output should include "token"
The output should include "labels"
End
End

Context "when testing input requirements"
It "token input is optional"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "token" "optional"
The output should equal "optional"
End

It "labels input is required"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "labels" "required"
The output should equal "required"
End
End

Context "when testing security validations"
It "validates against path traversal in config file"
When call validate_input_python "sync-labels" "labels" "../../malicious.yml"
The status should be failure
End

It "validates against command injection in token"
When call validate_input_python "sync-labels" "token" "ghp_token\`whoami\`"
The status should be failure
End

It "validates against shell metacharacters in config file"
When call validate_input_python "sync-labels" "labels" "labels.yml && rm -rf /"
The status should be failure
End
End
End
156
_tests/unit/terraform-lint-fix/validation.spec.sh
Executable file
156
_tests/unit/terraform-lint-fix/validation.spec.sh
Executable file
@@ -0,0 +1,156 @@
#!/usr/bin/env shellspec
# Unit tests for terraform-lint-fix action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "terraform-lint-fix action"
ACTION_DIR="terraform-lint-fix"
ACTION_FILE="$ACTION_DIR/action.yml"

Context "when validating token input"
It "accepts GitHub token expression"
When call validate_input_python "terraform-lint-fix" "token" "\${{ github.token }}"
The status should be success
End

It "accepts GitHub fine-grained token"
When call validate_input_python "terraform-lint-fix" "token" "ghp_abcdefghijklmnopqrstuvwxyz1234567890"
The status should be success
End

It "rejects invalid token format"
When call validate_input_python "terraform-lint-fix" "token" "invalid-token"
The status should be failure
End

It "rejects token with command injection"
When call validate_input_python "terraform-lint-fix" "token" "ghp_token; rm -rf /"
The status should be failure
End

It "accepts empty token (uses default)"
When call validate_input_python "terraform-lint-fix" "token" ""
The status should be success
End
End

Context "when validating terraform-version input"
It "accepts valid terraform version"
When call validate_input_python "terraform-lint-fix" "terraform-version" "1.5.0"
The status should be success
End

It "accepts latest terraform version"
When call validate_input_python "terraform-lint-fix" "terraform-version" "latest"
The status should be success
End

It "accepts terraform version with patch"
When call validate_input_python "terraform-lint-fix" "terraform-version" "1.5.7"
The status should be success
End

It "accepts terraform version with v prefix"
When call validate_input_python "terraform-lint-fix" "terraform-version" "v1.5.0"
The status should be success
End

It "rejects terraform version with command injection"
When call validate_input_python "terraform-lint-fix" "terraform-version" "1.5.0; rm -rf /"
The status should be failure
End

It "accepts empty terraform version (uses default)"
When call validate_input_python "terraform-lint-fix" "terraform-version" ""
The status should be success
End
End

Context "when validating working-directory input"
It "accepts current directory"
When call validate_input_python "terraform-lint-fix" "working-directory" "."
The status should be success
End

It "accepts relative directory"
When call validate_input_python "terraform-lint-fix" "working-directory" "terraform"
The status should be success
End

It "accepts nested directory"
When call validate_input_python "terraform-lint-fix" "working-directory" "infrastructure/terraform"
The status should be success
End

It "rejects path traversal"
When call validate_input_python "terraform-lint-fix" "working-directory" "../malicious"
The status should be failure
End

It "rejects absolute paths"
When call validate_input_python "terraform-lint-fix" "working-directory" "/etc/passwd"
The status should be failure
End

It "rejects directory with command injection"
When call validate_input_python "terraform-lint-fix" "working-directory" "terraform; rm -rf /"
The status should be failure
End
End

Context "when checking action.yml structure"
It "has valid YAML syntax"
When call validate_action_yml_quiet "$ACTION_FILE"
The status should be success
End

It "has correct action name"
name=$(get_action_name "$ACTION_FILE")
When call echo "$name"
The output should equal "Terraform Lint and Fix"
End

It "defines expected inputs"
When call get_action_inputs "$ACTION_FILE"
The output should include "token"
The output should include "terraform-version"
The output should include "working-directory"
End
End

Context "when testing input requirements"
It "has all inputs as optional"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "" "all_optional"
The output should equal "none"
End
End

Context "when testing security validations"
It "validates against path traversal in working directory"
When call validate_input_python "terraform-lint-fix" "working-directory" "../../malicious"
The status should be failure
End

It "validates against command injection in terraform version"
When call validate_input_python "terraform-lint-fix" "terraform-version" "1.5.0\`whoami\`"
The status should be failure
End

It "validates against shell metacharacters in token"
When call validate_input_python "terraform-lint-fix" "token" "ghp_token && rm -rf /"
The status should be failure
End
End

Context "when testing Terraform-specific validations"
It "validates terraform version format"
When call validate_input_python "terraform-lint-fix" "terraform-version" "1.x.x"
The status should be failure
End

It "validates working directory path safety"
When call validate_input_python "terraform-lint-fix" "working-directory" "/root/.ssh"
The status should be failure
End
End
End
178
_tests/unit/validate-inputs/validation.spec.sh
Executable file
178
_tests/unit/validate-inputs/validation.spec.sh
Executable file
@@ -0,0 +1,178 @@
#!/usr/bin/env shellspec
# Unit tests for validate-inputs action validation and logic

# Framework is automatically loaded via spec_helper.sh

Describe "validate-inputs action"
ACTION_DIR="validate-inputs"
ACTION_FILE="$ACTION_DIR/action.yml"

Context "when validating action input"
It "accepts valid action name"
When call validate_input_python "validate-inputs" "action" "github-release"
The status should be success
End

It "accepts action name with hyphens"
When call validate_input_python "validate-inputs" "action" "docker-build"
The status should be success
End

It "accepts action name with underscores"
When call validate_input_python "validate-inputs" "action" "npm_publish"
The status should be success
End

It "rejects action with command injection"
When call validate_input_python "validate-inputs" "action" "github-release; rm -rf /"
The status should be failure
End

It "rejects action with shell operators"
When call validate_input_python "validate-inputs" "action" "github-release && malicious"
The status should be failure
End

It "rejects action with pipe"
When call validate_input_python "validate-inputs" "action" "github-release | cat /etc/passwd"
The status should be failure
End

It "rejects empty action"
When call validate_input_python "validate-inputs" "action" ""
The status should be failure
End
End

Context "when validating rules-file input"
It "accepts valid rules file"
When call validate_input_python "validate-inputs" "rules-file" "validate-inputs/rules/github-release.yml"
The status should be success
End

It "accepts rules file with relative path"
When call validate_input_python "validate-inputs" "rules-file" "rules/action.yml"
The status should be success
End

It "rejects path traversal in rules file"
When call validate_input_python "validate-inputs" "rules-file" "../../../etc/passwd"
The status should be failure
End

It "rejects absolute path in rules file"
When call validate_input_python "validate-inputs" "rules-file" "/etc/passwd"
The status should be failure
End

It "rejects rules file with command injection"
When call validate_input_python "validate-inputs" "rules-file" "rules.yml; rm -rf /"
The status should be failure
End

It "accepts empty rules file (uses default)"
When call validate_input_python "validate-inputs" "rules-file" ""
The status should be success
End
End

Context "when validating fail-on-error input"
It "accepts true for fail-on-error"
When call validate_input_python "validate-inputs" "fail-on-error" "true"
The status should be success
End

It "accepts false for fail-on-error"
When call validate_input_python "validate-inputs" "fail-on-error" "false"
The status should be success
End

It "rejects invalid fail-on-error value"
When call validate_input_python "validate-inputs" "fail-on-error" "yes"
The status should be failure
End

It "rejects empty fail-on-error"
When call validate_input_python "validate-inputs" "fail-on-error" ""
The status should be failure
End
End

Context "when checking action.yml structure"
It "has valid YAML syntax"
When call validate_action_yml_quiet "$ACTION_FILE"
The status should be success
End

It "has correct action name"
name=$(get_action_name "$ACTION_FILE")
When call echo "$name"
The output should equal "Validate Inputs"
End

It "defines expected inputs"
When call get_action_inputs "$ACTION_FILE"
The output should include "action"
The output should include "rules-file"
The output should include "fail-on-error"
End

It "defines expected outputs"
When call get_action_outputs "$ACTION_FILE"
The output should include "validation-result"
The output should include "errors-found"
The output should include "rules-applied"
End
End

Context "when testing input requirements"
It "requires action input"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "action" "required"
The output should equal "required"
End

It "has rules-file as optional input"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "rules-file" "optional"
The output should equal "optional"
End

It "has fail-on-error as optional input"
When call uv run "_tests/shared/validation_core.py" --property "$ACTION_FILE" "fail-on-error" "optional"
The output should equal "optional"
End
End

Context "when testing security validations"
It "validates against path traversal in rules file"
When call validate_input_python "validate-inputs" "rules-file" "../../malicious.yml"
The status should be failure
End

It "validates against command injection in action name"
When call validate_input_python "validate-inputs" "action" "test\`whoami\`"
The status should be failure
End

It "validates against shell metacharacters in rules file"
When call validate_input_python "validate-inputs" "rules-file" "rules.yml && rm -rf /"
The status should be failure
End
End

Context "when testing validation-specific functionality"
It "validates action name format restrictions"
When call validate_input_python "validate-inputs" "action" "invalid/action/name"
The status should be failure
End

It "validates rules file extension requirements"
When call validate_input_python "validate-inputs" "rules-file" "rules.txt"
The status should be success
End

It "validates boolean input parsing"
When call validate_input_python "validate-inputs" "fail-on-error" "TRUE"
The status should be success
End
End
End
90
_tools/bump-major-version.sh
Executable file
90
_tools/bump-major-version.sh
Executable file
@@ -0,0 +1,90 @@
#!/bin/sh
# Bump from one major version to another (annual version bump)
set -eu

OLD_VERSION="${1:-}"
NEW_VERSION="${2:-}"

# Source shared utilities
# shellcheck source=_tools/shared.sh
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
# shellcheck disable=SC1091
. "$SCRIPT_DIR/shared.sh"

# Check git availability
require_git

if [ -z "$OLD_VERSION" ] || [ -z "$NEW_VERSION" ]; then
printf '%b' "${RED}Error: OLD_VERSION and NEW_VERSION arguments required${NC}\n"
printf 'Usage: %s v2025 v2026\n' "$0"
exit 1
fi

# Validate major version format
if ! validate_major_version "$OLD_VERSION"; then
printf '%b' "${RED}Error: Invalid old version format: $OLD_VERSION${NC}\n"
printf 'Expected: vYYYY (e.g., v2025)\n'
exit 1
fi

if ! validate_major_version "$NEW_VERSION"; then
printf '%b' "${RED}Error: Invalid new version format: $NEW_VERSION${NC}\n"
printf 'Expected: vYYYY (e.g., v2026)\n'
exit 1
fi

printf '%b' "${BLUE}Bumping major version from $OLD_VERSION to $NEW_VERSION${NC}\n"
printf '\n'

# Get SHA for new version tag
if ! git rev-parse "$NEW_VERSION" >/dev/null 2>&1; then
printf '%b' "${YELLOW}Warning: Tag $NEW_VERSION not found${NC}\n"
printf 'Creating tag %s pointing to current HEAD...\n' "$NEW_VERSION"

if ! current_sha=$(git rev-parse HEAD 2>&1); then
printf '%b' "${RED}Error: Failed to get current HEAD SHA${NC}\n" >&2
printf 'Git command failed: git rev-parse HEAD\n' >&2
exit 1
fi

git tag -a "$NEW_VERSION" -m "Major version $NEW_VERSION"
printf '%b' "${GREEN}✓ Created tag $NEW_VERSION pointing to $current_sha${NC}\n"
printf '\n'
fi

if ! new_sha=$(git rev-list -n 1 "$NEW_VERSION" 2>&1); then
printf '%b' "${RED}Error: Failed to get SHA for tag $NEW_VERSION${NC}\n" >&2
printf 'Git command failed: git rev-list -n 1 "%s"\n' "$NEW_VERSION" >&2
exit 1
fi

if [ -z "$new_sha" ]; then
printf '%b' "${RED}Error: Empty SHA returned for tag $NEW_VERSION${NC}\n" >&2
exit 1
fi

printf '%b' "Target SHA for $NEW_VERSION: ${GREEN}$new_sha${NC}\n"
printf '\n'

# Update all action references
printf '%b' "${BLUE}Updating action references...${NC}\n"
"$SCRIPT_DIR/update-action-refs.sh" "$NEW_VERSION" "tag"

# Commit the changes
if ! git diff --quiet; then
git add -- */action.yml
git commit -m "chore: bump major version from $OLD_VERSION to $NEW_VERSION

This commit updates all internal action references from $OLD_VERSION
to $NEW_VERSION."

printf '%b' "${GREEN}✅ Committed version bump${NC}\n"
else
printf '%b' "${BLUE}No changes to commit${NC}\n"
fi

printf '\n'
printf '%b' "${GREEN}✅ Major version bumped successfully${NC}\n"
printf '\n'
printf '%b' "${YELLOW}Remember to update READMEs:${NC}\n"
printf ' make docs\n'
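# Example (illustrative): bump every internal action reference for a new year
#   ./_tools/bump-major-version.sh v2025 v2026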
120
_tools/check-version-refs.sh
Executable file
120
_tools/check-version-refs.sh
Executable file
@@ -0,0 +1,120 @@
#!/bin/sh
# Check and display all current SHA-pinned action references
set -eu

# Source shared utilities
# shellcheck source=_tools/shared.sh
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
# shellcheck disable=SC1091
. "$SCRIPT_DIR/shared.sh"

# Warn once if git is not available
if ! has_git; then
printf '%b' "${YELLOW}Warning: git is not installed or not in PATH${NC}\n" >&2
printf 'Git tag information will not be available.\n' >&2
fi

# Check for required coreutils
for tool in find grep sed printf sort cut tr wc; do
if ! command -v "$tool" >/dev/null 2>&1; then
printf '%b' "${RED}Error: Required tool '$tool' is not installed or not in PATH${NC}\n" >&2
printf 'Please install coreutils to use this script.\n' >&2
exit 1
fi
done

printf '%b' "${BLUE}Current SHA-pinned action references:${NC}\n"
printf '\n'

# Create temp files for processing
temp_file=$(safe_mktemp)
trap 'rm -f "$temp_file"' EXIT

temp_input=$(safe_mktemp)
trap 'rm -f "$temp_file" "$temp_input"' EXIT

# Find all action references and collect SHA|action pairs
# Use input redirection to avoid subshell issues with pipeline
find . -maxdepth 2 -name "action.yml" -path "*/action.yml" ! -path "./_*" ! -path "./.github/*" -exec grep -h "uses: ivuorinen/actions/" {} \; > "$temp_input"

while IFS= read -r line; do
# Extract action name and SHA using sed
action=$(echo "$line" | sed -n 's|.*ivuorinen/actions/\([a-z-]*\)@.*|\1|p')
sha=$(echo "$line" | sed -n 's|.*@\([a-f0-9]\{40\}\).*|\1|p')

if [ -n "$action" ] && [ -n "$sha" ]; then
printf '%s\n' "$sha|$action" >> "$temp_file"
fi
done < "$temp_input"

# Check if we found any references
if [ ! -s "$temp_file" ]; then
printf '%b' "${YELLOW}No SHA-pinned references found${NC}\n"
exit 0
fi

# Sort by SHA and group
sort "$temp_file" | uniq > "${temp_file}.sorted"
mv "${temp_file}.sorted" "$temp_file"

# Count unique SHAs
sha_count=$(cut -d'|' -f1 "$temp_file" | sort -u | wc -l | tr -d ' ')

if [ "$sha_count" -eq 1 ]; then
printf '%b' "${GREEN}✓ All references use the same SHA (consistent)${NC}\n"
printf '\n'
fi

# Process and display grouped by SHA
current_sha=""
actions_list=""

while IFS='|' read -r sha action; do
if [ "$sha" != "$current_sha" ]; then
# Print previous SHA group if exists
if [ -n "$current_sha" ]; then
# Try to find tags pointing to this SHA
if has_git; then
tags=$(git tag --points-at "$current_sha" 2>/dev/null | tr '\n' ', ' | sed 's/,$//')
else
tags=""
fi

printf '%b' "${GREEN}SHA: $current_sha${NC}\n"
if [ -n "$tags" ]; then
printf '%b' " Tags: ${BLUE}$tags${NC}\n"
fi
printf ' Actions: %s\n' "$actions_list"
printf '\n'
fi

# Start new SHA group
current_sha="$sha"
actions_list="$action"
else
# Add to current SHA group
actions_list="$actions_list, $action"
fi
done < "$temp_file"

# Print last SHA group
if [ -n "$current_sha" ]; then
if has_git; then
tags=$(git tag --points-at "$current_sha" 2>/dev/null | tr '\n' ', ' | sed 's/,$//')
else
tags=""
fi

printf '%b' "${GREEN}SHA: $current_sha${NC}\n"
if [ -n "$tags" ]; then
printf '%b' " Tags: ${BLUE}$tags${NC}\n"
fi
printf ' Actions: %s\n' "$actions_list"
printf '\n'
fi

printf '%b' "${BLUE}Summary:${NC}\n"
printf ' Unique SHAs: %s\n' "$sha_count"
if [ "$sha_count" -gt 1 ]; then
printf '%b' " ${YELLOW}⚠ Warning: Multiple SHAs in use (consider updating)${NC}\n"
fi
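# Example (illustrative): report every pinned SHA, the tags pointing at it,
# and the actions that reference it
#   ./_tools/check-version-refs.sh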
280
_tools/docker-testing-tools/Dockerfile
Normal file
280
_tools/docker-testing-tools/Dockerfile
Normal file
@@ -0,0 +1,280 @@
|
||||
# GitHub Actions Testing Framework Docker Image
|
||||
# Multi-stage build with non-root user for security
|
||||
# Pre-installs all testing tools to reduce CI runtime
|
||||
|
||||
# Centralized ARG defaults to avoid version drift across stages
|
||||
ARG KCOV_VERSION=42
|
||||
ARG TRUFFLEHOG_VERSION=3.86.0
|
||||
ARG ACTIONLINT_VERSION=1.7.7
|
||||
ARG ACT_VERSION=0.2.71
|
||||
ARG SHELLSPEC_VERSION=0.28.1
|
||||
|
||||
# Stage 1: Build kcov separately to keep final image slim
|
||||
FROM ubuntu:24.04 AS kcov-builder
|
||||
|
||||
ARG KCOV_VERSION
|
||||
|
||||
# Install only build dependencies needed for kcov
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
cmake \
|
||||
g++ \
|
||||
git \
|
||||
libcurl4-openssl-dev \
|
||||
libdw-dev \
|
||||
libelf-dev \
|
||||
libiberty-dev \
|
||||
libssl-dev \
|
||||
make \
|
||||
pkg-config \
|
||||
python3 \
|
||||
zlib1g-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Build kcov from source
|
||||
WORKDIR /tmp/kcov-build
|
||||
RUN git clone --depth 1 --branch "v${KCOV_VERSION}" https://github.com/SimonKagstrom/kcov.git .
|
||||
|
||||
WORKDIR /tmp/kcov-build/build
|
||||
RUN cmake .. \
|
||||
&& make \
|
||||
&& make install DESTDIR=/kcov-install
|
||||
|
||||
# Stage 2: Base system setup
|
||||
FROM ubuntu:24.04 AS base
|
||||
|
||||
LABEL maintainer="ivuorinen"
|
||||
LABEL description="GitHub Actions testing framework with pre-installed tools"
|
||||
LABEL version="1.0.0"
|
||||
LABEL org.opencontainers.image.source="https://github.com/ivuorinen/actions"
|
||||
|
||||
# Avoid interactive prompts during package installation
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV TZ=UTC
|
||||
ENV NODE_MAJOR=20
|
||||
|
||||
# Set shell to bash with pipefail for better error handling
|
||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||
|
||||
# Install system dependencies and common tools
|
||||
# hadolint ignore=DL3008
|
||||
RUN apt-get update && apt-get install -y \
|
||||
--no-install-recommends \
|
||||
ca-certificates \
|
||||
curl \
|
||||
git \
|
||||
gnupg \
|
||||
gzip \
|
||||
jq \
|
||||
lsb-release \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-yaml \
|
||||
shellcheck \
|
||||
sudo \
|
||||
tar \
|
||||
unzip \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
# Note: build-essential, cmake, and kcov build deps moved to separate builder stage \
|
||||
&& curl -fsSL --proto '=https' --tlsv1.2 https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key -o /tmp/nodesource.gpg.key \
|
||||
&& gpg --dearmor -o /usr/share/keyrings/nodesource.gpg < /tmp/nodesource.gpg.key \
|
||||
&& echo "deb [signed-by=/usr/share/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_MAJOR}.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends nodejs \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/nodesource.gpg.key
|
||||
|
||||
# Stage 2: Tool installation
|
||||
FROM base AS tools
|
||||
|
||||
# Set shell to bash with pipefail for better error handling
|
||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||
|
||||
# Version pinning for security and reproducibility (inherit from global ARGs)
|
||||
ARG TRUFFLEHOG_VERSION
|
||||
ARG ACTIONLINT_VERSION
|
||||
ARG ACT_VERSION
|
||||
ARG SHELLSPEC_VERSION
|
||||
|
||||
# Install all APT-based and standalone tools in a single optimized layer
|
||||
# 1. Configure APT repositories (Trivy, GitHub CLI)
|
||||
# 2. Install APT packages (trivy, gh, xz-utils)
|
||||
# 3. Download all tool tarballs and checksums in parallel
|
||||
# 4. Verify checksums and install tools
|
||||
# hadolint ignore=DL3008
|
||||
RUN set -eux \
  # Detect architecture once
  && arch="$(dpkg --print-architecture)" \
  && case "${arch}" in \
    amd64) trufflehog_arch="amd64"; actionlint_arch="amd64"; act_arch="Linux_x86_64" ;; \
    arm64) trufflehog_arch="arm64"; actionlint_arch="arm64"; act_arch="Linux_arm64" ;; \
    *) echo "Unsupported architecture: ${arch}" && exit 1 ;; \
  esac \
  # Configure APT repositories for Trivy and GitHub CLI
  && echo "=== Configuring APT repositories ===" \
  && curl -fsSL --proto '=https' --tlsv1.2 https://aquasecurity.github.io/trivy-repo/deb/public.key -o /tmp/trivy.key \
  && gpg --dearmor -o /usr/share/keyrings/trivy.gpg < /tmp/trivy.key \
  && echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" \
    | tee /etc/apt/sources.list.d/trivy.list \
  && curl -fsSL --proto '=https' --tlsv1.2 https://cli.github.com/packages/githubcli-archive-keyring.gpg -o /tmp/githubcli-archive-keyring.gpg \
  && install -m 0644 /tmp/githubcli-archive-keyring.gpg /usr/share/keyrings/githubcli-archive-keyring.gpg \
  && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
    | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
  # Install APT packages
  && echo "=== Installing APT packages ===" \
  && apt-get update \
  && apt-get install -y --no-install-recommends gh trivy xz-utils \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/trivy.key /tmp/githubcli-archive-keyring.gpg \
  # Download all tool tarballs and checksums
  && echo "=== Downloading standalone tools ===" \
  && trufflehog_tarball="trufflehog_${TRUFFLEHOG_VERSION}_linux_${trufflehog_arch}.tar.gz" \
  && actionlint_tarball="actionlint_${ACTIONLINT_VERSION}_linux_${actionlint_arch}.tar.gz" \
  && act_tarball="act_${act_arch}.tar.gz" \
  && curl -fsSL --proto '=https' --tlsv1.2 "https://github.com/trufflesecurity/trufflehog/releases/download/v${TRUFFLEHOG_VERSION}/${trufflehog_tarball}" -o "/tmp/${trufflehog_tarball}" \
  && curl -fsSL --proto '=https' --tlsv1.2 "https://github.com/trufflesecurity/trufflehog/releases/download/v${TRUFFLEHOG_VERSION}/trufflehog_${TRUFFLEHOG_VERSION}_checksums.txt" -o /tmp/trufflehog_checksums.txt \
  && curl -fsSL --proto '=https' --tlsv1.2 "https://github.com/rhysd/actionlint/releases/download/v${ACTIONLINT_VERSION}/${actionlint_tarball}" -o "/tmp/${actionlint_tarball}" \
  && curl -fsSL --proto '=https' --tlsv1.2 "https://github.com/rhysd/actionlint/releases/download/v${ACTIONLINT_VERSION}/actionlint_${ACTIONLINT_VERSION}_checksums.txt" -o /tmp/actionlint_checksums.txt \
  && curl -fsSL --proto '=https' --tlsv1.2 "https://github.com/nektos/act/releases/download/v${ACT_VERSION}/${act_tarball}" -o "/tmp/${act_tarball}" \
  && curl -fsSL --proto '=https' --tlsv1.2 "https://github.com/nektos/act/releases/download/v${ACT_VERSION}/checksums.txt" -o /tmp/act_checksums.txt \
  # Verify checksums and install trufflehog
  && echo "=== Verifying checksums and installing tools ===" \
  && grep "${trufflehog_tarball}" /tmp/trufflehog_checksums.txt \
    | sed "s|${trufflehog_tarball}|/tmp/${trufflehog_tarball}|" \
    | sha256sum -c - \
  && tar -xzf "/tmp/${trufflehog_tarball}" -C /tmp \
  && chmod +x /tmp/trufflehog \
  && mv /tmp/trufflehog /usr/local/bin/trufflehog \
  # Verify checksum and install actionlint
  && grep "${actionlint_tarball}" /tmp/actionlint_checksums.txt \
    | sed "s|${actionlint_tarball}|/tmp/${actionlint_tarball}|" \
    | sha256sum -c - \
  && tar -xzf "/tmp/${actionlint_tarball}" -C /tmp \
  && chmod +x /tmp/actionlint \
  && mv /tmp/actionlint /usr/local/bin/actionlint \
  # Verify checksum and install act
  && grep "${act_tarball}" /tmp/act_checksums.txt \
    | sed "s|${act_tarball}|/tmp/${act_tarball}|" \
    | sha256sum -c - \
  && tar -xzf "/tmp/${act_tarball}" -C /tmp \
  && chmod +x /tmp/act \
  && mv /tmp/act /usr/local/bin/act \
  # Clean up all temporary files
  && rm -f /tmp/*.tar.gz /tmp/*_checksums.txt \
  # Verify all installations
  && echo "=== Verifying tool installations ===" \
  && trivy --version \
  && gh --version \
  && trufflehog --version \
  && actionlint --version \
  && act --version \
  && test -f /bin/sh && test -f /bin/bash && echo "✓ Shell binaries intact" \
  && echo "=== All tools installed successfully ==="

# Stage 3: Final image with non-root user
FROM tools AS final

# Set shell to bash with pipefail for better error handling
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Create non-root user for security
ARG USERNAME=runner
ARG USER_UID=1001
ARG USER_GID=$USER_UID
ARG SHELLSPEC_VERSION

# Set up environment for testing
ENV PATH="/home/$USERNAME/.local/bin:$PATH"
ENV USER=$USERNAME
ENV HOME="/home/$USERNAME"

# Create the user and group, then
# grant passwordless sudo to runner user for testing scenarios, then
# create workspace directory with proper permissions (as root)
RUN groupadd --gid "$USER_GID" "$USERNAME" \
  && useradd --uid "$USER_UID" --gid "$USER_GID" -m "$USERNAME" -s /bin/bash \
  && echo "$USERNAME ALL=(ALL) NOPASSWD:ALL" > "/etc/sudoers.d/$USERNAME" \
  && chmod 0440 "/etc/sudoers.d/$USERNAME" \
  && mkdir -p /workspace \
  && chown -R "$USERNAME:$USERNAME" /workspace

# Copy kcov from builder stage (avoiding build dependencies in final image)
# kcov is not available in Ubuntu 22.04 apt repositories, so we build it separately
COPY --from=kcov-builder /kcov-install/usr/local/ /usr/local/

# Install only runtime dependencies for kcov (not build dependencies)
RUN apt-get update \
  && apt-get install -y --no-install-recommends \
    libcurl4 \
    libdw1 \
    libelf1 \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

# Switch to non-root user for ShellSpec installation
USER "$USERNAME"
WORKDIR /workspace

# Install the ShellSpec testing framework in the user's home with checksum verification,
# then verify all tool installations (this runs as the non-root user; system tools remain on PATH)
# ShellSpec - pinned SHA-256 checksum verification (update the checksum below when SHELLSPEC_VERSION changes)
# hadolint ignore=SC2016
RUN set -eux; \
  mkdir -p ~/.local/bin; \
  tarball="shellspec-dist.tar.gz"; \
  # Pinned SHA-256 checksum for ShellSpec 0.28.1 shellspec-dist.tar.gz
  # Source: https://github.com/shellspec/shellspec/releases/download/0.28.1/shellspec-dist.tar.gz
  expected_checksum="350d3de04ba61505c54eda31a3c2ee912700f1758b1a80a284bc08fd8b6c5992"; \
  \
  # Download ShellSpec
  curl -fsSL --proto '=https' --tlsv1.2 \
    "https://github.com/shellspec/shellspec/releases/download/${SHELLSPEC_VERSION}/${tarball}" \
    -o "/tmp/${tarball}"; \
  \
  # Verify checksum
  actual_checksum=$(sha256sum "/tmp/${tarball}" | awk '{print $1}'); \
  if [ "${actual_checksum}" != "${expected_checksum}" ]; then \
    echo "Error: Checksum verification failed for ShellSpec ${SHELLSPEC_VERSION}" >&2; \
    echo "Expected: ${expected_checksum}" >&2; \
    echo "Got: ${actual_checksum}" >&2; \
    rm -f "/tmp/${tarball}"; \
    exit 1; \
  fi; \
  echo "Checksum verified successfully"; \
  \
  tar -xzf "/tmp/${tarball}" -C "$HOME/.local"; \
  ln -s "$HOME/.local/shellspec/shellspec" "$HOME/.local/bin/shellspec"; \
  echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc; \
  shellspec --version; \
  rm -f "/tmp/${tarball}" \
  && echo "ShellSpec installed successfully" \
  && echo "Verifying installed tool versions..." \
  && echo "=== Tool Versions ===" \
  && shellcheck --version \
  && jq --version \
  && kcov --version \
  && trivy --version \
  && trufflehog --version \
  && actionlint --version \
  && act --version \
  && gh --version \
  && node --version \
  && npm --version \
  && python3 --version \
  && echo "=== System tools verified ===" \
  && echo "=== Verify user-installed tools ===" \
  && shellspec --version \
  && echo "=== User tools verified ===" \
  && echo "=== Build complete ==="

# Health check to verify essential tools are accessible
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
  CMD shellspec --version > /dev/null 2>&1 && \
      shellcheck --version > /dev/null 2>&1 && \
      jq --version > /dev/null 2>&1 || exit 1

# Default command keeps container running for GitHub Actions
CMD ["/bin/bash", "-c", "tail -f /dev/null"]
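
For local iteration, a minimal build-and-run sketch follows. It is illustrative only and not part of the Dockerfile above: the image tag actions-test, the assumption that this file is the Dockerfile at the build-context root, and the bind-mounted workspace path are placeholders, and the pinned tool versions come from the global ARG defaults declared earlier in the file.

# Build the final stage of the image (assumes ./Dockerfile in the build context)
docker build --target final -t actions-test .

# Run the ShellSpec suite from a checked-out workspace (the mount source is an example path)
docker run --rm -v "$PWD:/workspace" actions-test shellspec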
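
Because the image declares a HEALTHCHECK, one quick sanity check is to start a container and read the health state Docker records after the probes run; the container name below is an assumption and reuses the illustrative actions-test tag from the sketch above.

# Start the container in the background, then inspect its recorded health state
docker run -d --name actions-test-ctr actions-test
docker inspect --format '{{.State.Health.Status}}' actions-test-ctr   # expect "healthy" after the start period
docker rm -f actions-test-ctr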