mirror of
https://github.com/ivuorinen/ghaw-auditor.git
synced 2026-03-08 08:58:32 +00:00
feat: initial commit
This commit is contained in:
1
tests/__init__.py
Normal file
1
tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Tests for ghaw-auditor."""
|
||||
23
tests/fixtures/action-with-defaults.yml
vendored
Normal file
23
tests/fixtures/action-with-defaults.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: 'Action with Various Defaults'
|
||||
description: 'Tests different input default types'
|
||||
|
||||
inputs:
|
||||
string-input:
|
||||
description: 'String input'
|
||||
default: 'hello'
|
||||
boolean-input:
|
||||
description: 'Boolean input'
|
||||
default: true
|
||||
number-input:
|
||||
description: 'Number input'
|
||||
default: 42
|
||||
no-default:
|
||||
description: 'Input without default'
|
||||
required: true
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- run: echo "test"
|
||||
shell: bash
|
||||
12
tests/fixtures/basic-workflow.yml
vendored
Normal file
12
tests/fixtures/basic-workflow.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
name: Basic Workflow
|
||||
|
||||
on: push
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run tests
|
||||
run: echo "Testing"
|
||||
89
tests/fixtures/complex-workflow.yml
vendored
Normal file
89
tests/fixtures/complex-workflow.yml
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
name: Complex Workflow
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, develop]
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull_requests: write
|
||||
|
||||
env:
|
||||
NODE_ENV: production
|
||||
API_URL: https://api.example.com
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
working-directory: ./src
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
permissions:
|
||||
contents: read
|
||||
environment:
|
||||
name: production
|
||||
url: https://example.com
|
||||
env:
|
||||
BUILD_ENV: production
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Build
|
||||
run: npm run build
|
||||
env:
|
||||
API_KEY: ${{ secrets.API_KEY }}
|
||||
TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: github.event_name == 'pull_request'
|
||||
container:
|
||||
image: node:20-alpine
|
||||
credentials:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASS }}
|
||||
env:
|
||||
NODE_ENV: test
|
||||
ports:
|
||||
- 8080
|
||||
volumes:
|
||||
- /tmp:/tmp
|
||||
options: --cpus 2
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:15
|
||||
credentials:
|
||||
username: ${{ secrets.DOCKER_USER }}
|
||||
password: ${{ secrets.DOCKER_PASS }}
|
||||
env:
|
||||
POSTGRES_PASSWORD: ${{ secrets.DB_PASSWORD }}
|
||||
ports:
|
||||
- 5432
|
||||
options: --health-cmd pg_isready
|
||||
strategy:
|
||||
matrix:
|
||||
node-version: [18, 20]
|
||||
os: [ubuntu-latest, windows-latest]
|
||||
fail-fast: false
|
||||
max-parallel: 2
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
- name: Test
|
||||
run: npm test
|
||||
env:
|
||||
DATABASE_URL: ${{ secrets.DATABASE_URL }}
|
||||
33
tests/fixtures/composite-action.yml
vendored
Normal file
33
tests/fixtures/composite-action.yml
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
name: 'Composite Action'
|
||||
description: 'A composite action example'
|
||||
author: 'Test Author'
|
||||
|
||||
inputs:
|
||||
message:
|
||||
description: 'Message to display'
|
||||
required: true
|
||||
debug:
|
||||
description: 'Enable debug mode'
|
||||
required: false
|
||||
default: 'false'
|
||||
|
||||
outputs:
|
||||
result:
|
||||
description: 'Action result'
|
||||
value: ${{ steps.output.outputs.result }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Display message
|
||||
run: echo "${{ inputs.message }}"
|
||||
shell: bash
|
||||
- name: Set output
|
||||
id: output
|
||||
run: echo "result=success" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
|
||||
branding:
|
||||
icon: 'check'
|
||||
color: 'green'
|
||||
21
tests/fixtures/docker-action.yml
vendored
Normal file
21
tests/fixtures/docker-action.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: 'Docker Action'
|
||||
description: 'A Docker action example'
|
||||
|
||||
inputs:
|
||||
dockerfile:
|
||||
description: 'Path to Dockerfile'
|
||||
required: false
|
||||
default: 'Dockerfile'
|
||||
|
||||
outputs:
|
||||
image-id:
|
||||
description: 'Built image ID'
|
||||
|
||||
runs:
|
||||
using: docker
|
||||
image: Dockerfile
|
||||
args:
|
||||
- ${{ inputs.dockerfile }}
|
||||
env:
|
||||
BUILD_ENV: production
|
||||
11
tests/fixtures/empty-workflow-call.yml
vendored
Normal file
11
tests/fixtures/empty-workflow-call.yml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
name: Empty Workflow Call
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
1
tests/fixtures/invalid-action.yml
vendored
Normal file
1
tests/fixtures/invalid-action.yml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
# Empty action file
|
||||
1
tests/fixtures/invalid-workflow.yml
vendored
Normal file
1
tests/fixtures/invalid-workflow.yml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
# Empty file
|
||||
27
tests/fixtures/javascript-action.yml
vendored
Normal file
27
tests/fixtures/javascript-action.yml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: 'JavaScript Action'
|
||||
description: 'A Node.js action example'
|
||||
author: 'GitHub'
|
||||
|
||||
inputs:
|
||||
token:
|
||||
description: 'GitHub token'
|
||||
required: true
|
||||
timeout:
|
||||
description: 'Timeout in seconds'
|
||||
required: false
|
||||
default: '60'
|
||||
|
||||
outputs:
|
||||
status:
|
||||
description: 'Action status'
|
||||
|
||||
runs:
|
||||
using: node20
|
||||
main: dist/index.js
|
||||
pre: dist/setup.js
|
||||
post: dist/cleanup.js
|
||||
|
||||
branding:
|
||||
icon: 'code'
|
||||
color: 'blue'
|
||||
27
tests/fixtures/job-with-outputs.yml
vendored
Normal file
27
tests/fixtures/job-with-outputs.yml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: Job with Outputs
|
||||
|
||||
on: push
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
artifact-url: ${{ steps.upload.outputs.url }}
|
||||
status: success
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get version
|
||||
id: version
|
||||
run: echo "version=1.0.0" >> $GITHUB_OUTPUT
|
||||
- name: Upload artifact
|
||||
id: upload
|
||||
run: echo "url=https://example.com/artifact" >> $GITHUB_OUTPUT
|
||||
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
steps:
|
||||
- name: Deploy
|
||||
run: echo "Deploying version ${{ needs.build.outputs.version }}"
|
||||
26
tests/fixtures/reusable-workflow-caller.yml
vendored
Normal file
26
tests/fixtures/reusable-workflow-caller.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: Reusable Workflow Caller
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
call-workflow:
|
||||
uses: owner/repo/.github/workflows/deploy.yml@v1
|
||||
with:
|
||||
environment: production
|
||||
debug: false
|
||||
version: 1.2.3
|
||||
secrets:
|
||||
deploy-token: ${{ secrets.DEPLOY_TOKEN }}
|
||||
api-key: ${{ secrets.API_KEY }}
|
||||
|
||||
call-workflow-inherit:
|
||||
uses: owner/repo/.github/workflows/test.yml@main
|
||||
secrets: inherit
|
||||
|
||||
call-local-workflow:
|
||||
uses: ./.github/workflows/shared.yml
|
||||
with:
|
||||
config: custom
|
||||
39
tests/fixtures/reusable-workflow.yml
vendored
Normal file
39
tests/fixtures/reusable-workflow.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
---
|
||||
name: Reusable Workflow
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
environment:
|
||||
description: 'Deployment environment'
|
||||
required: true
|
||||
type: string
|
||||
debug:
|
||||
description: 'Enable debug mode'
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
outputs:
|
||||
deployment-id:
|
||||
description: 'Deployment ID'
|
||||
value: ${{ jobs.deploy.outputs.id }}
|
||||
secrets:
|
||||
deploy-token:
|
||||
description: 'Deployment token'
|
||||
required: true
|
||||
api-key:
|
||||
required: false
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
id: ${{ steps.deploy.outputs.id }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Deploy
|
||||
id: deploy
|
||||
run: echo "id=12345" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
TOKEN: ${{ secrets.deploy-token }}
|
||||
API_KEY: ${{ secrets.api-key }}
|
||||
30
tests/golden/actions.json
Normal file
30
tests/golden/actions.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"actions/checkout@abc123": {
|
||||
"name": "Checkout",
|
||||
"description": "Checkout a Git repository",
|
||||
"author": "GitHub",
|
||||
"inputs": {
|
||||
"repository": {
|
||||
"name": "repository",
|
||||
"description": "Repository name with owner",
|
||||
"required": false,
|
||||
"default": null
|
||||
},
|
||||
"ref": {
|
||||
"name": "ref",
|
||||
"description": "The branch, tag or SHA to checkout",
|
||||
"required": false,
|
||||
"default": null
|
||||
}
|
||||
},
|
||||
"outputs": {},
|
||||
"runs": {
|
||||
"using": "node20",
|
||||
"main": "dist/index.js"
|
||||
},
|
||||
"branding": null,
|
||||
"is_composite": false,
|
||||
"is_docker": false,
|
||||
"is_javascript": true
|
||||
}
|
||||
}
|
||||
57
tests/golden/report.md
Normal file
57
tests/golden/report.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# GitHub Actions & Workflows Audit Report
|
||||
|
||||
**Generated:** 2025-10-02T00:00:00.000000
|
||||
|
||||
## Summary
|
||||
|
||||
- **Workflows:** 1
|
||||
- **Actions:** 1
|
||||
- **Policy Violations:** 0
|
||||
|
||||
## Analysis
|
||||
|
||||
- **Total Jobs:** 1
|
||||
- **Reusable Workflows:** 0
|
||||
|
||||
### Triggers
|
||||
|
||||
- `pull_request`: 1
|
||||
- `push`: 1
|
||||
|
||||
### Runners
|
||||
|
||||
- `ubuntu-latest`: 1
|
||||
|
||||
### Secrets
|
||||
|
||||
Total unique secrets: 1
|
||||
|
||||
- `GITHUB_TOKEN`
|
||||
|
||||
## Workflows
|
||||
|
||||
### Test Workflow
|
||||
|
||||
**Path:** `test.yml`
|
||||
|
||||
**Triggers:** `push`, `pull_request`
|
||||
|
||||
**Jobs:** 1
|
||||
|
||||
#### Jobs
|
||||
|
||||
- **test**
|
||||
- Runner: `ubuntu-latest`
|
||||
|
||||
## Actions Inventory
|
||||
|
||||
### Checkout
|
||||
|
||||
**Key:** `actions/checkout@abc123`
|
||||
|
||||
Checkout a Git repository
|
||||
|
||||
**Inputs:**
|
||||
|
||||
- `repository` (optional): Repository name with owner
|
||||
- `ref` (optional): The branch, tag or SHA to checkout
|
||||
41
tests/golden/workflows.json
Normal file
41
tests/golden/workflows.json
Normal file
@@ -0,0 +1,41 @@
|
||||
{
|
||||
"test.yml": {
|
||||
"name": "Test Workflow",
|
||||
"path": "test.yml",
|
||||
"triggers": [
|
||||
"push",
|
||||
"pull_request"
|
||||
],
|
||||
"permissions": null,
|
||||
"concurrency": null,
|
||||
"env": {},
|
||||
"defaults": {},
|
||||
"jobs": {
|
||||
"test": {
|
||||
"name": "test",
|
||||
"runs_on": "ubuntu-latest",
|
||||
"needs": [],
|
||||
"if_condition": null,
|
||||
"permissions": null,
|
||||
"environment": null,
|
||||
"concurrency": null,
|
||||
"timeout_minutes": null,
|
||||
"continue_on_error": false,
|
||||
"container": null,
|
||||
"services": {},
|
||||
"strategy": null,
|
||||
"actions_used": [],
|
||||
"secrets_used": [
|
||||
"GITHUB_TOKEN"
|
||||
],
|
||||
"env_vars": {}
|
||||
}
|
||||
},
|
||||
"is_reusable": false,
|
||||
"reusable_contract": null,
|
||||
"secrets_used": [
|
||||
"GITHUB_TOKEN"
|
||||
],
|
||||
"actions_used": []
|
||||
}
|
||||
}
|
||||
144
tests/test_analyzer.py
Normal file
144
tests/test_analyzer.py
Normal file
@@ -0,0 +1,144 @@
|
||||
"""Tests for analyzer module."""
|
||||
|
||||
from ghaw_auditor.analyzer import Analyzer
|
||||
from ghaw_auditor.models import ActionRef, ActionType, JobMeta, WorkflowMeta
|
||||
|
||||
|
||||
def test_analyzer_initialization() -> None:
|
||||
"""Test analyzer can be initialized."""
|
||||
analyzer = Analyzer()
|
||||
assert analyzer is not None
|
||||
|
||||
|
||||
def test_deduplicate_actions() -> None:
|
||||
"""Test action deduplication."""
|
||||
analyzer = Analyzer()
|
||||
|
||||
action1 = ActionRef(
|
||||
type=ActionType.GITHUB,
|
||||
owner="actions",
|
||||
repo="checkout",
|
||||
ref="v4",
|
||||
source_file="test.yml",
|
||||
)
|
||||
action2 = ActionRef(
|
||||
type=ActionType.GITHUB,
|
||||
owner="actions",
|
||||
repo="checkout",
|
||||
ref="v4",
|
||||
source_file="test2.yml",
|
||||
)
|
||||
action3 = ActionRef(
|
||||
type=ActionType.GITHUB,
|
||||
owner="actions",
|
||||
repo="setup-node",
|
||||
ref="v4",
|
||||
source_file="test.yml",
|
||||
)
|
||||
|
||||
result = analyzer.deduplicate_actions([action1, action2, action3])
|
||||
|
||||
# Should have 2 unique actions (checkout appears twice)
|
||||
assert len(result) == 2
|
||||
|
||||
|
||||
def test_analyze_workflows() -> None:
|
||||
"""Test workflow analysis."""
|
||||
analyzer = Analyzer()
|
||||
|
||||
job = JobMeta(
|
||||
name="test",
|
||||
runs_on="ubuntu-latest",
|
||||
)
|
||||
|
||||
workflow = WorkflowMeta(
|
||||
name="Test Workflow",
|
||||
path="test.yml",
|
||||
triggers=["push", "pull_request"],
|
||||
jobs={"test": job},
|
||||
secrets_used={"SECRET1", "SECRET2"},
|
||||
)
|
||||
|
||||
workflows = {"test.yml": workflow}
|
||||
analysis = analyzer.analyze_workflows(workflows, {})
|
||||
|
||||
assert analysis["total_workflows"] == 1
|
||||
assert analysis["total_jobs"] == 1
|
||||
assert "push" in analysis["triggers"]
|
||||
assert analysis["triggers"]["push"] == 1
|
||||
assert analysis["secrets"]["total_unique_secrets"] == 2
|
||||
|
||||
|
||||
def test_analyze_runners_with_list() -> None:
|
||||
"""Test runner analysis with list runner."""
|
||||
from ghaw_auditor.analyzer import Analyzer
|
||||
from ghaw_auditor.models import JobMeta, WorkflowMeta
|
||||
|
||||
analyzer = Analyzer()
|
||||
|
||||
# Job with list runner (matrix runner)
|
||||
job = JobMeta(
|
||||
name="test",
|
||||
runs_on=["ubuntu-latest", "macos-latest"],
|
||||
)
|
||||
|
||||
workflow = WorkflowMeta(
|
||||
name="Test Workflow",
|
||||
path="test.yml",
|
||||
triggers=["push"],
|
||||
jobs={"test": job},
|
||||
)
|
||||
|
||||
workflows = {"test.yml": workflow}
|
||||
analysis = analyzer.analyze_workflows(workflows, {})
|
||||
|
||||
# List runner should be converted to string
|
||||
assert "['ubuntu-latest', 'macos-latest']" in analysis["runners"]
|
||||
|
||||
|
||||
def test_analyze_containers_and_services() -> None:
|
||||
"""Test container and service analysis."""
|
||||
from ghaw_auditor.analyzer import Analyzer
|
||||
from ghaw_auditor.models import Container, JobMeta, Service, WorkflowMeta
|
||||
|
||||
analyzer = Analyzer()
|
||||
|
||||
# Job with container
|
||||
job1 = JobMeta(
|
||||
name="with-container",
|
||||
runs_on="ubuntu-latest",
|
||||
container=Container(image="node:18"),
|
||||
)
|
||||
|
||||
# Job with services
|
||||
job2 = JobMeta(
|
||||
name="with-services",
|
||||
runs_on="ubuntu-latest",
|
||||
services={"postgres": Service(name="postgres", image="postgres:14")},
|
||||
)
|
||||
|
||||
# Job with both
|
||||
job3 = JobMeta(
|
||||
name="with-both",
|
||||
runs_on="ubuntu-latest",
|
||||
container=Container(image="node:18"),
|
||||
services={"redis": Service(name="redis", image="redis:7")},
|
||||
)
|
||||
|
||||
workflow = WorkflowMeta(
|
||||
name="Test Workflow",
|
||||
path="test.yml",
|
||||
triggers=["push"],
|
||||
jobs={
|
||||
"with-container": job1,
|
||||
"with-services": job2,
|
||||
"with-both": job3,
|
||||
},
|
||||
)
|
||||
|
||||
workflows = {"test.yml": workflow}
|
||||
analysis = analyzer.analyze_workflows(workflows, {})
|
||||
|
||||
# Should count containers and services
|
||||
assert analysis["containers"]["jobs_with_containers"] == 2
|
||||
assert analysis["containers"]["jobs_with_services"] == 2
|
||||
58
tests/test_cache.py
Normal file
58
tests/test_cache.py
Normal file
@@ -0,0 +1,58 @@
|
||||
"""Tests for cache module."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from ghaw_auditor.cache import Cache
|
||||
|
||||
|
||||
def test_cache_initialization(tmp_path: Path) -> None:
|
||||
"""Test cache can be initialized."""
|
||||
cache = Cache(tmp_path / "cache")
|
||||
assert cache.cache_dir.exists()
|
||||
cache.close()
|
||||
|
||||
|
||||
def test_cache_set_get(tmp_path: Path) -> None:
|
||||
"""Test cache set and get."""
|
||||
cache = Cache(tmp_path / "cache")
|
||||
|
||||
cache.set("test_key", "test_value")
|
||||
value = cache.get("test_key")
|
||||
|
||||
assert value == "test_value"
|
||||
cache.close()
|
||||
|
||||
|
||||
def test_cache_make_key() -> None:
|
||||
"""Test cache key generation."""
|
||||
cache = Cache()
|
||||
|
||||
key1 = cache.make_key("part1", "part2", "part3")
|
||||
key2 = cache.make_key("part1", "part2", "part3")
|
||||
key3 = cache.make_key("different", "parts")
|
||||
|
||||
assert key1 == key2
|
||||
assert key1 != key3
|
||||
cache.close()
|
||||
|
||||
|
||||
def test_cache_clear(tmp_path: Path) -> None:
|
||||
"""Test cache clear."""
|
||||
cache = Cache(tmp_path / "cache")
|
||||
|
||||
# Add some values
|
||||
cache.set("key1", "value1")
|
||||
cache.set("key2", "value2")
|
||||
|
||||
# Verify they exist
|
||||
assert cache.get("key1") == "value1"
|
||||
assert cache.get("key2") == "value2"
|
||||
|
||||
# Clear cache
|
||||
cache.clear()
|
||||
|
||||
# Verify values are gone
|
||||
assert cache.get("key1") is None
|
||||
assert cache.get("key2") is None
|
||||
|
||||
cache.close()
|
||||
584
tests/test_cli.py
Normal file
584
tests/test_cli.py
Normal file
@@ -0,0 +1,584 @@
|
||||
"""Integration tests for CLI commands."""
|
||||
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
from typer.testing import CliRunner
|
||||
|
||||
from ghaw_auditor.cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
|
||||
def test_scan_command_basic(tmp_path: Path) -> None:
|
||||
"""Test basic scan command."""
|
||||
output_dir = tmp_path / "output"
|
||||
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(app, ["scan", "--repo", str(tmp_path), "--output", str(output_dir), "--offline"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Scanning repository" in result.stdout
|
||||
|
||||
|
||||
def test_scan_command_with_token(tmp_path: Path) -> None:
|
||||
"""Test scan with GitHub token."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["scan", "--repo", str(tmp_path), "--token", "test_token", "--offline"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_inventory_command(tmp_path: Path) -> None:
|
||||
"""Test inventory command."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
|
||||
result = runner.invoke(app, ["inventory", "--repo", str(tmp_path)])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Unique Actions" in result.stdout
|
||||
|
||||
|
||||
def test_validate_command(tmp_path: Path) -> None:
|
||||
"""Test validate command."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
|
||||
result = runner.invoke(app, ["validate", "--repo", str(tmp_path)])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_version_command() -> None:
|
||||
"""Test version command."""
|
||||
result = runner.invoke(app, ["version"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "ghaw-auditor version" in result.stdout
|
||||
|
||||
|
||||
def test_scan_command_verbose(tmp_path: Path) -> None:
|
||||
"""Test scan with verbose flag."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(app, ["scan", "--repo", str(tmp_path), "--verbose", "--offline"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_quiet(tmp_path: Path) -> None:
|
||||
"""Test scan with quiet flag."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(app, ["scan", "--repo", str(tmp_path), "--quiet", "--offline"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_nonexistent_repo() -> None:
|
||||
"""Test scan with nonexistent repository."""
|
||||
result = runner.invoke(app, ["scan", "--repo", "/nonexistent/path"])
|
||||
|
||||
assert result.exit_code in (1, 2) # Either repo not found or other error
|
||||
assert "Repository not found" in result.stdout or result.exit_code == 2
|
||||
|
||||
|
||||
def test_scan_command_with_log_json(tmp_path: Path) -> None:
|
||||
"""Test scan with JSON logging."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(app, ["scan", "--repo", str(tmp_path), "--log-json", "--offline"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_with_policy_file(tmp_path: Path) -> None:
|
||||
"""Test scan with policy file."""
|
||||
policy_file = tmp_path / "policy.yml"
|
||||
policy_file.write_text("require_pinned_actions: true")
|
||||
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
[
|
||||
"scan",
|
||||
"--repo",
|
||||
str(tmp_path),
|
||||
"--policy-file",
|
||||
str(policy_file),
|
||||
"--offline",
|
||||
],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_with_violations(tmp_path: Path) -> None:
|
||||
"""Test scan with policy violations."""
|
||||
# Create workflow with unpinned action
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "ci.yml").write_text(
|
||||
"""
|
||||
name: CI
|
||||
on: push
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@main
|
||||
"""
|
||||
)
|
||||
|
||||
policy_file = tmp_path / "policy.yml"
|
||||
policy_file.write_text("require_pinned_actions: true")
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
[
|
||||
"scan",
|
||||
"--repo",
|
||||
str(tmp_path),
|
||||
"--policy-file",
|
||||
str(policy_file),
|
||||
"--offline",
|
||||
],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "policy violations" in result.stdout
|
||||
|
||||
|
||||
def test_scan_command_with_enforcement(tmp_path: Path) -> None:
|
||||
"""Test scan with policy enforcement."""
|
||||
# Create workflow with unpinned action
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "ci.yml").write_text(
|
||||
"""
|
||||
name: CI
|
||||
on: push
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@main
|
||||
"""
|
||||
)
|
||||
|
||||
policy_file = tmp_path / "policy.yml"
|
||||
policy_file.write_text("require_pinned_actions: true")
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
[
|
||||
"scan",
|
||||
"--repo",
|
||||
str(tmp_path),
|
||||
"--policy-file",
|
||||
str(policy_file),
|
||||
"--enforce",
|
||||
"--offline",
|
||||
],
|
||||
)
|
||||
|
||||
# Should exit with error due to violations
|
||||
assert result.exit_code in (1, 2) # Exit code 1 from policy, or 2 from exception handling
|
||||
# Check that enforcement was triggered
|
||||
assert "policy violations" in result.stdout or "Policy enforcement failed" in result.stdout
|
||||
|
||||
|
||||
def test_scan_command_with_diff_mode(tmp_path: Path) -> None:
|
||||
"""Test scan in diff mode."""
|
||||
# Create baseline
|
||||
baseline_dir = tmp_path / "baseline"
|
||||
baseline_dir.mkdir()
|
||||
|
||||
from ghaw_auditor.differ import Differ
|
||||
from ghaw_auditor.models import WorkflowMeta
|
||||
|
||||
differ = Differ(baseline_dir)
|
||||
workflow = WorkflowMeta(name="Test", path="test.yml", triggers=["push"], jobs={})
|
||||
differ.save_baseline({"test.yml": workflow}, {})
|
||||
|
||||
# Create workflow
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "test.yml").write_text("name: Test\non: push\njobs: {}")
|
||||
|
||||
output_dir = tmp_path / "output"
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
[
|
||||
"scan",
|
||||
"--repo",
|
||||
str(tmp_path),
|
||||
"--diff",
|
||||
"--baseline",
|
||||
str(baseline_dir),
|
||||
"--output",
|
||||
str(output_dir),
|
||||
"--offline",
|
||||
],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Running diff" in result.stdout
|
||||
|
||||
|
||||
def test_scan_command_with_write_baseline(tmp_path: Path) -> None:
|
||||
"""Test scan with baseline writing."""
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "ci.yml").write_text("name: CI\non: push\njobs:\n test:\n runs-on: ubuntu-latest")
|
||||
|
||||
baseline_dir = tmp_path / "baseline"
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
[
|
||||
"scan",
|
||||
"--repo",
|
||||
str(tmp_path),
|
||||
"--write-baseline",
|
||||
"--baseline",
|
||||
str(baseline_dir),
|
||||
"--offline",
|
||||
],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Baseline saved" in result.stdout
|
||||
assert baseline_dir.exists()
|
||||
|
||||
|
||||
def test_scan_command_with_format_json(tmp_path: Path) -> None:
|
||||
"""Test scan with JSON format only."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["scan", "--repo", str(tmp_path), "--format-type", "json", "--offline"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_with_format_md(tmp_path: Path) -> None:
|
||||
"""Test scan with Markdown format only."""
|
||||
with patch("ghaw_auditor.cli.Scanner") as mock_scanner:
|
||||
mock_scanner.return_value.find_workflows.return_value = []
|
||||
mock_scanner.return_value.find_actions.return_value = []
|
||||
|
||||
result = runner.invoke(
|
||||
app,
|
||||
["scan", "--repo", str(tmp_path), "--format-type", "md", "--offline"],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_inventory_command_with_error(tmp_path: Path) -> None:
|
||||
"""Test inventory command with parse error."""
|
||||
# Create invalid workflow
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "invalid.yml").write_text("invalid: yaml: {{{")
|
||||
|
||||
result = runner.invoke(app, ["inventory", "--repo", str(tmp_path)])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Unique Actions" in result.stdout
|
||||
|
||||
|
||||
def test_inventory_command_verbose_with_error(tmp_path: Path) -> None:
|
||||
"""Test inventory command verbose mode with error."""
|
||||
# Create invalid workflow
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "invalid.yml").write_text("invalid: yaml: {{{")
|
||||
|
||||
result = runner.invoke(app, ["inventory", "--repo", str(tmp_path), "--verbose"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_validate_command_with_violations(tmp_path: Path) -> None:
|
||||
"""Test validate command with violations."""
|
||||
# Create workflow with unpinned action
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "ci.yml").write_text(
|
||||
"""
|
||||
name: CI
|
||||
on: push
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@main
|
||||
"""
|
||||
)
|
||||
|
||||
result = runner.invoke(app, ["validate", "--repo", str(tmp_path)])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "policy violations" in result.stdout
|
||||
|
||||
|
||||
def test_validate_command_with_enforcement(tmp_path: Path) -> None:
|
||||
"""Test validate command with enforcement."""
|
||||
# Create workflow with unpinned action
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "ci.yml").write_text(
|
||||
"""
|
||||
name: CI
|
||||
on: push
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@main
|
||||
"""
|
||||
)
|
||||
|
||||
result = runner.invoke(app, ["validate", "--repo", str(tmp_path), "--enforce"])
|
||||
|
||||
# Should exit with error
|
||||
assert result.exit_code == 1
|
||||
|
||||
|
||||
def test_validate_command_no_violations(tmp_path: Path) -> None:
|
||||
"""Test validate command with no violations."""
|
||||
# Create workflow with pinned action (valid 40-char SHA)
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "ci.yml").write_text(
|
||||
"""
|
||||
name: CI
|
||||
on: push
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@a81bbbf8298c0fa03ea29cdc473d45769f953675
|
||||
"""
|
||||
)
|
||||
|
||||
result = runner.invoke(app, ["validate", "--repo", str(tmp_path)])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "No policy violations found" in result.stdout
|
||||
|
||||
|
||||
def test_validate_command_with_error(tmp_path: Path) -> None:
|
||||
"""Test validate command with parse error."""
|
||||
# Create invalid workflow
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "invalid.yml").write_text("invalid: yaml: {{{")
|
||||
|
||||
result = runner.invoke(app, ["validate", "--repo", str(tmp_path)])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_validate_command_verbose_with_error(tmp_path: Path) -> None:
|
||||
"""Test validate command verbose mode with error."""
|
||||
# Create invalid workflow
|
||||
workflows_dir = tmp_path / ".github" / "workflows"
|
||||
workflows_dir.mkdir(parents=True)
|
||||
(workflows_dir / "invalid.yml").write_text("invalid: yaml: {{{")
|
||||
|
||||
result = runner.invoke(app, ["validate", "--repo", str(tmp_path), "--verbose"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_diff_baseline_not_found(tmp_path: Path) -> None:
    """Scan with --diff succeeds even when the baseline directory is missing."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    (wf_dir / "ci.yml").write_text("name: CI\non: push\njobs:\n  test:\n    runs-on: ubuntu-latest")

    missing_baseline = tmp_path / "nonexistent_baseline"
    out_dir = tmp_path / "output"

    cli_args = [
        "scan",
        "--repo",
        str(tmp_path),
        "--diff",
        "--baseline",
        str(missing_baseline),
        "--output",
        str(out_dir),
        "--offline",
    ]
    outcome = runner.invoke(app, cli_args)

    # The missing baseline is logged rather than aborting the scan.
    assert outcome.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_general_exception(tmp_path: Path) -> None:
    """Scan exits with code 2 when service creation blows up."""
    # Force the factory itself to fail before any scanning starts.
    with patch("ghaw_auditor.cli.AuditServiceFactory") as factory_mock:
        factory_mock.create.side_effect = RuntimeError("Factory failed")

        outcome = runner.invoke(app, ["scan", "--repo", str(tmp_path), "--offline"])

    assert outcome.exit_code == 2
|
||||
|
||||
|
||||
def test_inventory_command_parse_error_verbose(tmp_path: Path) -> None:
    """Inventory in verbose mode swallows parse exceptions."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    # This content cannot be parsed as a workflow.
    (wf_dir / "bad.yml").write_text("!!invalid yaml!!")

    outcome = runner.invoke(app, ["inventory", "--repo", str(tmp_path), "--verbose"])

    # The exception is caught internally, so the command still succeeds.
    assert outcome.exit_code == 0
|
||||
|
||||
|
||||
def test_validate_command_parse_error_verbose(tmp_path: Path) -> None:
    """Validate in verbose mode swallows parse exceptions."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    # Unparseable content exercises the caught-exception path.
    (wf_dir / "bad.yml").write_text("!!invalid yaml!!")

    outcome = runner.invoke(app, ["validate", "--repo", str(tmp_path), "--verbose"])

    assert outcome.exit_code == 0
|
||||
|
||||
|
||||
def test_scan_command_with_resolver_exception(tmp_path: Path) -> None:
    """Scan exits with code 2 when the service's scan() raises."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    (wf_dir / "ci.yml").write_text(
        """
name: CI
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
"""
    )

    # Replace the service so scan() fails after construction succeeds.
    with patch("ghaw_auditor.cli.AuditServiceFactory") as factory_mock:
        failing_service = Mock()
        failing_service.scan.side_effect = Exception("Resolver error")
        factory_mock.create.return_value = failing_service

        outcome = runner.invoke(app, ["scan", "--repo", str(tmp_path), "--offline"])

    assert outcome.exit_code == 2
|
||||
|
||||
|
||||
def test_inventory_command_with_actions(tmp_path: Path) -> None:
    """Inventory lists the unique actions referenced by a workflow."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    (wf_dir / "ci.yml").write_text(
        """
name: CI
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
"""
    )

    outcome = runner.invoke(app, ["inventory", "--repo", str(tmp_path)])

    assert outcome.exit_code == 0
    assert "Unique Actions" in outcome.stdout
    # The action list should appear, either by name or as bullet items.
    assert "actions/checkout" in outcome.stdout or "•" in outcome.stdout
|
||||
|
||||
|
||||
def test_validate_command_with_policy_file(tmp_path: Path) -> None:
    """Validate accepts an explicit --policy-file argument."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    (wf_dir / "ci.yml").write_text(
        """
name: CI
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
"""
    )

    policy_path = tmp_path / "policy.yml"
    policy_path.write_text("require_pinned_actions: true")

    outcome = runner.invoke(
        app,
        ["validate", "--repo", str(tmp_path), "--policy-file", str(policy_path)],
    )

    # The policy file exists, so the policy-loading branch executes.
    assert outcome.exit_code == 0
|
||||
376
tests/test_differ.py
Normal file
376
tests/test_differ.py
Normal file
@@ -0,0 +1,376 @@
|
||||
"""Tests for differ module."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from ghaw_auditor.differ import Differ
|
||||
from ghaw_auditor.models import (
|
||||
ActionManifest,
|
||||
JobMeta,
|
||||
PermissionLevel,
|
||||
Permissions,
|
||||
WorkflowMeta,
|
||||
)
|
||||
|
||||
|
||||
def test_save_and_load_baseline(tmp_path: Path) -> None:
    """Round-trip a baseline through save_baseline / load_baseline."""
    baseline_dir = tmp_path / "baseline"
    d = Differ(baseline_dir)

    wf = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={"test": JobMeta(name="test", runs_on="ubuntu-latest")},
    )
    manifest = ActionManifest(name="Checkout", description="Checkout code")

    d.save_baseline({"test.yml": wf}, {"actions/checkout@v4": manifest}, "abc123")

    # All three baseline artifacts should now exist on disk.
    for artifact in ("workflows.json", "actions.json", "meta.json"):
        assert (baseline_dir / artifact).exists()

    loaded = d.load_baseline()
    assert loaded.meta.commit_sha == "abc123"
    assert len(loaded.workflows) == 1
    assert len(loaded.actions) == 1
|
||||
|
||||
|
||||
def test_diff_workflows(tmp_path: Path) -> None:
    """A trigger change is reported as a modification with changes."""
    d = Differ(tmp_path / "baseline")

    before = WorkflowMeta(name="Test", path="test.yml", triggers=["push"], jobs={})
    after = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push", "pull_request"],
        jobs={},
    )

    result = d.diff_workflows({"test.yml": before}, {"test.yml": after})

    assert len(result) == 1
    assert result[0].status == "modified"
    assert len(result[0].changes) > 0
|
||||
|
||||
|
||||
def test_diff_added_workflow(tmp_path: Path) -> None:
    """A workflow present only in the new set is flagged as added."""
    d = Differ(tmp_path / "baseline")

    added = WorkflowMeta(name="New", path="new.yml", triggers=["push"], jobs={})

    result = d.diff_workflows({}, {"new.yml": added})

    assert len(result) == 1
    assert result[0].status == "added"
    assert result[0].path == "new.yml"
|
||||
|
||||
|
||||
def test_diff_removed_workflow(tmp_path: Path) -> None:
    """A workflow present only in the old set is flagged as removed."""
    d = Differ(tmp_path / "baseline")

    removed = WorkflowMeta(name="Old", path="old.yml", triggers=["push"], jobs={})

    result = d.diff_workflows({"old.yml": removed}, {})

    assert len(result) == 1
    assert result[0].status == "removed"
    assert result[0].path == "old.yml"
|
||||
|
||||
|
||||
def test_load_baseline_not_found(tmp_path: Path) -> None:
    """Loading a missing baseline directory raises FileNotFoundError."""
    d = Differ(tmp_path / "nonexistent")

    with pytest.raises(FileNotFoundError, match="Baseline not found"):
        d.load_baseline()
|
||||
|
||||
|
||||
def test_load_baseline_without_meta(tmp_path: Path) -> None:
    """A baseline missing meta.json still loads with default metadata."""
    baseline_dir = tmp_path / "baseline"
    baseline_dir.mkdir()

    # Provide only the workflow and action stores; omit meta.json.
    (baseline_dir / "workflows.json").write_text("{}")
    (baseline_dir / "actions.json").write_text("{}")

    loaded = Differ(baseline_dir).load_baseline()

    assert loaded.meta is not None
    assert loaded.workflows == {}
    assert loaded.actions == {}
|
||||
|
||||
|
||||
def test_diff_workflows_permissions_change(tmp_path: Path) -> None:
    """A permissions change shows up as a 'permissions' field diff."""
    d = Differ(tmp_path / "baseline")

    before = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        permissions=Permissions(contents=PermissionLevel.READ),
        jobs={},
    )
    after = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        permissions=Permissions(contents=PermissionLevel.WRITE),
        jobs={},
    )

    result = d.diff_workflows({"test.yml": before}, {"test.yml": after})

    assert len(result) == 1
    assert result[0].status == "modified"
    assert any(entry.field == "permissions" for entry in result[0].changes)
|
||||
|
||||
|
||||
def test_diff_workflows_concurrency_change(tmp_path: Path) -> None:
    """A concurrency-group change shows up as a 'concurrency' field diff."""
    d = Differ(tmp_path / "baseline")

    before = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        concurrency="group1",
        jobs={},
    )
    after = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        concurrency="group2",
        jobs={},
    )

    result = d.diff_workflows({"test.yml": before}, {"test.yml": after})

    assert len(result) == 1
    assert result[0].status == "modified"
    assert any(entry.field == "concurrency" for entry in result[0].changes)
|
||||
|
||||
|
||||
def test_diff_workflows_jobs_change(tmp_path: Path) -> None:
    """Adding a job shows up as a 'jobs' field diff."""
    d = Differ(tmp_path / "baseline")

    before = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={"build": JobMeta(name="build", runs_on="ubuntu-latest")},
    )
    after = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={
            "build": JobMeta(name="build", runs_on="ubuntu-latest"),
            "test": JobMeta(name="test", runs_on="ubuntu-latest"),
        },
    )

    result = d.diff_workflows({"test.yml": before}, {"test.yml": after})

    assert len(result) == 1
    assert result[0].status == "modified"
    assert any(entry.field == "jobs" for entry in result[0].changes)
|
||||
|
||||
|
||||
def test_diff_workflows_secrets_change(tmp_path: Path) -> None:
    """Adding a secret shows up as a 'secrets_used' field diff."""
    d = Differ(tmp_path / "baseline")

    before = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        secrets_used={"API_KEY"},
    )
    after = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        secrets_used={"API_KEY", "DATABASE_URL"},
    )

    result = d.diff_workflows({"test.yml": before}, {"test.yml": after})

    assert len(result) == 1
    assert result[0].status == "modified"
    assert any(entry.field == "secrets_used" for entry in result[0].changes)
|
||||
|
||||
|
||||
def test_diff_workflows_unchanged(tmp_path: Path) -> None:
    """Identical workflows diff as 'unchanged' with no change entries."""
    d = Differ(tmp_path / "baseline")

    wf = WorkflowMeta(name="Test", path="test.yml", triggers=["push"], jobs={})

    result = d.diff_workflows({"test.yml": wf}, {"test.yml": wf})

    assert len(result) == 1
    assert result[0].status == "unchanged"
    assert len(result[0].changes) == 0
|
||||
|
||||
|
||||
def test_diff_actions_added(tmp_path: Path) -> None:
    """An action present only in the new set is flagged as added."""
    d = Differ(tmp_path / "baseline")

    manifest = ActionManifest(name="New Action", description="Test")

    result = d.diff_actions({}, {"actions/new@v1": manifest})

    assert len(result) == 1
    assert result[0].status == "added"
    assert result[0].key == "actions/new@v1"
|
||||
|
||||
|
||||
def test_diff_actions_removed(tmp_path: Path) -> None:
    """An action present only in the old set is flagged as removed."""
    d = Differ(tmp_path / "baseline")

    manifest = ActionManifest(name="Old Action", description="Test")

    result = d.diff_actions({"actions/old@v1": manifest}, {})

    assert len(result) == 1
    assert result[0].status == "removed"
    assert result[0].key == "actions/old@v1"
|
||||
|
||||
|
||||
def test_diff_actions_unchanged(tmp_path: Path) -> None:
    """Identical action manifests diff as 'unchanged'."""
    d = Differ(tmp_path / "baseline")

    manifest = ActionManifest(name="Test Action", description="Test")

    result = d.diff_actions(
        {"actions/test@v1": manifest},
        {"actions/test@v1": manifest},
    )

    assert len(result) == 1
    assert result[0].status == "unchanged"
    assert len(result[0].changes) == 0
|
||||
|
||||
|
||||
def test_render_diff_markdown(tmp_path: Path) -> None:
    """The Markdown report includes every diffed workflow, action, and field."""
    from ghaw_auditor.models import ActionDiff, DiffEntry, WorkflowDiff

    d = Differ(tmp_path / "baseline")

    wf_diffs = [
        WorkflowDiff(path="added.yml", status="added", changes=[]),
        WorkflowDiff(path="removed.yml", status="removed", changes=[]),
        WorkflowDiff(
            path="modified.yml",
            status="modified",
            changes=[
                DiffEntry(
                    field="triggers",
                    old_value=["push"],
                    new_value=["push", "pull_request"],
                    change_type="modified",
                )
            ],
        ),
    ]
    act_diffs = [
        ActionDiff(key="actions/new@v1", status="added", changes=[]),
        ActionDiff(key="actions/old@v1", status="removed", changes=[]),
    ]

    report_path = tmp_path / "diff.md"
    d.render_diff_markdown(wf_diffs, act_diffs, report_path)

    assert report_path.exists()
    report = report_path.read_text()

    # Headers plus every item fed in must appear in the rendered report.
    for expected in (
        "# Audit Diff Report",
        "## Workflow Changes",
        "## Action Changes",
        "added.yml",
        "removed.yml",
        "modified.yml",
        "actions/new@v1",
        "actions/old@v1",
        "triggers",
    ):
        assert expected in report
|
||||
|
||||
|
||||
def test_render_diff_markdown_empty(tmp_path: Path) -> None:
    """An empty diff still renders a report with zeroed counters."""
    d = Differ(tmp_path / "baseline")

    report_path = tmp_path / "diff.md"
    d.render_diff_markdown([], [], report_path)

    assert report_path.exists()
    report = report_path.read_text()

    assert "# Audit Diff Report" in report
    assert "**Added:** 0" in report
    assert "**Removed:** 0" in report
|
||||
81
tests/test_factory.py
Normal file
81
tests/test_factory.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""Tests for factory module."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from ghaw_auditor.factory import AuditServiceFactory
|
||||
from ghaw_auditor.models import Policy
|
||||
|
||||
|
||||
def test_factory_create_basic(tmp_path: Path) -> None:
    """Offline factory wires scanner/parser/analyzer but no resolver/validator."""
    svc = AuditServiceFactory.create(repo_path=tmp_path, offline=True)

    assert svc.scanner is not None
    assert svc.parser is not None
    assert svc.analyzer is not None
    # Offline mode skips the resolver; no policy means no validator.
    assert svc.resolver is None
    assert svc.validator is None
|
||||
|
||||
|
||||
def test_factory_create_with_policy(tmp_path: Path) -> None:
    """Supplying a policy makes the factory attach a validator."""
    svc = AuditServiceFactory.create(
        repo_path=tmp_path,
        offline=True,
        policy=Policy(require_pinned_actions=True),
    )

    assert svc.validator is not None
|
||||
|
||||
|
||||
def test_factory_create_with_resolver(tmp_path: Path) -> None:
    """Online mode with a token makes the factory attach a resolver."""
    svc = AuditServiceFactory.create(
        repo_path=tmp_path,
        offline=False,
        token="test_token",
    )

    assert svc.resolver is not None
|
||||
|
||||
|
||||
def test_factory_create_with_exclude_patterns(tmp_path: Path) -> None:
    """Exclusion patterns are forwarded to the scanner."""
    patterns = ["**/node_modules/**", "**/dist/**"]
    svc = AuditServiceFactory.create(
        repo_path=tmp_path,
        offline=True,
        exclude_patterns=patterns,
    )

    assert len(svc.scanner.exclude_patterns) == 2
|
||||
|
||||
|
||||
def test_factory_create_with_cache_dir(tmp_path: Path) -> None:
    """A custom cache directory is accepted without error."""
    svc = AuditServiceFactory.create(
        repo_path=tmp_path,
        offline=True,
        cache_dir=tmp_path / "custom_cache",
    )

    # Creation succeeding is the assertion; cache wiring is internal.
    assert svc is not None
|
||||
|
||||
|
||||
def test_factory_create_with_concurrency(tmp_path: Path) -> None:
    """The concurrency setting propagates to the resolver."""
    svc = AuditServiceFactory.create(
        repo_path=tmp_path,
        offline=False,
        concurrency=8,
    )

    assert svc.resolver is not None
    assert svc.resolver.concurrency == 8
|
||||
399
tests/test_github_client.py
Normal file
399
tests/test_github_client.py
Normal file
@@ -0,0 +1,399 @@
|
||||
"""Tests for GitHub client module."""
|
||||
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from ghaw_auditor.github_client import GitHubClient, should_retry_http_error
|
||||
|
||||
|
||||
def test_github_client_initialization_no_token() -> None:
    """Without a token the client sets no Authorization header."""
    gh = GitHubClient()

    assert gh.base_url == "https://api.github.com"
    assert "Accept" in gh.headers
    assert "Authorization" not in gh.headers
    assert gh.client is not None

    gh.close()
|
||||
|
||||
|
||||
def test_github_client_initialization_with_token() -> None:
    """With a token the client sends a Bearer Authorization header."""
    gh = GitHubClient(token="ghp_test123")

    assert "Authorization" in gh.headers
    assert gh.headers["Authorization"] == "Bearer ghp_test123"

    gh.close()
|
||||
|
||||
|
||||
def test_github_client_custom_base_url() -> None:
    """A custom base URL (e.g. GitHub Enterprise) is preserved."""
    gh = GitHubClient(base_url="https://github.enterprise.com/api/v3")

    assert gh.base_url == "https://github.enterprise.com/api/v3"

    gh.close()
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_get_ref_sha_success(mock_client_class: Mock) -> None:
    """get_ref_sha returns the SHA from the commits endpoint."""
    fake_resp = Mock()
    fake_resp.json.return_value = {"sha": "abc123def456"}
    fake_resp.raise_for_status = Mock()

    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    sha = GitHubClient(token="test").get_ref_sha("actions", "checkout", "v4")

    assert sha == "abc123def456"
    # Exactly one GET against the commits-by-ref endpoint.
    fake_http.get.assert_called_once_with("https://api.github.com/repos/actions/checkout/commits/v4")
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_get_ref_sha_http_error(mock_client_class: Mock) -> None:
    """A 404 on get_ref_sha propagates without retrying."""
    err_resp = Mock()
    err_resp.status_code = 404

    fake_resp = Mock()
    fake_resp.raise_for_status.side_effect = httpx.HTTPStatusError(
        "404 Not Found",
        request=Mock(),
        response=err_resp,
    )

    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    # 404 is non-retryable, so the raw HTTPStatusError surfaces.
    gh = GitHubClient(token="test")
    with pytest.raises(httpx.HTTPStatusError):
        gh.get_ref_sha("actions", "nonexistent", "v1")
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_get_file_content_success(mock_client_class: Mock) -> None:
    """get_file_content returns the raw file body from raw.githubusercontent."""
    fake_resp = Mock()
    fake_resp.text = "name: Test Action\\nruns:\\n using: node20"
    fake_resp.raise_for_status = Mock()

    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    body = GitHubClient().get_file_content("actions", "checkout", "action.yml", "abc123")

    assert "Test Action" in body
    fake_http.get.assert_called_once_with("https://raw.githubusercontent.com/actions/checkout/abc123/action.yml")
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_get_file_content_http_error(mock_client_class: Mock) -> None:
    """A 404 on get_file_content propagates without retrying."""
    err_resp = Mock()
    err_resp.status_code = 404

    fake_resp = Mock()
    fake_resp.raise_for_status.side_effect = httpx.HTTPStatusError(
        "404 Not Found",
        request=Mock(),
        response=err_resp,
    )

    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    # 404 is non-retryable, so the raw HTTPStatusError surfaces.
    gh = GitHubClient()
    with pytest.raises(httpx.HTTPStatusError):
        gh.get_file_content("actions", "checkout", "missing.yml", "abc123")
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_context_manager(mock_client_class: Mock) -> None:
    """Leaving the `with` block closes the underlying HTTP client."""
    fake_http = Mock()
    mock_client_class.return_value = fake_http

    with GitHubClient(token="test") as gh:
        assert gh is not None
        assert isinstance(gh, GitHubClient)

    fake_http.close.assert_called_once()
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_close(mock_client_class: Mock) -> None:
    """close() delegates to the underlying HTTP client's close()."""
    fake_http = Mock()
    mock_client_class.return_value = fake_http

    GitHubClient().close()

    fake_http.close.assert_called_once()
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_logs_successful_ref_sha(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
    """Successful SHA resolution emits DEBUG-level fetch/resolve messages."""
    import logging

    fake_resp = Mock()
    fake_resp.json.return_value = {"sha": "abc123def"}
    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    with caplog.at_level(logging.DEBUG):
        sha = GitHubClient(token="test").get_ref_sha("actions", "checkout", "v4")

    assert sha == "abc123def"
    assert "Fetching ref SHA: actions/checkout@v4" in caplog.text
    assert "Resolved actions/checkout@v4 -> abc123def" in caplog.text
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_logs_4xx_error(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
    """A 404 is surfaced with a user-friendly ERROR-level message."""
    import logging

    fake_resp = Mock()
    fake_resp.status_code = 404
    fake_resp.raise_for_status.side_effect = httpx.HTTPStatusError(
        "Not found", request=Mock(), response=fake_resp
    )
    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    with caplog.at_level(logging.ERROR):
        gh = GitHubClient()
        with pytest.raises(httpx.HTTPStatusError):
            gh.get_ref_sha("actions", "nonexistent", "v1")

    # The log should identify both the failure mode and the action ref.
    assert "Action not found" in caplog.text
    assert "actions/nonexistent@v1" in caplog.text
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_logs_successful_file_content(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
    """Successful file downloads emit DEBUG-level fetch/size messages."""
    import logging

    fake_resp = Mock()
    fake_resp.text = "name: Checkout\ndescription: Test"
    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    with caplog.at_level(logging.DEBUG):
        body = GitHubClient(token="test").get_file_content("actions", "checkout", "action.yml", "v4")

    assert body == "name: Checkout\ndescription: Test"
    assert "Fetching file: actions/checkout/action.yml@v4" in caplog.text
    assert "Downloaded action.yml" in caplog.text
    assert "bytes" in caplog.text
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_retries_5xx_errors(mock_client_class: Mock) -> None:
    """A persistent 500 is retried three times before RetryError."""
    from tenacity import RetryError

    fake_resp = Mock()
    fake_resp.status_code = 500
    fake_resp.raise_for_status.side_effect = httpx.HTTPStatusError(
        "Server error", request=Mock(), response=fake_resp
    )
    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    gh = GitHubClient()
    with pytest.raises(RetryError):
        gh.get_ref_sha("actions", "checkout", "v1")

    # Three attempts total before giving up.
    assert fake_http.get.call_count == 3
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_logs_5xx_warning(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
    """A 503 produces a WARNING log entry naming the status code."""
    import logging

    from tenacity import RetryError

    fake_resp = Mock()
    fake_resp.status_code = 503
    fake_resp.raise_for_status.side_effect = httpx.HTTPStatusError(
        "Service unavailable", request=Mock(), response=fake_resp
    )
    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    with caplog.at_level(logging.WARNING):
        gh = GitHubClient()
        with pytest.raises(RetryError):
            gh.get_file_content("actions", "checkout", "action.yml", "v4")

    assert "HTTP 503" in caplog.text
|
||||
|
||||
|
||||
def test_should_retry_http_error_network_errors() -> None:
    """Transport-level failures are retryable."""
    exc = httpx.RequestError("Connection failed")
    assert should_retry_http_error(exc) is True
|
||||
|
||||
|
||||
def test_should_retry_http_error_404() -> None:
    """A 404 is a definitive answer — never retried."""
    resp = Mock()
    resp.status_code = 404
    exc = httpx.HTTPStatusError("Not found", request=Mock(), response=resp)
    assert should_retry_http_error(exc) is False
|
||||
|
||||
|
||||
def test_should_retry_http_error_403() -> None:
    """A 403 (auth/permission) is never retried."""
    resp = Mock()
    resp.status_code = 403
    exc = httpx.HTTPStatusError("Forbidden", request=Mock(), response=resp)
    assert should_retry_http_error(exc) is False
|
||||
|
||||
|
||||
def test_should_retry_http_error_429() -> None:
    """Rate limiting (429) is transient and therefore retryable."""
    resp = Mock()
    resp.status_code = 429
    exc = httpx.HTTPStatusError("Rate limited", request=Mock(), response=resp)
    assert should_retry_http_error(exc) is True
|
||||
|
||||
|
||||
def test_should_retry_http_error_500() -> None:
    """Server-side errors (500) are retryable."""
    resp = Mock()
    resp.status_code = 500
    exc = httpx.HTTPStatusError("Server error", request=Mock(), response=resp)
    assert should_retry_http_error(exc) is True
|
||||
|
||||
|
||||
def test_should_retry_http_error_other() -> None:
    """Exceptions that are not httpx errors are never retried."""
    exc = ValueError("Some other error")
    assert should_retry_http_error(exc) is False
|
||||
|
||||
|
||||
@patch("httpx.Client")
def test_github_client_logs_403_error(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
    """A 403 is surfaced with a user-friendly permissions hint."""
    import logging

    fake_resp = Mock()
    fake_resp.status_code = 403
    fake_resp.raise_for_status.side_effect = httpx.HTTPStatusError(
        "Forbidden", request=Mock(), response=fake_resp
    )
    fake_http = Mock()
    fake_http.get.return_value = fake_resp
    mock_client_class.return_value = fake_http

    with caplog.at_level(logging.ERROR):
        gh = GitHubClient()
        with pytest.raises(httpx.HTTPStatusError):
            gh.get_ref_sha("actions", "checkout", "v1")

    assert "Access denied (check token permissions)" in caplog.text
|
||||
|
||||
|
||||
@patch("httpx.Client")
|
||||
def test_github_client_logs_401_error(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
|
||||
"""Test that 401 errors are logged with user-friendly messages."""
|
||||
import logging
|
||||
|
||||
mock_http_client = Mock()
|
||||
mock_response = Mock()
|
||||
mock_response.status_code = 401
|
||||
mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
|
||||
"Unauthorized", request=Mock(), response=mock_response
|
||||
)
|
||||
mock_http_client.get.return_value = mock_response
|
||||
mock_client_class.return_value = mock_http_client
|
||||
|
||||
with caplog.at_level(logging.ERROR):
|
||||
client = GitHubClient()
|
||||
with pytest.raises(httpx.HTTPStatusError):
|
||||
client.get_file_content("actions", "checkout", "action.yml", "abc123")
|
||||
|
||||
assert "Authentication required" in caplog.text
|
||||
|
||||
|
||||
@patch("httpx.Client")
|
||||
def test_github_client_logs_401_error_get_ref_sha(mock_client_class: Mock, caplog: pytest.LogCaptureFixture) -> None:
|
||||
"""Test that 401 errors are logged in get_ref_sha."""
|
||||
import logging
|
||||
|
||||
mock_http_client = Mock()
|
||||
mock_response = Mock()
|
||||
mock_response.status_code = 401
|
||||
mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
|
||||
"Unauthorized", request=Mock(), response=mock_response
|
||||
)
|
||||
mock_http_client.get.return_value = mock_response
|
||||
mock_client_class.return_value = mock_http_client
|
||||
|
||||
with caplog.at_level(logging.ERROR):
|
||||
client = GitHubClient()
|
||||
with pytest.raises(httpx.HTTPStatusError):
|
||||
client.get_ref_sha("actions", "checkout", "v1")
|
||||
|
||||
assert "Authentication required" in caplog.text
|
||||
|
||||
|
||||
@patch("httpx.Client")
|
||||
def test_github_client_logs_403_error_get_file_content(
|
||||
mock_client_class: Mock, caplog: pytest.LogCaptureFixture
|
||||
) -> None:
|
||||
"""Test that 403 errors are logged in get_file_content."""
|
||||
import logging
|
||||
|
||||
mock_http_client = Mock()
|
||||
mock_response = Mock()
|
||||
mock_response.status_code = 403
|
||||
mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
|
||||
"Forbidden", request=Mock(), response=mock_response
|
||||
)
|
||||
mock_http_client.get.return_value = mock_response
|
||||
mock_client_class.return_value = mock_http_client
|
||||
|
||||
with caplog.at_level(logging.ERROR):
|
||||
client = GitHubClient()
|
||||
with pytest.raises(httpx.HTTPStatusError):
|
||||
client.get_file_content("actions", "checkout", "action.yml", "abc123")
|
||||
|
||||
assert "Access denied (check token permissions)" in caplog.text
|
||||
168
tests/test_golden.py
Normal file
168
tests/test_golden.py
Normal file
@@ -0,0 +1,168 @@
|
||||
"""Golden file tests for reports."""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from ghaw_auditor.models import (
|
||||
ActionInput,
|
||||
ActionManifest,
|
||||
JobMeta,
|
||||
WorkflowMeta,
|
||||
)
|
||||
from ghaw_auditor.renderer import Renderer
|
||||
|
||||
|
||||
def test_json_workflow_output(tmp_path: Path) -> None:
    """Test workflow JSON matches golden file."""
    Renderer(tmp_path).render_json(
        {
            "test.yml": WorkflowMeta(
                name="Test Workflow",
                path="test.yml",
                triggers=["push", "pull_request"],
                jobs={
                    "test": JobMeta(
                        name="test",
                        runs_on="ubuntu-latest",
                        secrets_used={"GITHUB_TOKEN"},
                    )
                },
                secrets_used={"GITHUB_TOKEN"},
            )
        },
        {},
        [],
    )

    # Read the freshly rendered output and the checked-in golden copy.
    generated = json.loads((tmp_path / "workflows.json").read_text())
    golden = json.loads((Path(__file__).parent / "golden" / "workflows.json").read_text())

    # Compare structure; list ordering is not guaranteed, so triggers use sets.
    assert generated["test.yml"]["name"] == golden["test.yml"]["name"]
    assert set(generated["test.yml"]["triggers"]) == set(golden["test.yml"]["triggers"])
    assert generated["test.yml"]["jobs"]["test"]["runs_on"] == golden["test.yml"]["jobs"]["test"]["runs_on"]
|
||||
|
||||
|
||||
def test_json_action_output(tmp_path: Path) -> None:
    """Test action JSON matches golden file."""
    checkout = ActionManifest(
        name="Checkout",
        description="Checkout a Git repository",
        author="GitHub",
        inputs={
            "repository": ActionInput(
                name="repository",
                description="Repository name with owner",
                required=False,
            ),
            "ref": ActionInput(
                name="ref",
                description="The branch, tag or SHA to checkout",
                required=False,
            ),
        },
        runs={"using": "node20", "main": "dist/index.js"},
        is_javascript=True,
    )

    Renderer(tmp_path).render_json({}, {"actions/checkout@abc123": checkout}, [])

    # Compare the rendered file against the checked-in golden copy.
    generated = json.loads((tmp_path / "actions.json").read_text())
    golden = json.loads((Path(__file__).parent / "golden" / "actions.json").read_text())

    assert generated["actions/checkout@abc123"]["name"] == golden["actions/checkout@abc123"]["name"]
    assert generated["actions/checkout@abc123"]["is_javascript"] is True
|
||||
|
||||
|
||||
def test_markdown_report_structure(tmp_path: Path) -> None:
    """Test markdown report structure."""
    wf = WorkflowMeta(
        name="Test Workflow",
        path="test.yml",
        triggers=["push", "pull_request"],
        jobs={
            "test": JobMeta(
                name="test",
                runs_on="ubuntu-latest",
                secrets_used={"GITHUB_TOKEN"},
            )
        },
        secrets_used={"GITHUB_TOKEN"},
    )
    checkout = ActionManifest(
        name="Checkout",
        description="Checkout a Git repository",
        inputs={
            "repository": ActionInput(
                name="repository",
                description="Repository name with owner",
            ),
            "ref": ActionInput(
                name="ref",
                description="The branch, tag or SHA to checkout",
            ),
        },
    )
    analysis = {
        "total_jobs": 1,
        "reusable_workflows": 0,
        "triggers": {"push": 1, "pull_request": 1},
        "runners": {"ubuntu-latest": 1},
        "secrets": {"total_unique_secrets": 1, "secrets": ["GITHUB_TOKEN"]},
    }

    Renderer(tmp_path).render_markdown(
        {"test.yml": wf}, {"actions/checkout@abc123": checkout}, [], analysis
    )

    content = (tmp_path / "report.md").read_text()

    # Every top-level section heading must be present.
    for heading in (
        "# GitHub Actions & Workflows Audit Report",
        "## Summary",
        "## Analysis",
        "## Workflows",
        "## Actions Inventory",
    ):
        assert heading in content

    # Spot-check specific rendered values.
    for fragment in ("Test Workflow", "Checkout", "GITHUB_TOKEN", "`ubuntu-latest`"):
        assert fragment in content
|
||||
|
||||
|
||||
def test_empty_report_generation(tmp_path: Path) -> None:
    """Test report generation with empty data."""
    out = Renderer(tmp_path)
    out.render_json({}, {}, [])
    out.render_markdown({}, {}, [], {})

    # All output files must exist even when there is nothing to report.
    for filename in ("workflows.json", "actions.json", "violations.json", "report.md"):
        assert (tmp_path / filename).exists()

    # The workflows file should contain an empty JSON object.
    assert json.loads((tmp_path / "workflows.json").read_text()) == {}
|
||||
105
tests/test_models.py
Normal file
105
tests/test_models.py
Normal file
@@ -0,0 +1,105 @@
|
||||
"""Tests for models."""
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
from ghaw_auditor.models import (
|
||||
ActionInput,
|
||||
ActionManifest,
|
||||
ActionRef,
|
||||
ActionType,
|
||||
BaselineMeta,
|
||||
PermissionLevel,
|
||||
Permissions,
|
||||
)
|
||||
|
||||
|
||||
def test_action_ref_canonical_key_github() -> None:
    """Test canonical key for GitHub action."""
    github_ref = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        resolved_sha="abc123",
        source_file="test.yml",
    )
    # The pinned SHA — not the symbolic ref — forms the key.
    assert github_ref.canonical_key() == "actions/checkout@abc123"


def test_action_ref_canonical_key_local() -> None:
    """Test canonical key for local action."""
    local_ref = ActionRef(
        type=ActionType.LOCAL,
        path="./.github/actions/custom",
        source_file="test.yml",
    )
    assert local_ref.canonical_key() == "local:./.github/actions/custom"


def test_action_ref_canonical_key_reusable_workflow() -> None:
    """Test canonical key for reusable workflow."""
    workflow_ref = ActionRef(
        type=ActionType.REUSABLE_WORKFLOW,
        owner="owner",
        repo="repo",
        path=".github/workflows/reusable.yml",
        ref="v1",
        resolved_sha="abc123",
        source_file="test.yml",
    )
    # owner/repo/path@sha — the in-repo workflow path is part of the key.
    assert workflow_ref.canonical_key() == "owner/repo/.github/workflows/reusable.yml@abc123"


def test_action_ref_canonical_key_docker() -> None:
    """Test canonical key for Docker action."""
    docker_ref = ActionRef(
        type=ActionType.DOCKER,
        path="docker://alpine:3.8",
        source_file="test.yml",
    )
    assert docker_ref.canonical_key() == "docker:docker://alpine:3.8"
|
||||
|
||||
|
||||
def test_permissions_model() -> None:
    """Test permissions model."""
    perms = Permissions(
        contents=PermissionLevel.READ,
        pull_requests=PermissionLevel.WRITE,
    )

    assert perms.contents == PermissionLevel.READ
    assert perms.pull_requests == PermissionLevel.WRITE


def test_action_manifest() -> None:
    """Test action manifest model."""
    manifest = ActionManifest(
        name="Test Action",
        description="A test action",
        inputs={"test-input": ActionInput(name="test-input", required=True)},
    )

    assert manifest.name == "Test Action"
    assert "test-input" in manifest.inputs
    assert manifest.inputs["test-input"].required is True


def test_baseline_meta() -> None:
    """Test baseline metadata model."""
    meta = BaselineMeta(
        auditor_version="1.0.0",
        commit_sha="abc123",
        timestamp=datetime.now(),
    )

    assert meta.auditor_version == "1.0.0"
    assert meta.commit_sha == "abc123"
    # schema_version comes from the model's default, not the constructor call.
    assert meta.schema_version == "1.0"
|
||||
672
tests/test_parser.py
Normal file
672
tests/test_parser.py
Normal file
@@ -0,0 +1,672 @@
|
||||
"""Tests for parser module."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from ghaw_auditor.models import ActionType, PermissionLevel
|
||||
from ghaw_auditor.parser import Parser
|
||||
|
||||
FIXTURES_DIR = Path(__file__).parent / "fixtures"
|
||||
|
||||
|
||||
def test_parser_initialization() -> None:
    """Test parser can be initialized."""
    assert Parser(Path.cwd()).yaml is not None


def test_parse_basic_workflow() -> None:
    """Test parsing a basic workflow."""
    wf = Parser(FIXTURES_DIR).parse_workflow(FIXTURES_DIR / "basic-workflow.yml")

    assert wf.name == "Basic Workflow"
    assert wf.path == "basic-workflow.yml"
    assert wf.triggers == ["push"]

    assert "test" in wf.jobs
    job = wf.jobs["test"]
    assert job.runs_on == "ubuntu-latest"
    # Exactly one action step is recorded: actions/checkout.
    assert len(job.actions_used) == 1
    assert job.actions_used[0].owner == "actions"
    assert job.actions_used[0].repo == "checkout"
||||
|
||||
|
||||
def test_parse_complex_workflow() -> None:
    """Test parsing a complex workflow with all features."""
    wf = Parser(FIXTURES_DIR).parse_workflow(FIXTURES_DIR / "complex-workflow.yml")

    # Basic metadata (trigger order is not significant).
    assert wf.name == "Complex Workflow"
    assert set(wf.triggers) == {"push", "pull_request", "workflow_dispatch"}

    # Workflow-level permissions
    assert wf.permissions is not None
    assert wf.permissions.contents == PermissionLevel.READ
    assert wf.permissions.issues == PermissionLevel.WRITE
    assert wf.permissions.pull_requests == PermissionLevel.WRITE

    # Environment variables
    assert wf.env["NODE_ENV"] == "production"
    assert wf.env["API_URL"] == "https://api.example.com"

    # Concurrency and run defaults
    assert wf.concurrency is not None
    assert wf.defaults["run"]["shell"] == "bash"

    # Both jobs were discovered.
    assert "build" in wf.jobs
    assert "test" in wf.jobs

    # Build job: timeout, job-level permissions, environment mapping.
    build_job = wf.jobs["build"]
    assert build_job.timeout_minutes == 30
    assert build_job.permissions is not None
    assert build_job.environment == {"name": "production", "url": "https://example.com"}

    # Test job: dependency, condition, container config, continue-on-error.
    test_job = wf.jobs["test"]
    assert test_job.needs == ["build"]
    assert test_job.if_condition == "github.event_name == 'pull_request'"
    assert test_job.container is not None
    assert test_job.container.image == "node:20-alpine"
    assert "NODE_ENV" in test_job.container.env
    assert test_job.continue_on_error is True

    # Services
    assert "postgres" in test_job.services
    assert test_job.services["postgres"].image == "postgres:15"

    # Matrix strategy
    assert test_job.strategy is not None
    assert test_job.strategy.fail_fast is False
    assert test_job.strategy.max_parallel == 2

    # Secrets referenced anywhere in the file are aggregated on the workflow.
    for secret in ("API_KEY", "GITHUB_TOKEN", "DATABASE_URL"):
        assert secret in wf.secrets_used
||||
|
||||
|
||||
def test_parse_reusable_workflow() -> None:
    """Test parsing a reusable workflow."""
    wf = Parser(FIXTURES_DIR).parse_workflow(FIXTURES_DIR / "reusable-workflow.yml")

    assert wf.is_reusable is True
    contract = wf.reusable_contract
    assert contract is not None

    # Inputs
    assert "environment" in contract.inputs
    assert contract.inputs["environment"]["required"] is True
    assert contract.inputs["debug"]["default"] is False

    # Outputs
    assert "deployment-id" in contract.outputs

    # Secrets
    assert "deploy-token" in contract.secrets
    assert contract.secrets["deploy-token"]["required"] is True


def test_parse_workflow_with_empty_workflow_call() -> None:
    """Test parsing workflow with empty workflow_call."""
    wf = Parser(FIXTURES_DIR).parse_workflow(FIXTURES_DIR / "empty-workflow-call.yml")

    assert wf.is_reusable is True
    # An empty workflow_call yields either no contract or one with no inputs.
    assert wf.reusable_contract is None or wf.reusable_contract.inputs == {}


def test_parse_empty_workflow() -> None:
    """Test parsing an empty workflow file raises error."""
    with pytest.raises(ValueError, match="Empty workflow file"):
        Parser(FIXTURES_DIR).parse_workflow(FIXTURES_DIR / "invalid-workflow.yml")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Action Reference Parsing Tests
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def test_parse_action_ref_github() -> None:
    """Test parsing GitHub action reference."""
    result = Parser(Path.cwd())._parse_action_ref("actions/checkout@v4", Path("test.yml"))

    assert result.type == ActionType.GITHUB
    assert result.owner == "actions"
    assert result.repo == "checkout"
    assert result.ref == "v4"


def test_parse_action_ref_github_with_path() -> None:
    """Test parsing GitHub action reference with path (monorepo)."""
    result = Parser(Path.cwd())._parse_action_ref("owner/repo/path/to/action@v1", Path("test.yml"))

    assert result.type == ActionType.GITHUB
    assert result.owner == "owner"
    assert result.repo == "repo"
    assert result.path == "path/to/action"
    assert result.ref == "v1"


def test_parse_action_ref_local() -> None:
    """Test parsing local action reference."""
    result = Parser(Path.cwd())._parse_action_ref("./.github/actions/custom", Path("test.yml"))

    assert result.type == ActionType.LOCAL
    assert result.path == "./.github/actions/custom"


def test_parse_action_ref_docker() -> None:
    """Test parsing Docker action reference."""
    result = Parser(Path.cwd())._parse_action_ref("docker://alpine:3.8", Path("test.yml"))

    assert result.type == ActionType.DOCKER
    assert result.path == "docker://alpine:3.8"


def test_parse_action_ref_invalid() -> None:
    """Test parsing invalid action reference raises error."""
    with pytest.raises(ValueError, match="Invalid action reference"):
        Parser(Path.cwd())._parse_action_ref("invalid-ref", Path("test.yml"))


def test_extract_secrets() -> None:
    """Test extracting secrets from content."""
    content = """
env:
  TOKEN: ${{ secrets.GITHUB_TOKEN }}
  API_KEY: ${{ secrets.API_KEY }}
"""
    found = Parser(Path.cwd())._extract_secrets(content)

    # Both secret names are captured, and nothing else.
    assert "GITHUB_TOKEN" in found
    assert "API_KEY" in found
    assert len(found) == 2
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Trigger Extraction Tests
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def test_extract_triggers_string() -> None:
    """Test extracting triggers from string."""
    assert Parser(Path.cwd())._extract_triggers("push") == ["push"]


def test_extract_triggers_list() -> None:
    """Test extracting triggers from list."""
    assert Parser(Path.cwd())._extract_triggers(["push", "pull_request"]) == ["push", "pull_request"]


def test_extract_triggers_dict() -> None:
    """Test extracting triggers from dict."""
    trigger_config = {
        "push": {"branches": ["main"]},
        "pull_request": None,
        "workflow_dispatch": None,
    }
    extracted = Parser(Path.cwd())._extract_triggers(trigger_config)

    # Ordering is not significant for mapping-style triggers.
    assert set(extracted) == {"push", "pull_request", "workflow_dispatch"}


def test_extract_triggers_empty() -> None:
    """Test extracting triggers from empty value."""
    assert Parser(Path.cwd())._extract_triggers(None) == []
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Permissions Parsing Tests
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def test_parse_permissions_none() -> None:
    """Test parsing None permissions."""
    assert Parser(Path.cwd())._parse_permissions(None) is None


def test_parse_permissions_string() -> None:
    """Test parsing string permissions (read-all/write-all)."""
    # A blanket string permission still yields a Permissions object.
    assert Parser(Path.cwd())._parse_permissions("read-all") is not None


def test_parse_permissions_dict() -> None:
    """Test parsing dict permissions."""
    parsed = Parser(Path.cwd())._parse_permissions(
        {
            "contents": "read",
            "issues": "write",
            "pull_requests": "write",
        }
    )

    assert parsed is not None
    assert parsed.contents == PermissionLevel.READ
    assert parsed.issues == PermissionLevel.WRITE
    assert parsed.pull_requests == PermissionLevel.WRITE
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Job Parsing Tests
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def test_parse_job_with_none_data() -> None:
    """Test parsing job with None data."""
    parsed = Parser(Path.cwd())._parse_job("test", None, Path("test.yml"), "")

    assert parsed.name == "test"
    assert parsed.runs_on == "ubuntu-latest"  # falls back to the default runner


def test_parse_job_needs_string_vs_list() -> None:
    """Test parsing job needs as string vs list."""
    p = Parser(Path.cwd())

    # A scalar `needs` is normalized to a one-element list.
    scalar_job = p._parse_job("test", {"needs": "build"}, Path("test.yml"), "")
    assert scalar_job.needs == ["build"]

    # A list `needs` is preserved as-is.
    list_job = p._parse_job("test", {"needs": ["build", "lint"]}, Path("test.yml"), "")
    assert list_job.needs == ["build", "lint"]


def test_parse_job_with_none_steps() -> None:
    """Test parsing job with None steps."""
    parsed = Parser(Path.cwd())._parse_job(
        "test",
        {"steps": [None, {"uses": "actions/checkout@v4"}]},
        Path("test.yml"),
        "",
    )

    # None entries are skipped; only the checkout step is recorded.
    assert len(parsed.actions_used) == 1
    assert parsed.actions_used[0].repo == "checkout"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Container/Services/Strategy Parsing Tests
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def test_parse_container_none() -> None:
    """Test parsing None container."""
    assert Parser(Path.cwd())._parse_container(None) is None


def test_parse_container_string() -> None:
    """Test parsing container from string."""
    parsed = Parser(Path.cwd())._parse_container("ubuntu:latest")

    # A bare string is treated as the image name.
    assert parsed is not None
    assert parsed.image == "ubuntu:latest"


def test_parse_container_dict() -> None:
    """Test parsing container from dict."""
    parsed = Parser(Path.cwd())._parse_container(
        {
            "image": "node:20",
            "credentials": {"username": "user", "password": "pass"},
            "env": {"NODE_ENV": "test"},
            "ports": [8080],
            "volumes": ["/tmp:/tmp"],
            "options": "--cpus 2",
        }
    )

    # Every field of the mapping is carried over onto the container model.
    assert parsed is not None
    assert parsed.image == "node:20"
    assert parsed.credentials == {"username": "user", "password": "pass"}
    assert parsed.env["NODE_ENV"] == "test"
    assert parsed.ports == [8080]
    assert parsed.volumes == ["/tmp:/tmp"]
    assert parsed.options == "--cpus 2"
|
||||
|
||||
|
||||
def test_parse_services_none() -> None:
    """Test parsing None services."""
    assert Parser(Path.cwd())._parse_services(None) == {}


def test_parse_services_string_image() -> None:
    """Test parsing service with string image."""
    parsed = Parser(Path.cwd())._parse_services({"postgres": "postgres:15"})

    # A bare string value becomes a service keyed by name with that image.
    assert "postgres" in parsed
    assert parsed["postgres"].name == "postgres"
    assert parsed["postgres"].image == "postgres:15"


def test_parse_services_dict() -> None:
    """Test parsing service with dict config."""
    parsed = Parser(Path.cwd())._parse_services(
        {
            "redis": {
                "image": "redis:alpine",
                "ports": [6379],
                "options": "--health-cmd 'redis-cli ping'",
            }
        }
    )

    assert "redis" in parsed
    assert parsed["redis"].image == "redis:alpine"
    assert parsed["redis"].ports == [6379]
|
||||
|
||||
|
||||
def test_parse_strategy_none() -> None:
    """Test parsing None strategy."""
    assert Parser(Path.cwd())._parse_strategy(None) is None


def test_parse_strategy_matrix() -> None:
    """Test parsing strategy with matrix."""
    parsed = Parser(Path.cwd())._parse_strategy(
        {
            "matrix": {"node-version": [18, 20], "os": ["ubuntu-latest", "windows-latest"]},
            "fail-fast": False,
            "max-parallel": 4,
        }
    )

    assert parsed is not None
    # YAML kebab-case keys map onto snake_case model attributes.
    assert parsed.matrix == {"node-version": [18, 20], "os": ["ubuntu-latest", "windows-latest"]}
    assert parsed.fail_fast is False
    assert parsed.max_parallel == 4
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Action Manifest Parsing Tests
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def test_parse_composite_action() -> None:
    """Test parsing a composite action."""
    manifest = Parser(FIXTURES_DIR).parse_action(FIXTURES_DIR / "composite-action.yml")

    assert manifest.name == "Composite Action"
    assert manifest.description == "A composite action example"
    assert manifest.author == "Test Author"

    # Type flags: composite only.
    assert manifest.is_composite is True
    assert manifest.is_docker is False
    assert manifest.is_javascript is False

    # Inputs
    assert "message" in manifest.inputs
    assert manifest.inputs["message"].required is True
    assert "debug" in manifest.inputs
    assert manifest.inputs["debug"].required is False
    assert manifest.inputs["debug"].default == "false"

    # Outputs
    assert "result" in manifest.outputs
    assert manifest.outputs["result"].description == "Action result"

    # Branding
    assert manifest.branding is not None


def test_parse_docker_action() -> None:
    """Test parsing a Docker action."""
    manifest = Parser(FIXTURES_DIR).parse_action(FIXTURES_DIR / "docker-action.yml")

    assert manifest.name == "Docker Action"

    # Type flags: docker only.
    assert manifest.is_docker is True
    assert manifest.is_composite is False
    assert manifest.is_javascript is False

    # Inputs
    assert "dockerfile" in manifest.inputs
    assert manifest.inputs["dockerfile"].default == "Dockerfile"

    # Outputs
    assert "image-id" in manifest.outputs


def test_parse_javascript_action() -> None:
    """Test parsing a JavaScript action."""
    manifest = Parser(FIXTURES_DIR).parse_action(FIXTURES_DIR / "javascript-action.yml")

    assert manifest.name == "JavaScript Action"

    # Type flags: javascript only.
    assert manifest.is_javascript is True
    assert manifest.is_composite is False
    assert manifest.is_docker is False

    # Runs config
    assert manifest.runs["using"] == "node20"
    assert manifest.runs["main"] == "dist/index.js"
|
||||
|
||||
|
||||
def test_parse_action_with_various_defaults() -> None:
    """Test parsing action with different input default types."""
    manifest = Parser(FIXTURES_DIR).parse_action(FIXTURES_DIR / "action-with-defaults.yml")

    assert manifest.name == "Action with Various Defaults"

    # YAML scalar types survive parsing: string, boolean and number defaults.
    assert manifest.inputs["string-input"].default == "hello"
    assert manifest.inputs["boolean-input"].default is True
    assert manifest.inputs["number-input"].default == 42
    # An input without a default just carries its required flag.
    assert manifest.inputs["no-default"].required is True


def test_parse_action_empty_inputs_outputs() -> None:
    """Test parsing action with empty inputs/outputs."""
    manifest = Parser(FIXTURES_DIR).parse_action(FIXTURES_DIR / "composite-action.yml")

    # The parser always materializes inputs/outputs containers, never None.
    assert manifest.inputs is not None
    assert manifest.outputs is not None


def test_parse_empty_action() -> None:
    """Test parsing an empty action file raises error."""
    with pytest.raises(ValueError, match="Empty action file"):
        Parser(FIXTURES_DIR).parse_action(FIXTURES_DIR / "invalid-action.yml")
|
||||
|
||||
|
||||
# ============================================================================
# Reusable Workflow Tests
# ============================================================================


def test_parse_reusable_workflow_caller() -> None:
    """Workflows invoking reusable workflows should be fully captured."""
    parser = Parser(FIXTURES_DIR)
    workflow = parser.parse_workflow(FIXTURES_DIR / "reusable-workflow-caller.yml")

    assert workflow.name == "Reusable Workflow Caller"
    for job_id in ("call-workflow", "call-workflow-inherit", "call-local-workflow"):
        assert job_id in workflow.jobs

    # Job that passes explicit inputs and secrets.
    explicit = workflow.jobs["call-workflow"]
    assert explicit.uses == "owner/repo/.github/workflows/deploy.yml@v1"
    assert explicit.with_inputs["environment"] == "production"
    assert explicit.with_inputs["debug"] is False
    assert explicit.with_inputs["version"] == "1.2.3"
    assert explicit.secrets_passed is not None
    assert "deploy-token" in explicit.secrets_passed
    assert explicit.inherit_secrets is False

    # The called workflow itself should be recorded as an action reference.
    assert len(explicit.actions_used) == 1
    ref = explicit.actions_used[0]
    assert ref.type == ActionType.REUSABLE_WORKFLOW
    assert ref.owner == "owner"
    assert ref.repo == "repo"
    assert ref.path == ".github/workflows/deploy.yml"
    assert ref.ref == "v1"

    # Job that uses `secrets: inherit` instead of an explicit list.
    inherited = workflow.jobs["call-workflow-inherit"]
    assert inherited.uses == "owner/repo/.github/workflows/test.yml@main"
    assert inherited.inherit_secrets is True
    assert inherited.secrets_passed is None

    # Job that calls a workflow inside the same repository.
    local = workflow.jobs["call-local-workflow"]
    assert local.uses == "./.github/workflows/shared.yml"
    assert local.actions_used[0].type == ActionType.REUSABLE_WORKFLOW
    assert local.actions_used[0].path == "./.github/workflows/shared.yml"
def test_parse_job_with_outputs() -> None:
    """Job-level outputs should be parsed into the job metadata."""
    parser = Parser(FIXTURES_DIR)
    workflow = parser.parse_workflow(FIXTURES_DIR / "job-with-outputs.yml")

    assert "build" in workflow.jobs
    build = workflow.jobs["build"]

    assert build.outputs is not None
    for key in ("version", "artifact-url", "status"):
        assert key in build.outputs
    assert build.outputs["status"] == "success"


def test_parse_reusable_workflow_ref_local() -> None:
    """A `./…` reference is a local reusable workflow carrying only a path."""
    parser = Parser(Path.cwd())
    ref = parser._parse_reusable_workflow_ref("./.github/workflows/deploy.yml", Path("test.yml"))

    assert ref.type == ActionType.REUSABLE_WORKFLOW
    assert ref.path == "./.github/workflows/deploy.yml"


def test_parse_reusable_workflow_ref_github() -> None:
    """An `owner/repo/path@ref` reference is split into its components."""
    parser = Parser(Path.cwd())
    ref = parser._parse_reusable_workflow_ref("actions/reusable/.github/workflows/build.yml@v1", Path("test.yml"))

    assert ref.type == ActionType.REUSABLE_WORKFLOW
    assert (ref.owner, ref.repo) == ("actions", "reusable")
    assert ref.path == ".github/workflows/build.yml"
    assert ref.ref == "v1"


def test_parse_reusable_workflow_ref_invalid() -> None:
    """A malformed reference must raise a descriptive ValueError."""
    parser = Parser(Path.cwd())

    with pytest.raises(ValueError, match="Invalid reusable workflow reference"):
        parser._parse_reusable_workflow_ref("invalid-workflow-ref", Path("test.yml"))
def test_parse_permissions_invalid_type(tmp_path: Path) -> None:
    """Permission values of an unexpected shape should be ignored (None)."""
    parser = Parser(tmp_path)

    # bool, int, and list are all invalid shapes for a permissions block.
    for bogus in (True, 123, ["read", "write"]):
        assert parser._parse_permissions(bogus) is None


def test_parse_workflow_with_boolean_and_number_env(tmp_path: Path) -> None:
    """Env values must keep their YAML scalar types (str/bool/int/float)."""
    workflow_file = tmp_path / "test.yml"
    workflow_file.write_text(
        """
name: Test
on: push
env:
  STRING_VAR: "hello"
  BOOL_VAR: true
  NUMBER_VAR: 42
  FLOAT_VAR: 3.14
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - run: echo test
"""
    )

    workflow = Parser(tmp_path).parse_workflow(workflow_file)

    env = workflow.env
    assert env["STRING_VAR"] == "hello"
    assert env["BOOL_VAR"] is True
    assert env["NUMBER_VAR"] == 42
    assert env["FLOAT_VAR"] == 3.14
256
tests/test_policy.py
Normal file
256
tests/test_policy.py
Normal file
@@ -0,0 +1,256 @@
|
||||
"""Tests for policy validator."""
|
||||
|
||||
from ghaw_auditor.models import ActionRef, ActionType, JobMeta, Policy, WorkflowMeta
|
||||
from ghaw_auditor.policy import PolicyValidator
|
||||
|
||||
|
||||
def _one_job_workflow(job_ref: ActionRef, workflow_refs: list[ActionRef]) -> WorkflowMeta:
    """Build a minimal one-job workflow whose single step uses *job_ref*."""
    return WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={
            "test": JobMeta(
                name="test",
                runs_on="ubuntu-latest",
                actions_used=[job_ref],
            )
        },
        actions_used=workflow_refs,
    )


def test_policy_validator_initialization() -> None:
    """Validator should retain the policy it is constructed with."""
    policy = Policy()
    assert PolicyValidator(policy).policy == policy


def test_pinned_actions_validation() -> None:
    """A tag ref (v4) must violate require_pinned_actions at error severity."""
    validator = PolicyValidator(Policy(require_pinned_actions=True))
    tag_ref = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",  # Not pinned to SHA
        source_file="test.yml",
    )
    workflow = _one_job_workflow(tag_ref, [tag_ref])

    violations = validator.validate({"test.yml": workflow}, [])

    assert violations
    assert violations[0]["rule"] == "require_pinned_actions"
    assert violations[0]["severity"] == "error"


def test_pinned_actions_with_sha() -> None:
    """A full 40-character SHA ref satisfies require_pinned_actions."""
    validator = PolicyValidator(Policy(require_pinned_actions=True))
    sha_ref = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="abc123def456789012345678901234567890abcd",  # SHA
        source_file="test.yml",
    )
    workflow = _one_job_workflow(sha_ref, [])

    assert len(validator.validate({"test.yml": workflow}, [])) == 0


def test_branch_refs_validation() -> None:
    """A branch ref such as `main` must trip forbid_branch_refs."""
    validator = PolicyValidator(Policy(require_pinned_actions=False, forbid_branch_refs=True))
    branch_ref = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="main",
        source_file="test.yml",
    )
    workflow = _one_job_workflow(branch_ref, [branch_ref])

    violations = validator.validate({"test.yml": workflow}, [])

    assert violations
    assert violations[0]["rule"] == "forbid_branch_refs"
||||
def _policy_workflow(ref: ActionRef) -> WorkflowMeta:
    """One-job workflow using *ref*, mirrored in the workflow-level inventory."""
    return WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={
            "test": JobMeta(
                name="test",
                runs_on="ubuntu-latest",
                actions_used=[ref],
            )
        },
        actions_used=[ref],
    )


def test_allowed_actions_validation() -> None:
    """An action outside the allow-list must be flagged."""
    policy = Policy(require_pinned_actions=False, allowed_actions=["actions/*", "github/*"])
    validator = PolicyValidator(policy)
    outsider = ActionRef(
        type=ActionType.GITHUB,
        owner="thirdparty",
        repo="action",
        ref="v1",
        source_file="test.yml",
    )

    violations = validator.validate({"test.yml": _policy_workflow(outsider)}, [])

    assert violations
    assert violations[0]["rule"] == "allowed_actions"


def test_denied_actions_validation() -> None:
    """An action matching the deny-list must be flagged."""
    policy = Policy(require_pinned_actions=False, denied_actions=["dangerous/*"])
    validator = PolicyValidator(policy)
    denied = ActionRef(
        type=ActionType.GITHUB,
        owner="dangerous",
        repo="action",
        ref="v1",
        source_file="test.yml",
    )

    violations = validator.validate({"test.yml": _policy_workflow(denied)}, [])

    assert violations
    assert violations[0]["rule"] == "denied_actions"


def test_pr_concurrency_validation() -> None:
    """A pull_request workflow without concurrency should produce a warning."""
    validator = PolicyValidator(Policy(require_concurrency_on_pr=True))
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["pull_request"],
        concurrency=None,
        jobs={},
    )

    violations = validator.validate({"test.yml": workflow}, [])

    assert violations
    assert violations[0]["rule"] == "require_concurrency_on_pr"
    assert violations[0]["severity"] == "warning"


def test_pr_concurrency_with_group() -> None:
    """A pull_request workflow that defines a concurrency group passes."""
    validator = PolicyValidator(Policy(require_concurrency_on_pr=True))
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["pull_request"],
        concurrency={"group": "${{ github.workflow }}"},
        jobs={},
    )

    assert len(validator.validate({"test.yml": workflow}, [])) == 0


def test_matches_pattern() -> None:
    """Glob-style patterns match on the owner prefix."""
    validator = PolicyValidator(Policy())

    assert validator._matches_pattern("actions/checkout", "actions/*") is True
    assert validator._matches_pattern("github/codeql-action", "github/*") is True
    assert validator._matches_pattern("thirdparty/action", "actions/*") is False
||||
755
tests/test_renderer.py
Normal file
755
tests/test_renderer.py
Normal file
@@ -0,0 +1,755 @@
|
||||
"""Tests for renderer."""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from ghaw_auditor.models import ActionManifest, JobMeta, WorkflowMeta
|
||||
from ghaw_auditor.renderer import Renderer
|
||||
|
||||
|
||||
def test_renderer_initialization(tmp_path: Path) -> None:
    """Renderer should record and create its output directory."""
    renderer = Renderer(tmp_path)
    assert renderer.output_dir == tmp_path
    assert renderer.output_dir.exists()


def test_render_json(tmp_path: Path) -> None:
    """render_json writes workflows/actions/violations files with correct content."""
    renderer = Renderer(tmp_path)

    workflows = {
        "test.yml": WorkflowMeta(
            name="Test",
            path="test.yml",
            triggers=["push"],
            jobs={"test": JobMeta(name="test", runs_on="ubuntu-latest")},
        )
    }
    actions = {
        "actions/checkout@v4": ActionManifest(
            name="Checkout",
            description="Checkout code",
        )
    }
    violations = [
        {
            "workflow": "test.yml",
            "rule": "test_rule",
            "severity": "error",
            "message": "Test violation",
        }
    ]

    renderer.render_json(workflows, actions, violations)

    # All three output files must be produced.
    for filename in ("workflows.json", "actions.json", "violations.json"):
        assert (tmp_path / filename).exists()

    workflows_data = json.loads((tmp_path / "workflows.json").read_text())
    assert "test.yml" in workflows_data
    assert workflows_data["test.yml"]["name"] == "Test"

    actions_data = json.loads((tmp_path / "actions.json").read_text())
    assert "actions/checkout@v4" in actions_data

    violations_data = json.loads((tmp_path / "violations.json").read_text())
    assert len(violations_data) == 1
    assert violations_data[0]["rule"] == "test_rule"


def test_render_markdown(tmp_path: Path) -> None:
    """render_markdown produces a report covering workflows, actions, and violations."""
    renderer = Renderer(tmp_path)

    workflows = {
        "test.yml": WorkflowMeta(
            name="Test Workflow",
            path="test.yml",
            triggers=["push", "pull_request"],
            jobs={"test": JobMeta(name="test", runs_on="ubuntu-latest")},
        )
    }
    actions = {
        "actions/checkout@v4": ActionManifest(
            name="Checkout",
            description="Checkout repository",
        )
    }
    violations = [
        {
            "workflow": "test.yml",
            "rule": "require_pinned_actions",
            "severity": "error",
            "message": "Action not pinned to SHA",
        }
    ]
    analysis = {
        "total_jobs": 1,
        "reusable_workflows": 0,
        "triggers": {"push": 1, "pull_request": 1},
        "runners": {"ubuntu-latest": 1},
        "secrets": {"total_unique_secrets": 0, "secrets": []},
    }

    renderer.render_markdown(workflows, actions, violations, analysis)

    report_file = tmp_path / "report.md"
    assert report_file.exists()

    content = report_file.read_text()
    expected_fragments = (
        "# GitHub Actions & Workflows Audit Report",
        "Test Workflow",
        "Checkout",
        "require_pinned_actions",
        "push",
        "pull_request",
    )
    for fragment in expected_fragments:
        assert fragment in content


def test_render_empty_data(tmp_path: Path) -> None:
    """Rendering empty inputs still produces (empty) JSON files."""
    renderer = Renderer(tmp_path)

    renderer.render_json({}, {}, [])

    for filename in ("workflows.json", "actions.json", "violations.json"):
        assert (tmp_path / filename).exists()

    assert json.loads((tmp_path / "workflows.json").read_text()) == {}
    assert json.loads((tmp_path / "violations.json").read_text()) == []
||||
def test_render_markdown_with_actions_used(tmp_path: Path) -> None:
    """Job-level actions should be rendered with a link to the inventory."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    checkout = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )
    test_job = JobMeta(
        name="test",
        runs_on="ubuntu-latest",
        actions_used=[checkout],
    )
    workflows = {
        "test.yml": WorkflowMeta(
            name="Test Workflow",
            path="test.yml",
            triggers=["push"],
            jobs={"test": test_job},
        )
    }

    renderer.render_markdown(workflows, {}, [], {})

    report_file = tmp_path / "report.md"
    assert report_file.exists()

    content = report_file.read_text()
    # The action appears under the job, linked to its inventory anchor.
    assert "Actions used:" in content
    assert "[actions/checkout](#actions-checkout)" in content


def test_render_markdown_with_secrets(tmp_path: Path) -> None:
    """Secrets from the analysis summary should appear in the report."""
    renderer = Renderer(tmp_path)

    workflows = {
        "test.yml": WorkflowMeta(
            name="Test Workflow",
            path="test.yml",
            triggers=["push"],
            jobs={},
        )
    }
    analysis = {
        "total_jobs": 0,
        "reusable_workflows": 0,
        "secrets": {
            "total_unique_secrets": 2,
            "secrets": ["API_KEY", "DATABASE_URL"],
        },
    }

    renderer.render_markdown(workflows, {}, [], analysis)

    content = (tmp_path / "report.md").read_text()
    assert "API_KEY" in content
    assert "DATABASE_URL" in content


def test_render_markdown_with_action_inputs(tmp_path: Path) -> None:
    """Action inputs are rendered with descriptions and required/optional flags."""
    from ghaw_auditor.models import ActionInput

    renderer = Renderer(tmp_path)

    manifest = ActionManifest(
        name="Test Action",
        description="A test action",
        inputs={
            "token": ActionInput(
                name="token",
                description="GitHub token",
                required=True,
            ),
            "debug": ActionInput(
                name="debug",
                description="Enable debug mode",
                required=False,
            ),
        },
    )

    renderer.render_markdown({}, {"test/action@v1": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    for fragment in ("token", "required", "debug", "optional", "GitHub token", "Enable debug mode"):
        assert fragment in content
||||
def test_render_markdown_with_action_anchors(tmp_path: Path) -> None:
    """The inventory entry for each action should carry an HTML anchor."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    checkout = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        resolved_sha="abc123",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        actions_used=[checkout],
    )
    manifest = ActionManifest(
        name="Checkout",
        description="Checkout code",
    )

    renderer.render_markdown({"test.yml": workflow}, {"actions/checkout@abc123": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert '<a id="actions-checkout"></a>' in content


def test_render_markdown_with_repo_urls(tmp_path: Path) -> None:
    """GitHub-hosted actions should link back to their repositories."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    setup_node = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="setup-node",
        ref="v4",
        resolved_sha="def456",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        actions_used=[setup_node],
    )
    manifest = ActionManifest(
        name="Setup Node",
        description="Setup Node.js",
    )

    renderer.render_markdown({"test.yml": workflow}, {"actions/setup-node@def456": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert "https://github.com/actions/setup-node" in content
    assert "[actions/setup-node](https://github.com/actions/setup-node)" in content


def test_render_markdown_with_details_tags(tmp_path: Path) -> None:
    """Input tables should be collapsed inside <details> blocks."""
    from ghaw_auditor.models import ActionInput, ActionRef, ActionType

    renderer = Renderer(tmp_path)

    checkout = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        actions_used=[checkout],
    )
    manifest = ActionManifest(
        name="Checkout",
        description="Checkout code",
        inputs={
            "token": ActionInput(
                name="token",
                description="GitHub token",
                required=False,
            ),
        },
    )

    renderer.render_markdown({"test.yml": workflow}, {"actions/checkout@v4": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    for fragment in ("<details>", "<summary><b>Inputs</b></summary>", "</details>"):
        assert fragment in content
||||
def test_render_markdown_with_job_action_links(tmp_path: Path) -> None:
    """Actions listed under a job should link to their inventory entry."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    checkout = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )
    ci_job = JobMeta(
        name="test",
        runs_on="ubuntu-latest",
        actions_used=[checkout],
    )
    workflow = WorkflowMeta(
        name="CI",
        path="ci.yml",
        triggers=["push"],
        jobs={"test": ci_job},
        actions_used=[checkout],
    )
    manifest = ActionManifest(
        name="Checkout",
        description="Checkout code",
    )

    renderer.render_markdown({"ci.yml": workflow}, {"actions/checkout@v4": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert "Actions used:" in content
    assert "[actions/checkout](#actions-checkout) (GitHub)" in content


def test_create_action_anchor() -> None:
    """Anchor slugs are derived from action keys, dropping refs and SHAs."""
    # GitHub action keyed by SHA.
    assert Renderer._create_action_anchor("actions/checkout@abc123") == "actions-checkout"
    # Local action.
    assert Renderer._create_action_anchor("local:./sync-labels") == "local-sync-labels"
    # Docker image reference.
    assert Renderer._create_action_anchor("docker://alpine:3.8") == "docker-alpine-3-8"
    # A full-length SHA is stripped from the anchor too.
    assert (
        Renderer._create_action_anchor("actions/setup-node@1234567890abcdef1234567890abcdef12345678")
        == "actions-setup-node"
    )


def test_get_action_repo_url() -> None:
    """Only GitHub-hosted actions produce a repository URL."""
    from ghaw_auditor.models import ActionRef, ActionType

    github_action = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )
    assert Renderer._get_action_repo_url(github_action) == "https://github.com/actions/checkout"

    # Local and Docker actions have no canonical repository URL.
    local_action = ActionRef(
        type=ActionType.LOCAL,
        path="./my-action",
        source_file="test.yml",
    )
    assert Renderer._get_action_repo_url(local_action) is None

    docker_action = ActionRef(
        type=ActionType.DOCKER,
        path="docker://alpine:3.8",
        source_file="test.yml",
    )
    assert Renderer._get_action_repo_url(docker_action) is None
||||
def test_render_markdown_with_docker_action(tmp_path: Path) -> None:
    """A Docker step should render with its image path and a Docker type label."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    docker_step = ActionRef(
        type=ActionType.DOCKER,
        path="docker://alpine:3.8",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={
            "test": JobMeta(
                name="test",
                runs_on="ubuntu-latest",
                actions_used=[docker_step],
            )
        },
    )

    renderer.render_markdown({"test.yml": workflow}, {}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert "Actions used:" in content
    assert "(Docker)" in content
    assert "docker://alpine:3.8" in content


def test_render_markdown_with_reusable_workflow(tmp_path: Path) -> None:
    """A reusable-workflow job should render with its path and type label."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    shared_workflow = ActionRef(
        type=ActionType.REUSABLE_WORKFLOW,
        owner="org",
        repo="workflows",
        path=".github/workflows/reusable.yml",
        ref="main",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={
            "test": JobMeta(
                name="test",
                runs_on="ubuntu-latest",
                actions_used=[shared_workflow],
            )
        },
    )

    renderer.render_markdown({"test.yml": workflow}, {}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert "Actions used:" in content
    assert "(Reusable Workflow)" in content
    assert ".github/workflows/reusable.yml" in content
||||
def test_render_markdown_with_docker_action_in_inventory(tmp_path: Path) -> None:
    """Docker inventory entries should not carry a repository link."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    docker_ref = ActionRef(
        type=ActionType.DOCKER,
        path="docker://node:18-alpine",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        actions_used=[docker_ref],
    )
    manifest = ActionManifest(
        name="Node Alpine",
        description="Node.js on Alpine Linux",
    )

    renderer.render_markdown({"test.yml": workflow}, {"docker:docker://node:18-alpine": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    # No entry should combine a repository label with this Docker image.
    assert not ("**Repository:**" in content and "node:18-alpine" in content)
    assert "Node Alpine" in content


def test_render_markdown_with_local_action_without_path(tmp_path: Path) -> None:
    """A LOCAL action lacking a path should fall back to the name `local`."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    pathless_local = ActionRef(
        type=ActionType.LOCAL,
        path=None,
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={
            "test": JobMeta(
                name="test",
                runs_on="ubuntu-latest",
                actions_used=[pathless_local],
            )
        },
    )

    renderer.render_markdown({"test.yml": workflow}, {}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert "Actions used:" in content
    assert "[local](#local-none) (Local)" in content


def test_render_markdown_with_local_action_in_inventory(tmp_path: Path) -> None:
    """Local inventory entries should be labelled as local actions."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    local_ref = ActionRef(
        type=ActionType.LOCAL,
        path="./my-custom-action",
        source_file="test.yml",
    )
    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={},
        actions_used=[local_ref],
    )
    manifest = ActionManifest(
        name="My Custom Action",
        description="A custom local action",
    )

    renderer.render_markdown({"test.yml": workflow}, {"local:./my-custom-action": manifest}, [], {})

    content = (tmp_path / "report.md").read_text()
    assert "**Type:** Local Action" in content
    assert "My Custom Action" in content
||||
def test_render_markdown_with_job_permissions(tmp_path: Path) -> None:
|
||||
"""Test Markdown rendering with job permissions."""
|
||||
from ghaw_auditor.models import PermissionLevel, Permissions
|
||||
|
||||
renderer = Renderer(tmp_path)
|
||||
|
||||
job = JobMeta(
|
||||
name="test",
|
||||
runs_on="ubuntu-latest",
|
||||
permissions=Permissions(
|
||||
contents=PermissionLevel.READ,
|
||||
issues=PermissionLevel.WRITE,
|
||||
security_events=PermissionLevel.WRITE,
|
||||
),
|
||||
)
|
||||
|
||||
workflow = WorkflowMeta(
|
||||
name="Test",
|
||||
path="test.yml",
|
||||
triggers=["push"],
|
||||
jobs={"test": job},
|
||||
)
|
||||
|
||||
renderer.render_markdown({"test.yml": workflow}, {}, [], {})
|
||||
|
||||
report_file = tmp_path / "report.md"
|
||||
content = report_file.read_text()
|
||||
|
||||
# Should show permissions
|
||||
assert "Permissions:" in content
|
||||
assert "`contents`: read" in content
|
||||
assert "`issues`: write" in content
|
||||
assert "`security-events`: write" in content
|
||||
|
||||
|
||||
def test_render_markdown_without_job_permissions(tmp_path: Path) -> None:
    """Test Markdown rendering with job that has no permissions set."""
    renderer = Renderer(tmp_path)

    workflow = WorkflowMeta(
        name="Test",
        path="test.yml",
        triggers=["push"],
        jobs={"test": JobMeta(name="test", runs_on="ubuntu-latest", permissions=None)},
    )

    renderer.render_markdown({"test.yml": workflow}, {}, [], {})

    content = (tmp_path / "report.md").read_text()

    # No permissions configured -> no "Permissions:" section in the report.
    assert "Permissions:" not in content
|
||||
|
||||
|
||||
def test_render_markdown_with_workflows_using_action(tmp_path: Path) -> None:
    """Test that actions show which workflows use them."""
    from ghaw_auditor.models import ActionRef, ActionType

    renderer = Renderer(tmp_path)

    # One action reference shared by two different workflows.
    action_ref = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file=".github/workflows/ci.yml",
    )
    workflows = {
        ".github/workflows/ci.yml": WorkflowMeta(
            name="CI Workflow",
            path=".github/workflows/ci.yml",
            triggers=["push"],
            actions_used=[action_ref],
        ),
        ".github/workflows/deploy.yml": WorkflowMeta(
            name="Deploy Workflow",
            path=".github/workflows/deploy.yml",
            triggers=["push"],
            actions_used=[action_ref],
        ),
    }
    actions = {
        "actions/checkout@v4": ActionManifest(
            name="Checkout",
            description="Checkout repository",
        )
    }

    renderer.render_markdown(workflows, actions, [], {})

    content = (tmp_path / "report.md").read_text()

    # The action entry lists every workflow that references it ...
    assert "Used in Workflows" in content
    assert "CI Workflow" in content
    assert "Deploy Workflow" in content
    assert ".github/workflows/ci.yml" in content
    assert ".github/workflows/deploy.yml" in content
    # ... with anchor links to the per-workflow sections.
    assert "[CI Workflow](#ci-workflow)" in content
    assert "[Deploy Workflow](#deploy-workflow)" in content
|
||||
531
tests/test_resolver.py
Normal file
531
tests/test_resolver.py
Normal file
@@ -0,0 +1,531 @@
|
||||
"""Tests for resolver with mocked API."""
|
||||
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from ghaw_auditor.cache import Cache
|
||||
from ghaw_auditor.github_client import GitHubClient
|
||||
from ghaw_auditor.models import ActionRef, ActionType
|
||||
from ghaw_auditor.resolver import Resolver
|
||||
|
||||
|
||||
@pytest.fixture
def mock_github_client() -> Mock:
    """Create mock GitHub client."""
    client = Mock(spec=GitHubClient)
    # Deterministic SHA plus a minimal valid node20 action manifest for
    # every ref/file lookup the resolver performs.
    client.get_ref_sha.return_value = "abc123def456"
    # NOTE(review): nesting of the YAML below reconstructed from context —
    # `using`/`main` are assumed to sit under `runs:`.
    client.get_file_content.return_value = """
name: Test Action
description: A test action
runs:
  using: node20
  main: index.js
"""
    return client
|
||||
|
||||
|
||||
@pytest.fixture
def temp_cache(tmp_path: Path) -> Cache:
    """Create temporary cache."""
    # Backed by a throwaway per-test directory so tests never share state.
    cache_dir = tmp_path / "cache"
    return Cache(cache_dir)
|
||||
|
||||
|
||||
def test_resolver_initialization(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolver initialization."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)

    # The constructor stores its collaborators verbatim.
    assert resolver.github_client == mock_github_client
    assert resolver.cache == temp_cache
    assert resolver.repo_path == tmp_path
|
||||
|
||||
|
||||
def test_resolve_github_action(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving GitHub action."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_github_action(action)

    # The ref is pinned to the SHA reported by the mocked client, and the
    # mocked manifest content is parsed into an ActionManifest.
    assert key == "actions/checkout@abc123def456"
    assert manifest is not None
    assert manifest.name == "Test Action"
    assert action.resolved_sha == "abc123def456"
|
||||
|
||||
|
||||
def test_resolve_local_action(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving local action."""
    # Write a valid composite action manifest on disk.
    action_dir = tmp_path / ".github" / "actions" / "custom"
    action_dir.mkdir(parents=True)
    (action_dir / "action.yml").write_text(
        """name: Custom Action
description: Local action
runs:
  using: composite
  steps:
    - name: Test step
      run: echo test
      shell: bash
"""
    )

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.LOCAL,
        path="./.github/actions/custom",  # With leading ./
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_local_action(action)

    assert key == "local:./.github/actions/custom"
    assert manifest is not None
    assert manifest.name == "Custom Action"
    assert manifest.is_composite is True
|
||||
|
||||
|
||||
def test_resolve_docker_action(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving Docker action."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.DOCKER,
        path="docker://alpine:3.8",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_action(action)

    assert key == "docker:docker://alpine:3.8"
    # Docker actions don't have manifests
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_actions_parallel(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test parallel action resolution."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path, concurrency=2)

    # Two distinct GitHub actions, resolved with two workers.
    actions = [
        ActionRef(
            type=ActionType.GITHUB,
            owner="actions",
            repo=repo,
            ref="v4",
            source_file="test.yml",
        )
        for repo in ("checkout", "setup-node")
    ]

    resolved = resolver.resolve_actions(actions)

    # Both actions resolve, with one SHA lookup each.
    assert len(resolved) == 2
    assert mock_github_client.get_ref_sha.call_count == 2
|
||||
|
||||
|
||||
def test_resolve_action_with_cache(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test action resolution with caching."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )

    first_key, _ = resolver._resolve_github_action(action)

    # Forget the first round of calls, then resolve the same ref again;
    # the second resolution should be served (mostly) from the cache.
    mock_github_client.reset_mock()
    second_key, _ = resolver._resolve_github_action(action)

    assert first_key == second_key
    # Cache should reduce API calls
    assert mock_github_client.get_ref_sha.call_count <= 1
|
||||
|
||||
|
||||
def test_resolve_action_api_error(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test handling API errors."""
    mock_github_client.get_ref_sha.side_effect = Exception("API Error")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_github_action(action)

    # API failures degrade to an empty result instead of raising.
    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_monorepo_action(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving monorepo action with path."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="owner",
        repo="repo",
        path="subdir/action",
        ref="v1",
        source_file="test.yml",
    )

    resolver._resolve_github_action(action)

    # The manifest must be fetched from the subdirectory, not the repo root.
    mock_github_client.get_file_content.assert_called_with("owner", "repo", "subdir/action/action.yml", "abc123def456")
|
||||
|
||||
|
||||
def test_resolve_action_unknown_type(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving action with unknown type returns empty."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)

    # REUSABLE_WORKFLOW refs are not handled by the resolver.
    action = ActionRef(
        type=ActionType.REUSABLE_WORKFLOW,
        owner="owner",
        repo="repo",
        path=".github/workflows/test.yml",
        ref="v1",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_action(action)

    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_local_action_no_path(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving local action without path."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(type=ActionType.LOCAL, path=None, source_file="test.yml")

    key, manifest = resolver._resolve_local_action(action)

    # A LOCAL ref without a path cannot be resolved.
    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_local_action_not_found(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving local action that doesn't exist."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.LOCAL,
        path="./.github/actions/nonexistent",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_local_action(action)

    # Missing directories resolve to nothing rather than raising.
    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_local_action_invalid_yaml(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving local action with invalid YAML."""
    broken_dir = tmp_path / ".github" / "actions" / "broken"
    broken_dir.mkdir(parents=True)
    (broken_dir / "action.yml").write_text("invalid: yaml: content: {{{")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.LOCAL,
        path="./.github/actions/broken",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_local_action(action)

    # Should handle parse error gracefully
    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_github_action_missing_fields(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving GitHub action with missing required fields."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)

    # Missing owner
    action = ActionRef(
        type=ActionType.GITHUB,
        owner=None,
        repo="checkout",
        ref="v4",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_github_action(action)

    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_github_action_manifest_not_found(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving GitHub action when manifest cannot be fetched."""
    # SHA lookup succeeds but the manifest download fails.
    mock_github_client.get_ref_sha.return_value = "abc123"
    mock_github_client.get_file_content.side_effect = Exception("404 Not Found")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="missing",
        ref="v1",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_github_action(action)

    # The pinned key is still produced even though no manifest was parsed.
    assert "actions/missing@abc123" in key
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_monorepo_action_manifest_not_found(
    mock_github_client: Mock, temp_cache: Cache, tmp_path: Path, caplog: pytest.LogCaptureFixture
) -> None:
    """Test resolving monorepo action when manifest cannot be fetched."""
    import logging

    # Both the action.yml and action.yaml fetches fail.
    mock_github_client.get_ref_sha.return_value = "abc123"
    mock_github_client.get_file_content.side_effect = Exception("404 Not Found")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="owner",
        repo="repo",
        path="subdir/action",
        ref="v1",
        source_file="test.yml",
    )

    with caplog.at_level(logging.ERROR):
        key, manifest = resolver._resolve_github_action(action)

    # Key is produced, manifest is missing, and the failure is logged with
    # the full monorepo path plus both attempted filenames.
    assert "owner/repo@abc123" in key
    assert manifest is None
    assert "owner/repo/subdir/action" in caplog.text
    assert "(tried action.yml and action.yaml)" in caplog.text
|
||||
|
||||
|
||||
def test_resolve_github_action_invalid_manifest(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving GitHub action with invalid manifest content."""
    # SHA lookup succeeds but the fetched manifest is not valid YAML.
    mock_github_client.get_ref_sha.return_value = "abc123"
    mock_github_client.get_file_content.return_value = "invalid: yaml: {{{: bad"

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.GITHUB,
        owner="actions",
        repo="broken",
        ref="v1",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_github_action(action)

    # Should handle parse error gracefully
    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_actions_with_exception(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test parallel resolution handles exceptions gracefully."""

    def fake_get_ref_sha(owner: str, repo: str, ref: str) -> str:
        # One repo fails, the other resolves normally.
        if repo == "fail":
            raise Exception("API Error")
        return "abc123"

    mock_github_client.get_ref_sha.side_effect = fake_get_ref_sha

    resolver = Resolver(mock_github_client, temp_cache, tmp_path, concurrency=2)
    actions = [
        ActionRef(
            type=ActionType.GITHUB,
            owner="actions",
            repo="checkout",
            ref="v4",
            source_file="test.yml",
        ),
        ActionRef(
            type=ActionType.GITHUB,
            owner="actions",
            repo="fail",
            ref="v1",
            source_file="test.yml",
        ),
    ]

    resolved = resolver.resolve_actions(actions)

    # Only the successful action appears in the result.
    assert len(resolved) == 1
    assert "actions/checkout" in next(iter(resolved))
|
||||
|
||||
|
||||
def test_resolve_actions_logs_exception(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test that exceptions during resolution are logged."""
    resolver = Resolver(mock_github_client, temp_cache, tmp_path)

    # Force _resolve_action itself to raise so the error propagates through
    # future.result() into the executor's exception handler.
    with patch.object(resolver, "_resolve_action", side_effect=RuntimeError("Unexpected error")):
        actions = [
            ActionRef(
                type=ActionType.GITHUB,
                owner="actions",
                repo="broken",
                ref="v1",
                source_file="test.yml",
            ),
        ]
        resolved = resolver.resolve_actions(actions)

    # Should handle exception gracefully and log error
    assert len(resolved) == 0
|
||||
|
||||
|
||||
def test_resolve_local_action_file_path_parse_error(
    mock_github_client: Mock, temp_cache: Cache, tmp_path: Path
) -> None:
    """Test resolving local action when file path parsing fails."""
    # Directory whose action.yml is not valid YAML.
    broken_dir = tmp_path / "my-action"
    broken_dir.mkdir()
    (broken_dir / "action.yml").write_text("invalid: yaml: content: {{{")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)

    # Referencing a file named "action.*" routes through the else branch,
    # which looks for the manifest in the file's parent directory.
    action = ActionRef(
        type=ActionType.LOCAL,
        path="./my-action/action.custom.yml",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_local_action(action)

    # The parent's action.yml fails to parse; the error is handled gracefully.
    assert key == ""
    assert manifest is None
|
||||
|
||||
|
||||
def test_resolve_action_local_type(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test _resolve_action with LOCAL action type."""
    # Valid composite action on disk.
    action_dir = tmp_path / "my-action"
    action_dir.mkdir()
    (action_dir / "action.yml").write_text("""
name: My Action
description: Test action
runs:
  using: composite
  steps:
    - run: echo test
      shell: bash
""")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)
    action = ActionRef(
        type=ActionType.LOCAL,
        path="./my-action",
        source_file="test.yml",
    )

    # Dispatch through _resolve_action to exercise its LOCAL branch.
    key, manifest = resolver._resolve_action(action)

    assert key == "local:./my-action"
    assert manifest is not None
    assert manifest.name == "My Action"
|
||||
|
||||
|
||||
def test_resolve_local_action_file_path_success(mock_github_client: Mock, temp_cache: Cache, tmp_path: Path) -> None:
    """Test resolving local action via file path (else branch) with valid YAML."""
    # Directory with a valid composite-action manifest.
    action_dir = tmp_path / "my-action"
    action_dir.mkdir()
    (action_dir / "action.yml").write_text("""
name: File Path Action
description: Test action via file path
runs:
  using: composite
  steps:
    - run: echo test
      shell: bash
""")

    resolver = Resolver(mock_github_client, temp_cache, tmp_path)

    # Referencing the action.yml file directly triggers the else branch,
    # which resolves the manifest from the file's parent directory.
    action = ActionRef(
        type=ActionType.LOCAL,
        path="./my-action/action.yml",
        source_file="test.yml",
    )

    key, manifest = resolver._resolve_local_action(action)

    assert key == "local:./my-action/action.yml"
    assert manifest is not None
    assert manifest.name == "File Path Action"
|
||||
205
tests/test_scanner.py
Normal file
205
tests/test_scanner.py
Normal file
@@ -0,0 +1,205 @@
|
||||
"""Tests for scanner module."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from ghaw_auditor.scanner import Scanner
|
||||
|
||||
|
||||
def test_scanner_initialization() -> None:
    """Test scanner can be initialized."""
    # The current working directory always exists, so repo_path must too.
    scanner = Scanner(".")
    assert scanner.repo_path.exists()
|
||||
|
||||
|
||||
def test_scanner_initialization_with_exclusions() -> None:
    """Test scanner initialization with exclusion patterns."""
    patterns = ["**/node_modules/**", "**/dist/**"]
    scanner = Scanner(".", exclude_patterns=patterns)

    # Both patterns are stored verbatim.
    assert len(scanner.exclude_patterns) == 2
    assert "**/node_modules/**" in scanner.exclude_patterns
|
||||
|
||||
|
||||
def test_scanner_should_exclude(tmp_path: Path) -> None:
    """Test exclusion pattern matching."""
    # Note: glob patterns need to match the full path including files
    scanner = Scanner(tmp_path, exclude_patterns=["node_modules/**/*", ".git/**/*"])

    def make_file(relative: str) -> Path:
        # Create an empty file (and its parent directories) under tmp_path.
        target = tmp_path / relative
        target.parent.mkdir(parents=True)
        target.touch()
        return target

    excluded_npm = make_file("node_modules/test/action.yml")
    excluded_git = make_file(".git/hooks/pre-commit")
    included = make_file(".github/actions/test/action.yml")

    assert scanner._should_exclude(excluded_npm) is True
    assert scanner._should_exclude(excluded_git) is True
    assert scanner._should_exclude(included) is False
|
||||
|
||||
|
||||
def test_find_workflows_empty_dir(tmp_path: Path) -> None:
    """Test finding workflows in empty directory."""
    # No .github/workflows directory at all -> no results.
    found = Scanner(tmp_path).find_workflows()
    assert len(found) == 0
|
||||
|
||||
|
||||
def test_find_workflows_with_files(tmp_path: Path) -> None:
    """Test finding workflow files."""
    workflows_dir = tmp_path / ".github" / "workflows"
    workflows_dir.mkdir(parents=True)

    # Both .yml and .yaml spellings count; non-YAML files are ignored.
    (workflows_dir / "ci.yml").write_text("name: CI\non: push")
    (workflows_dir / "release.yaml").write_text("name: Release\non: push")
    (workflows_dir / "README.md").write_text("# Workflows")

    found = Scanner(tmp_path).find_workflows()

    assert [w.name for w in found] == ["ci.yml", "release.yaml"]
|
||||
|
||||
|
||||
def test_find_workflows_with_exclusions(tmp_path: Path) -> None:
    """Test finding workflows with exclusion patterns."""
    workflows_dir = tmp_path / ".github" / "workflows"
    workflows_dir.mkdir(parents=True)
    (workflows_dir / "ci.yml").write_text("name: CI")
    (workflows_dir / "test.yml").write_text("name: Test")

    scanner = Scanner(tmp_path, exclude_patterns=["**test.yml"])
    found = scanner.find_workflows()

    # Only the non-excluded workflow remains.
    assert [w.name for w in found] == ["ci.yml"]
|
||||
|
||||
|
||||
def test_find_actions_empty_dir(tmp_path: Path) -> None:
    """Test finding actions in empty directory."""
    # An empty repository contains no action manifests.
    found = Scanner(tmp_path).find_actions()
    assert len(found) == 0
|
||||
|
||||
|
||||
def test_find_actions_in_github_directory(tmp_path: Path) -> None:
    """Test finding actions in .github/actions directory."""
    actions_dir = tmp_path / ".github" / "actions"

    # Two top-level actions (.yml and .yaml spellings) plus one nested one.
    fixtures = [
        ("action1/action.yml", "name: Action 1"),
        ("action2/action.yaml", "name: Action 2"),
        ("group/nested/action.yml", "name: Nested Action"),
    ]
    for relative, manifest in fixtures:
        target = actions_dir / relative
        target.parent.mkdir(parents=True)
        target.write_text(manifest)

    found = Scanner(tmp_path).find_actions()

    assert len(found) == 3
    paths = [str(a) for a in found]
    assert any("action1" in p for p in paths)
    assert any("action2" in p for p in paths)
    assert any("nested" in p for p in paths)
|
||||
|
||||
|
||||
def test_find_actions_in_root(tmp_path: Path) -> None:
    """Test finding action in root directory."""
    # A repo-root action.yml counts as an action on its own.
    (tmp_path / "action.yml").write_text("name: Root Action")

    found = Scanner(tmp_path).find_actions()

    assert len(found) == 1
    assert found[0].name == "action.yml"
|
||||
|
||||
|
||||
def test_find_actions_excludes_workflows_dir(tmp_path: Path) -> None:
    """Test that actions in workflows directory are excluded."""
    # An action.yml inside .github/workflows must NOT be treated as an action.
    workflows_dir = tmp_path / ".github" / "workflows"
    workflows_dir.mkdir(parents=True)
    (workflows_dir / "action.yml").write_text("name: Not an action")

    real_dir = tmp_path / ".github" / "actions" / "real"
    real_dir.mkdir(parents=True)
    (real_dir / "action.yml").write_text("name: Real Action")

    found = Scanner(tmp_path).find_actions()

    # Should only find the action in .github/actions, not in workflows
    assert len(found) == 1
    assert "actions/real" in str(found[0])
|
||||
|
||||
|
||||
def test_find_actions_with_exclusions(tmp_path: Path) -> None:
    """Test finding actions with exclusion patterns."""
    actions_dir = tmp_path / ".github" / "actions"
    for name, manifest in [("include-me", "name: Include"), ("exclude-me", "name: Exclude")]:
        target = actions_dir / name
        target.mkdir(parents=True)
        (target / "action.yml").write_text(manifest)

    scanner = Scanner(tmp_path, exclude_patterns=["**/exclude-me/**"])
    found = scanner.find_actions()

    # Only the non-excluded action survives the filter.
    assert len(found) == 1
    assert "include-me" in str(found[0])
|
||||
|
||||
|
||||
def test_find_actions_deduplication(tmp_path: Path) -> None:
    """Test that duplicate actions are not included."""
    actions_dir = tmp_path / ".github" / "actions" / "my-action"
    actions_dir.mkdir(parents=True)
    action_file = actions_dir / "action.yml"
    action_file.write_text("name: My Action")

    found = Scanner(tmp_path).find_actions()

    # The same manifest must appear exactly once.
    assert found == [action_file]
|
||||
|
||||
|
||||
def test_find_actions_monorepo_structure(tmp_path: Path) -> None:
    """Test finding actions in monorepo with multiple root-level action directories."""
    # Monorepo layout: each top-level directory is its own action.
    names = ["sync-labels", "deploy-action", "test-action"]
    for name in names:
        action_dir = tmp_path / name
        action_dir.mkdir()
        (action_dir / "action.yml").write_text(f"name: {name}\ndescription: Test action")

    found = Scanner(tmp_path).find_actions()

    assert len(found) == 3
    for name in names:
        assert any(name in str(a) for a in found)
|
||||
227
tests/test_services.py
Normal file
227
tests/test_services.py
Normal file
@@ -0,0 +1,227 @@
|
||||
"""Tests for service layer."""
|
||||
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
from ghaw_auditor.analyzer import Analyzer
|
||||
from ghaw_auditor.differ import Differ
|
||||
from ghaw_auditor.models import (
|
||||
ActionManifest,
|
||||
Policy,
|
||||
WorkflowMeta,
|
||||
)
|
||||
from ghaw_auditor.parser import Parser
|
||||
from ghaw_auditor.policy import PolicyValidator
|
||||
from ghaw_auditor.scanner import Scanner
|
||||
from ghaw_auditor.services import AuditService, DiffService
|
||||
|
||||
|
||||
def test_audit_service_scan_basic(tmp_path: Path) -> None:
    """Test basic scan without workflows."""
    service = AuditService(Scanner(tmp_path), Parser(tmp_path), Analyzer())

    result = service.scan(offline=True)

    # An empty repository yields an empty result across the board.
    assert result.workflow_count == 0
    assert result.action_count == 0
    assert result.unique_action_count == 0
    assert len(result.workflows) == 0
    assert len(result.actions) == 0
    assert len(result.violations) == 0
|
||||
|
||||
|
||||
def test_audit_service_scan_with_workflow(tmp_path: Path) -> None:
    """Test scan with a simple workflow."""
    workflows_dir = tmp_path / ".github" / "workflows"
    workflows_dir.mkdir(parents=True)
    (workflows_dir / "ci.yml").write_text(
        """
name: CI
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
"""
    )

    service = AuditService(Scanner(tmp_path), Parser(tmp_path), Analyzer())
    result = service.scan(offline=True)

    # One workflow found and parsed, referencing one unique action.
    assert result.workflow_count == 1
    assert len(result.workflows) == 1
    assert ".github/workflows/ci.yml" in result.workflows
    assert result.unique_action_count == 1
|
||||
|
||||
def test_audit_service_scan_with_policy_violations(tmp_path: Path) -> None:
    """Test scan with policy violations."""
    # A branch ref instead of a SHA violates the pinning policy.
    workflows_dir = tmp_path / ".github" / "workflows"
    workflows_dir.mkdir(parents=True)
    (workflows_dir / "ci.yml").write_text(
        """
name: CI
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@main
"""
    )

    validator = PolicyValidator(Policy(require_pinned_actions=True))
    service = AuditService(Scanner(tmp_path), Parser(tmp_path), Analyzer(), validator=validator)
    result = service.scan(offline=True)

    assert len(result.violations) > 0
    assert any("pinned" in v["message"].lower() for v in result.violations)
|
||||
|
||||
|
||||
def test_audit_service_scan_parse_error(tmp_path: Path) -> None:
    """Scan survives an unparseable workflow instead of aborting."""
    # Arrange: a workflow file containing deliberately malformed YAML.
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    (wf_dir / "invalid.yml").write_text("invalid: yaml: {{{")

    service = AuditService(Scanner(tmp_path), Parser(tmp_path), Analyzer())
    result = service.scan(offline=True)

    # The file is counted as discovered but yields no parsed workflow.
    assert result.workflow_count == 1
    assert len(result.workflows) == 0
def test_audit_service_scan_with_resolver(tmp_path: Path) -> None:
    """An online scan delegates action resolution to the injected resolver."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    ci_yaml = """
name: CI
on: push
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
"""
    (wf_dir / "ci.yml").write_text(ci_yaml)

    # Stub resolver that hands back a single resolved manifest.
    fake_resolver = Mock()
    fake_resolver.resolve_actions.return_value = {
        "actions/checkout@abc123": ActionManifest(
            name="Checkout",
            description="Checkout code",
        )
    }

    service = AuditService(
        Scanner(tmp_path), Parser(tmp_path), Analyzer(), resolver=fake_resolver
    )
    result = service.scan(offline=False)

    # The resolver was consulted and its output flows into the result.
    assert fake_resolver.resolve_actions.called
    assert len(result.actions) == 1
def test_audit_service_scan_analysis(tmp_path: Path) -> None:
    """Scan result carries workflow analysis, including trigger names."""
    wf_dir = tmp_path / ".github" / "workflows"
    wf_dir.mkdir(parents=True)
    ci_yaml = """
name: CI
on:
  - push
  - pull_request
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - run: echo test
"""
    (wf_dir / "ci.yml").write_text(ci_yaml)

    service = AuditService(Scanner(tmp_path), Parser(tmp_path), Analyzer())
    result = service.scan(offline=True)

    # Analysis counts the workflow and records both declared triggers.
    analysis = result.analysis
    assert "total_workflows" in analysis
    assert analysis["total_workflows"] == 1
    assert "triggers" in analysis
    assert "push" in analysis["triggers"]
    assert "pull_request" in analysis["triggers"]
def test_diff_service_compare(tmp_path: Path) -> None:
    """Comparing against a saved baseline flags a changed workflow as modified."""
    baseline_dir = tmp_path / "baseline"
    baseline_dir.mkdir()

    # Persist a baseline holding the original single-trigger workflow.
    differ = Differ(baseline_dir)
    differ.save_baseline(
        {
            "test.yml": WorkflowMeta(
                name="Old",
                path="test.yml",
                triggers=["push"],
                jobs={},
            )
        },
        {},
    )

    # The same path now has a new name and an extra trigger.
    updated = WorkflowMeta(
        name="New",
        path="test.yml",
        triggers=["push", "pull_request"],
        jobs={},
    )

    diff_service = DiffService(differ)
    workflow_diffs, action_diffs = diff_service.compare({"test.yml": updated}, {})

    assert len(workflow_diffs) == 1
    assert workflow_diffs[0].status == "modified"
def test_diff_service_compare_no_baseline(tmp_path: Path) -> None:
    """Comparing without a saved baseline raises FileNotFoundError."""
    # The baseline directory was never created, so no baseline exists to load.
    diff_service = DiffService(Differ(tmp_path / "nonexistent"))

    with pytest.raises(FileNotFoundError):
        diff_service.compare({}, {})
Reference in New Issue
Block a user