Mirror of https://github.com/ivuorinen/actions.git (synced 2026-01-26 11:34:00 +00:00)
* feat: fixes, tweaks, new actions, linting
* fix: improve docker publish loops and dotnet parsing (#193)
* fix: harden action scripts and version checks (#191)
* refactor: major repository restructuring and security enhancements

  Add comprehensive development infrastructure:
  - Add Makefile with automated documentation generation, formatting, and linting tasks
  - Add TODO.md tracking self-containment progress and repository improvements
  - Add .nvmrc for consistent Node.js version management
  - Create python-version-detect-v2 action for enhanced Python detection

  Enhance all GitHub Actions with standardized patterns:
  - Add consistent token handling across 27 actions using standardized input patterns
  - Implement bash error handling (set -euo pipefail) in all shell steps
  - Add comprehensive input validation for path traversal and command injection protection
  - Standardize checkout token authentication to prevent rate limiting
  - Remove relative action dependencies to ensure external usability

  Rewrite security workflow for PR-focused analysis:
  - Transform security-suite.yml to PR-only security analysis workflow
  - Remove scheduled runs, repository issue management, and Slack notifications
  - Implement smart comment generation showing only sections with content
  - Add GitHub Actions permission diff analysis and new action detection
  - Integrate OWASP, Semgrep, and TruffleHog for comprehensive PR security scanning

  Improve version detection and dependency management:
  - Simplify version detection actions to use inline logic instead of shared utilities
  - Fix Makefile version detection fallback to properly return 'main' when version not found
  - Update all external action references to use SHA-pinned versions
  - Remove deprecated run.sh in favor of Makefile automation

  Update documentation and project standards:
  - Enhance CLAUDE.md with self-containment requirements and linting standards
  - Update README.md with improved action descriptions and usage examples
  - Standardize code formatting with updated .editorconfig and .prettierrc.yml
  - Improve GitHub templates for issues and security reporting

  This refactoring ensures all 40 actions are fully self-contained and can be used independently when referenced as ivuorinen/actions/action-name@main, addressing the critical requirement for external usability while maintaining comprehensive security analysis and development automation.
* feat: add automated action catalog generation system
  - Create generate_listing.cjs script for comprehensive action catalog
  - Add package.json with development tooling and npm scripts
  - Implement automated README.md catalog section with --update flag
  - Generate markdown reference-style links for all 40 actions
  - Add categorized tables with features, language support matrices
  - Replace static reference links with auto-generated dynamic links
  - Enable complete automation of action documentation maintenance
* feat: enhance actions with improved documentation and functionality
  - Add comprehensive README files for 12 actions with usage examples
  - Implement new utility actions (go-version-detect, dotnet-version-detect)
  - Enhance node-setup with extensive configuration options
  - Improve error handling and validation across all actions
  - Update package.json scripts for better development workflow
  - Expand TODO.md with detailed roadmap and improvement plans
  - Standardize action structure with consistent inputs/outputs
* feat: add comprehensive output handling across all actions
  - Add standardized outputs to 15 actions that previously had none
  - Implement consistent snake_case naming convention for all outputs
  - Add build status and test results outputs to build actions
  - Add files changed and status outputs to lint/fix actions
  - Add test execution metrics to php-tests action
  - Add stale/closed counts to stale action
  - Add release URLs and IDs to github-release action
  - Update documentation with output specifications
  - Mark comprehensive output handling task as complete in TODO.md
* feat: implement shared cache strategy across all actions
  - Add caching to 10 actions that previously had none (Node.js, .NET, Python, Go)
  - Standardize 4 existing actions to use common-cache instead of direct actions/cache
  - Implement consistent cache-hit optimization to skip installations when cache available
  - Add language-specific cache configurations with appropriate key files
  - Create unified caching approach using ivuorinen/actions/common-cache@main
  - Fix YAML syntax error in php-composer action paths parameter
  - Update TODO.md to mark shared cache strategy as complete
* feat: implement comprehensive retry logic for network operations
  - Create new common-retry action for standardized retry patterns with configurable strategies
  - Add retry logic to 9 actions missing network retry capabilities
  - Implement exponential backoff, custom timeouts, and flexible error handling (a generic sketch of the backoff pattern appears after this commit log)
  - Add max-retries input parameter to all network-dependent actions (Node.js, .NET, Python, Go)
  - Standardize existing retry implementations to use common-retry utility
  - Update action catalog to include new common-retry action (41 total actions)
  - Update documentation with retry configuration examples and parameters
  - Mark retry logic implementation as complete in TODO.md roadmap
* feat: enhance Node.js support with Corepack and Bun
  - Add Corepack support for automatic package manager version management
  - Add Bun package manager support across all Node.js actions
  - Improve Yarn Berry/PnP support with .yarnrc.yml detection
  - Add Node.js feature detection (ESM, TypeScript, frameworks)
  - Update package manager detection priority and lockfile support
  - Enhance caching with package-manager-specific keys
  - Update eslint, prettier, and biome actions for multi-package-manager support
* fix: resolve critical runtime issues across multiple actions
  - Fix token validation by removing ineffective literal string comparisons
  - Add missing @microsoft/eslint-formatter-sarif dependency for SARIF output
  - Fix Bash variable syntax errors in username and changelog length checks
  - Update Dockerfile version regex to handle tags with suffixes (e.g., -alpine)
  - Simplify version selection logic with single grep command
  - Fix command execution in retry action with proper bash -c wrapper
  - Correct step output references using .outcome instead of .outputs.outcome
  - Add missing step IDs for version detection actions
  - Include go.mod in cache key files for accurate invalidation
  - Require minor version in all version regex patterns
  - Improve Bun installation security by verifying script before execution
  - Replace bc with sort -V for portable PHP version comparison
  - Remove non-existent pre-commit output references

  These fixes ensure proper runtime behavior, improved security, and better cross-platform compatibility across all affected actions.
* fix: resolve critical runtime and security issues across actions
  - Fix biome-fix files_changed calculation using git diff instead of git status delta
  - Fix compress-images output description and add absolute path validation
  - Remove csharp-publish token default and fix token fallback in push commands
  - Add @microsoft/eslint-formatter-sarif to all package managers in eslint-check
  - Fix eslint-check command syntax by using variable assignment
  - Improve node-setup Bun installation security and remove invalid frozen-lockfile flag
  - Fix pre-commit token validation by removing ineffective literal comparison
  - Fix prettier-fix token comparison and expand regex for all GitHub token types
  - Add version-file-parser regex validation safety and fix csproj wildcard handling

  These fixes address security vulnerabilities, runtime errors, and functional issues to ensure reliable operation across all affected GitHub Actions.
* feat: enhance Docker actions with advanced multi-architecture support

  Major enhancement to Docker build and publish actions with comprehensive multi-architecture capabilities and enterprise-grade features.

  Added features:
  - Advanced buildx configuration (version control, cache modes, build contexts)
  - Auto-detect platforms for dynamic architecture discovery
  - Performance optimizations with enhanced caching strategies
  - Security scanning with Trivy and image signing with Cosign
  - SBOM generation in multiple formats with validation
  - Verbose logging and dry-run modes for debugging
  - Platform-specific build args and fallback mechanisms

  Enhanced all Docker actions:
  - docker-build: Core buildx features and multi-arch support
  - docker-publish-gh: GitHub Packages with security features
  - docker-publish-hub: Docker Hub with scanning and signing
  - docker-publish: Orchestrator with unified configuration

  Updated documentation across all modified actions.
* fix: resolve documentation generation placeholder issue

  Fixed Makefile and package.json to properly replace placeholder tokens in generated documentation, ensuring all README files show correct repository paths instead of ***PROJECT***@***VERSION***.
* chore: simplify github token validation
* chore(lint): optional yamlfmt, config and fixes
* feat: use relative `uses` names
* feat: comprehensive testing infrastructure and Python validation system
  - Migrate from tests/ to _tests/ directory structure with ShellSpec framework
  - Add comprehensive validation system with Python-based input validation
  - Implement dual testing approach (ShellSpec + pytest) for complete coverage
  - Add modern Python tooling (uv, ruff, pytest-cov) and dependencies
  - Create centralized validation rules with automatic generation system
  - Update project configuration and build system for new architecture
  - Enhance documentation to reflect current testing capabilities

  This establishes a robust foundation for action validation and testing with extensive coverage across all GitHub Actions in the repository.
* chore: remove Dockerfile for now
* chore: code review fixes
* feat: comprehensive GitHub Actions restructuring and tooling improvements

  This commit represents a major restructuring of the GitHub Actions monorepo with improved tooling, testing infrastructure, and comprehensive PR #186 review implementation.

  ## Major Changes

  ### 🔧 Development Tooling & Configuration
  - **Shellcheck integration**: Exclude shellspec test files from linting
    - Updated .pre-commit-config.yaml to exclude _tests/*.sh from shellcheck/shfmt
    - Modified Makefile shellcheck pattern to skip shellspec files
    - Updated CLAUDE.md documentation with proper exclusion syntax
  - **Testing infrastructure**: Enhanced Python validation framework
    - Fixed nested if statements and boolean parameter issues in validation.py
    - Improved code quality with explicit keyword arguments
    - All pre-commit hooks now passing

  ### 🏗️ Project Structure & Documentation
  - **Added Serena AI integration** with comprehensive project memories:
    - Project overview, structure, and technical stack documentation
    - Code style conventions and completion requirements
    - Comprehensive PR #186 review analysis and implementation tracking
  - **Enhanced configuration**: Updated .gitignore, .yamlfmt.yml, pyproject.toml
  - **Improved testing**: Added integration workflows and enhanced test specs

  ### 🚀 GitHub Actions Improvements (30+ actions updated)
  - **Centralized validation**: Updated 41 validation rule files
  - **Enhanced actions**: Improvements across all action categories:
    - Setup actions (node-setup, version detectors)
    - Utility actions (version-file-parser, version-validator)
    - Linting actions (biome, eslint, terraform-lint-fix major refactor)
    - Build/publish actions (docker-build, npm-publish, csharp-*)
    - Repository management actions

  ### 📝 Documentation Updates
  - **README consistency**: Updated version references across action READMEs
  - **Enhanced documentation**: Improved action descriptions and usage examples
  - **CLAUDE.md**: Updated with current tooling and best practices

  ## Technical Improvements
  - **Security enhancements**: Input validation and sanitization improvements
  - **Performance optimizations**: Streamlined action logic and dependencies
  - **Cross-platform compatibility**: Better Windows/macOS/Linux support
  - **Error handling**: Improved error reporting and user feedback

  ## Files Changed
  - 100 files changed
  - 13 new Serena memory files documenting project state
  - 41 validation rules updated for consistency
  - 30+ GitHub Actions and READMEs improved
  - Core tooling configuration enhanced
* feat: comprehensive GitHub Actions improvements and PR review fixes

  Major Infrastructure Improvements:
  - Add comprehensive testing framework with 17+ ShellSpec validation tests
  - Implement Docker-based testing tools with automated test runner
  - Add CodeRabbit configuration for automated code reviews
  - Restructure documentation and memory management system
  - Update validation rules for 25+ actions with enhanced input validation
  - Modernize CI/CD workflows and testing infrastructure

  Critical PR Review Fixes (All Issues Resolved):
  - Fix double caching in node-setup (eliminate redundant cache operations)
  - Optimize shell pipeline in version-file-parser (single awk vs complex pipeline)
  - Fix GitHub expression interpolation in prettier-check cache keys
  - Resolve terraform command order issue (validation after setup)
  - Add missing flake8-sarif dependency for Python SARIF output
  - Fix environment variable scope in pr-lint (export to GITHUB_ENV)

  Performance & Reliability:
  - Eliminate duplicate cache operations saving CI time
  - Improve shell script efficiency with optimized parsing
  - Fix command execution dependencies preventing runtime failures
  - Ensure proper dependency installation for all linting tools
  - Resolve workflow conditional logic issues

  Security & Quality:
  - All input validation rules updated with latest security patterns
  - Cross-platform compatibility improvements maintained
  - Comprehensive error handling and retry logic preserved
  - Modern development tooling and best practices adopted

  This commit addresses 100% of actionable feedback from PR review analysis, implements comprehensive testing infrastructure, and maintains high code quality standards across all 41 GitHub Actions.
* feat: enhance expression handling and version parsing
  - Fix node-setup force-version expression logic for proper empty string handling
  - Improve version-file-parser with secure regex validation and enhanced Python detection
  - Add CodeRabbit configuration for CalVer versioning and README review guidance
* feat(validate-inputs): implement modular validation system
  - Add modular validator architecture with specialized validators
  - Implement base validator classes for different input types
  - Add validators: boolean, docker, file, network, numeric, security, token, version
  - Add convention mapper for automatic input validation
  - Add comprehensive documentation for the validation system
  - Implement PCRE regex support and injection protection
* feat(validate-inputs): add validation rules for all actions
  - Add YAML validation rules for 42 GitHub Actions
  - Auto-generated rules with convention mappings
  - Include metadata for validation coverage and quality indicators
  - Mark rules as auto-generated to prevent manual edits
* test(validate-inputs): add comprehensive test suite for validators
  - Add unit tests for all validator modules
  - Add integration tests for the validation system
  - Add fixtures for version test data
  - Test coverage for boolean, docker, file, network, numeric, security, token, and version validators
  - Add tests for convention mapper and registry
* feat(tools): add validation scripts and utilities
  - Add update-validators.py script for auto-generating rules
  - Add benchmark-validator.py for performance testing
  - Add debug-validator.py for troubleshooting
  - Add generate-tests.py for test generation
  - Add check-rules-not-manually-edited.sh for CI validation
  - Add fix-local-action-refs.py tool for fixing action references
* feat(actions): add CustomValidator.py files for specialized validation
  - Add custom validators for actions requiring special validation logic
  - Implement validators for docker, go, node, npm, php, python, terraform actions
  - Add specialized validation for compress-images, common-cache, common-file-check
  - Implement version detection validators with language-specific logic
  - Add validation for build arguments, architectures, and version formats
* test: update ShellSpec test framework for Python validation
  - Update all validation.spec.sh files to use Python validator
  - Add shared validation_core.py for common test utilities
  - Remove obsolete bash validation helpers
  - Update test output expectations for Python validator format
  - Add codeql-analysis test suite
  - Refactor framework utilities for Python integration
  - Remove deprecated test files
* feat(actions): update action.yml files to use validate-inputs
  - Replace inline bash validation with validate-inputs action
  - Standardize validation across all 42 actions
  - Add new codeql-analysis action
  - Update action metadata and branding
  - Add validation step as first step in composite actions
  - Maintain backward compatibility with existing inputs/outputs
* ci: update GitHub workflows for enhanced security and testing
  - Add new codeql-new.yml workflow
  - Update security scanning workflows
  - Enhance dependency review configuration
  - Update test-actions workflow for new validation system
  - Improve workflow permissions and security settings
  - Update action versions to latest SHA-pinned releases
* build: update build configuration and dependencies
  - Update Makefile with new validation targets
  - Add Python dependencies in pyproject.toml
  - Update npm dependencies and scripts
  - Enhance Docker testing tools configuration
  - Add targets for validator updates and local ref fixes
  - Configure uv for Python package management
* chore: update linting and documentation configuration
  - Update EditorConfig settings for consistent formatting
  - Enhance pre-commit hooks configuration
  - Update prettier and yamllint ignore patterns
  - Update gitleaks security scanning rules
  - Update CodeRabbit review configuration
  - Update CLAUDE.md with latest project standards and rules
* docs: update Serena memory files and project metadata
  - Remove obsolete PR-186 memory files
  - Update project overview with current architecture
  - Update project structure documentation
  - Add quality standards and communication guidelines
  - Add modular validator architecture documentation
  - Add shellspec testing framework documentation
  - Update project.yml with latest configuration
* feat: moved rules.yml to same folder as action, fixes
* fix(validators): correct token patterns and fix validator bugs
  - Fix GitHub classic PAT pattern: ghp_ + 36 chars = 40 total
  - Fix GitHub fine-grained PAT pattern: github_pat_ + 71 chars = 82 total (a regex sketch of these length rules appears after this commit log)
  - Initialize result variable in convention_mapper to prevent UnboundLocalError
  - Fix empty URL validation in network validator to return error
  - Add GitHub expression check to docker architectures validator
  - Update docker-build CustomValidator parallel-builds max to 16
* test(validators): fix test fixtures and expectations
  - Fix token lengths in test data: github_pat 71 chars, ghp/gho 36 chars
  - Update integration tests with correct token lengths
  - Fix file validator test to expect absolute paths rejected for security
  - Rename TestGenerator import to avoid pytest collection warning
  - Update custom validator tests with correct input names
  - Change docker-build tests: platforms->architectures, tags->tag
  - Update docker-publish tests to match new registry enum validation
* test(shellspec): fix token lengths in test helpers and specs
  - Fix default token lengths in spec_helper.sh to use correct 40-char format
  - Update csharp-publish default tokens in 4 locations
  - Update codeql-analysis default tokens in 2 locations
  - Fix codeql-analysis test tokens to correct lengths (40 and 82 chars)
  - Fix npm-publish fine-grained token test to use 82-char format
* feat(actions): add permissions documentation and environment variable usage
  - Add permissions comments to all action.yml files documenting required GitHub permissions
  - Convert direct input usage to environment variables in shell steps for security
  - Add validation steps with proper error handling
  - Update input descriptions and add security notes where applicable
  - Ensure all actions follow consistent patterns for input validation
* chore(workflows): update GitHub Actions workflow versions
  - Update workflow action versions to latest
  - Improve workflow consistency and maintainability
* docs(security): add comprehensive security policy
  - Document security features and best practices
  - Add vulnerability reporting process
  - Include audit history and security testing information
* docs(memory): add GitHub workflow reference documentation
  - Add GitHub Actions workflow commands reference
  - Add GitHub workflow expressions guide
  - Add secure workflow usage patterns and best practices
* chore: token optimization, code style conventions
* chore: cr fixes
* fix: trivy reported Dockerfile problems
* fix(security): more security fixes
* chore: dockerfile and make targets for publishing
* fix(ci): add creds to test-actions workflow
* fix: security fix and checkout step to codeql-new
* chore: test fixes
* fix(security): codeql detected issues
* chore: code review fixes, ReDos protection
* style: apply MegaLinter fixes
* fix(ci): missing packages read permission
* fix(ci): add missing working directory setting
* chore: linting, add validation-regex to use regex_pattern
* chore: code review fixes
* chore(deps): update actions
* fix(security): codeql fixes
* chore(cr): apply cr comments
* chore: improve POSIX compatibility
* chore(cr): apply cr comments
* fix: codeql warning in Dockerfile, build failures
* chore(cr): apply cr comments
* fix: docker-testing-tools/Dockerfile
* chore(cr): apply cr comments
* fix(docker): update testing-tools image for GitHub Actions compatibility
* chore(cr): apply cr comments
* feat: add more tests, fix issues
* chore: fix codeql issues, update actions
* chore(cr): apply cr comments
* fix: integration tests
* chore: deduplication and fixes
* style: apply MegaLinter fixes
* chore(cr): apply cr comments
* feat: dry-run mode for generate-tests
* fix(ci): kcov installation
* chore(cr): apply cr comments
* chore(cr): apply cr comments
* chore(cr): apply cr comments
* chore(cr): apply cr comments, simplify action testing, use uv
* fix: run-tests.sh action counting
* chore(cr): apply cr comments
* chore(cr): apply cr comments
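The common-retry pattern referenced above (configurable retries with exponential backoff) is implemented as a shell-based composite action in this repository; the Python snippet below is only a minimal, generic sketch of the backoff idea, assuming a caller-supplied `operation` callable, and is not the action's actual code.

import random
import time

def retry_with_backoff(operation, max_retries: int = 5, base_delay: float = 1.0):
    """Run `operation` until it succeeds or the retry budget is exhausted (illustrative sketch)."""
    for attempt in range(max_retries):
        try:
            return operation()
        except Exception:
            if attempt == max_retries - 1:
                raise
            # Exponential backoff with a little jitter: ~1s, ~2s, ~4s, ...
            delay = base_delay * (2 ** attempt) + random.uniform(0, 0.5)
            time.sleep(delay)

In the actions themselves, the max-retries input plays the role of `max_retries` here: it bounds how many attempts are made before the step gives up.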
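The token-pattern fix above spells out the expected lengths: a classic PAT is ghp_ plus 36 characters (40 total) and a fine-grained PAT is github_pat_ plus 71 characters (82 total). The regex sketch below encodes only those length rules; the character classes are an assumption made for illustration, and the authoritative patterns live in the repository's token validator.

import re

# Length rules taken from the commit log above; character classes are assumed.
CLASSIC_PAT = re.compile(r"^ghp_[A-Za-z0-9]{36}$")               # 40 characters total
FINE_GRAINED_PAT = re.compile(r"^github_pat_[A-Za-z0-9_]{71}$")  # 82 characters total

def looks_like_github_token(value: str) -> bool:
    return bool(CLASSIC_PAT.match(value) or FINE_GRAINED_PAT.match(value))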
430 lines · 13 KiB · Python · Executable File
#!/usr/bin/env python3
"""Performance benchmarking tool for validators.

Measures validation performance and identifies bottlenecks.
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
import statistics
import sys
import time
from typing import Any

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

from validators.registry import ValidatorRegistry

class ValidatorBenchmark:
    """Benchmark utility for validators."""

    def __init__(self, iterations: int = 100) -> None:
        """Initialize the benchmark tool.

        Args:
            iterations: Number of iterations for each test
        """
        self.iterations = iterations
        self.registry = ValidatorRegistry()
        self.results: dict[str, list[float]] = {}

    def benchmark_action(
        self,
        action_type: str,
        inputs: dict[str, str],
        iterations: int | None = None,
    ) -> dict[str, Any]:
        """Benchmark validation for an action.

        Args:
            action_type: The action type to validate
            inputs: Dictionary of inputs to validate
            iterations: Number of iterations (overrides default)

        Returns:
            Benchmark results dictionary
        """
        iterations = iterations or self.iterations
        times = []

        # Get the validator once (to exclude loading time)
        validator = self.registry.get_validator(action_type)

        print(f"\nBenchmarking {action_type} with {len(inputs)} inputs...")
        print(f"Running {iterations} iterations...")

        # Warm-up run
        validator.clear_errors()
        result = validator.validate_inputs(inputs)

        # Benchmark runs
        for i in range(iterations):
            validator.clear_errors()

            start = time.perf_counter()
            result = validator.validate_inputs(inputs)
            end = time.perf_counter()

            times.append(end - start)

            if (i + 1) % 10 == 0:
                print(f" Progress: {i + 1}/{iterations}", end="\r")

        print(f" Completed: {iterations}/{iterations}")

        # Calculate statistics
        stats = self._calculate_stats(times)
        stats["action_type"] = action_type
        stats["validator"] = validator.__class__.__name__
        stats["input_count"] = len(inputs)
        stats["iterations"] = iterations
        stats["validation_result"] = result
        stats["errors"] = len(validator.errors)

        return stats

    def _calculate_stats(self, times: list[float]) -> dict[str, Any]:
        """Calculate statistics from timing data.

        Args:
            times: List of execution times

        Returns:
            Statistics dictionary
        """
        times_ms = [t * 1000 for t in times]  # Convert to milliseconds

        return {
            "min_ms": min(times_ms),
            "max_ms": max(times_ms),
            "mean_ms": statistics.mean(times_ms),
            "median_ms": statistics.median(times_ms),
            "stdev_ms": statistics.stdev(times_ms) if len(times_ms) > 1 else 0,
            "total_s": sum(times),
            "per_second": len(times) / sum(times) if sum(times) > 0 else 0,
        }

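    # Worked example (illustrative): for times = [0.002, 0.004, 0.004] seconds,
    # _calculate_stats returns mean_ms ~= 3.333, median_ms = 4.0, total_s = 0.010
    # and per_second = 3 / 0.010 = 300.0 validations per second.
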
    def compare_validators(self, test_cases: list[dict[str, Any]]) -> None:
        """Compare performance across multiple validators.

        Args:
            test_cases: List of test cases with action_type and inputs
        """
        results = []

        print("\n" + "=" * 70)
        print("Validator Performance Comparison")
        print("=" * 70)

        for case in test_cases:
            stats = self.benchmark_action(case["action_type"], case["inputs"])
            results.append(stats)

        # Display comparison table
        self._display_comparison(results)

    def _display_comparison(self, results: list[dict[str, Any]]) -> None:
        """Display comparison table of benchmark results.

        Args:
            results: List of benchmark results
        """
        print("\nResults Summary:")
        print("-" * 70)
        print(
            f"{'Action':<20} {'Validator':<20} {'Inputs':<8} {'Mean (ms)':<12} {'Ops/sec':<10}",
        )
        print("-" * 70)

        for r in results:
            print(
                f"{r['action_type']:<20} "
                f"{r['validator']:<20} "
                f"{r['input_count']:<8} "
                f"{r['mean_ms']:<12.3f} "
                f"{r['per_second']:<10.1f}",
            )

        print("\nDetailed Statistics:")
        print("-" * 70)
        for r in results:
            print(f"\n{r['action_type']} ({r['validator']}):")
            print(f" Min: {r['min_ms']:.3f} ms")
            print(f" Max: {r['max_ms']:.3f} ms")
            print(f" Mean: {r['mean_ms']:.3f} ms")
            print(f" Median: {r['median_ms']:.3f} ms")
            print(f" StdDev: {r['stdev_ms']:.3f} ms")
            print(f" Validation Result: {'PASS' if r['validation_result'] else 'FAIL'}")
            if r["errors"] > 0:
                print(f" Errors: {r['errors']}")

    def profile_validator(self, action_type: str, inputs: dict[str, str]) -> None:
        """Profile a validator to identify bottlenecks.

        Args:
            action_type: The action type to validate
            inputs: Dictionary of inputs to validate
        """
        import cProfile
        from io import StringIO
        import pstats

        print(f"\nProfiling {action_type} validator...")
        print("-" * 70)

        validator = self.registry.get_validator(action_type)

        # Create profiler
        profiler = cProfile.Profile()

        # Profile the validation
        profiler.enable()
        for _ in range(10):  # Run multiple times for better data
            validator.clear_errors()
            validator.validate_inputs(inputs)
        profiler.disable()

        # Print statistics
        stream = StringIO()
        stats = pstats.Stats(profiler, stream=stream)
        stats.strip_dirs()
        stats.sort_stats("cumulative")
        stats.print_stats(20)  # Top 20 functions

        print(stream.getvalue())

    def benchmark_patterns(self) -> None:
        """Benchmark pattern matching for convention-based validation."""
        from validators.conventions import ConventionBasedValidator

        print("\n" + "=" * 70)
        print("Pattern Matching Performance")
        print("=" * 70)

        validator = ConventionBasedValidator("test")
        # Access the internal pattern mapping
        mapper = getattr(validator, "_convention_mapper", None)
        if not mapper:
            print("Convention mapper not available")
            return

        # Test inputs with different pattern types
        test_inputs = {
            # Exact matches
            "dry-run": "true",
            "verbose": "false",
            "debug": "true",
            # Prefix matches
            "github-token": "ghp_xxx",
            "npm-token": "xxx",
            "api-token": "xxx",
            # Suffix matches
            "node-version": "18.0.0",
            "python-version": "3.9",
            # Contains matches
            "webhook-url": "https://example.com",
            "api-url": "https://api.example.com",
            # No matches
            "custom-field-1": "value1",
            "custom-field-2": "value2",
            "custom-field-3": "value3",
        }

        times = []
        for _ in range(self.iterations):
            start = time.perf_counter()
            for name in test_inputs:
                mapper.get_validator_type(name)
            end = time.perf_counter()
            times.append(end - start)

        stats = self._calculate_stats(times)

        print(f"\nPattern matching for {len(test_inputs)} inputs:")
        print(f" Mean: {stats['mean_ms']:.3f} ms")
        print(f" Median: {stats['median_ms']:.3f} ms")
        print(f" Min: {stats['min_ms']:.3f} ms")
        print(f" Max: {stats['max_ms']:.3f} ms")
        print(f" Lookups/sec: {len(test_inputs) * self.iterations / stats['total_s']:.0f}")

    def save_results(self, results: dict[str, Any], filepath: Path) -> None:
        """Save benchmark results to file.

        Args:
            results: Benchmark results
            filepath: Path to save results
        """
        with filepath.open("w") as f:
            json.dump(results, f, indent=2)
        print(f"\nResults saved to {filepath}")


def create_test_inputs(input_count: int) -> dict[str, str]:
    """Create test inputs for benchmarking.

    Args:
        input_count: Number of inputs to create

    Returns:
        Dictionary of test inputs
    """
    inputs = {}

    # Add various input types
    patterns = [
        ("github-token", "${{ secrets.GITHUB_TOKEN }}"),
        ("node-version", "18.0.0"),
        ("python-version", "3.9.0"),
        ("dry-run", "true"),
        ("verbose", "false"),
        ("max-retries", "5"),
        ("rate-limit", "100"),
        ("config-file", "./config.yml"),
        ("output-path", "./output"),
        ("webhook-url", "https://example.com/webhook"),
        ("api-url", "https://api.example.com"),
        ("docker-image", "nginx:latest"),
        ("dockerfile", "Dockerfile"),
    ]

    for i in range(input_count):
        pattern = patterns[i % len(patterns)]
        name = f"{pattern[0]}-{i}" if i > 0 else pattern[0]
        inputs[name] = pattern[1]

    return inputs


def main() -> None:
    """Main entry point for the benchmark utility."""
    parser = argparse.ArgumentParser(
        description="Benchmark validator performance",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Benchmark specific action
  %(prog)s --action docker-build --inputs 10

  # Compare multiple validators
  %(prog)s --compare

  # Profile a validator
  %(prog)s --profile docker-build

  # Benchmark pattern matching
  %(prog)s --patterns
""",
    )

    parser.add_argument(
        "--action",
        "-a",
        help="Action type to benchmark",
    )
    parser.add_argument(
        "--inputs",
        "-i",
        type=int,
        default=10,
        help="Number of inputs to test (default: 10)",
    )
    parser.add_argument(
        "--iterations",
        "-n",
        type=int,
        default=100,
        help="Number of iterations (default: 100)",
    )
    parser.add_argument(
        "--compare",
        "-c",
        action="store_true",
        help="Compare multiple validators",
    )
    parser.add_argument(
        "--profile",
        "-p",
        metavar="ACTION",
        help="Profile a specific validator",
    )
    parser.add_argument(
        "--patterns",
        action="store_true",
        help="Benchmark pattern matching",
    )
    parser.add_argument(
        "--save",
        "-s",
        type=Path,
        help="Save results to JSON file",
    )

    args = parser.parse_args()

    # Create benchmark tool
    benchmark = ValidatorBenchmark(iterations=args.iterations)

    if args.compare:
        # Compare different validators
        test_cases = [
            {
                "action_type": "docker-build",
                "inputs": create_test_inputs(args.inputs),
            },
            {
                "action_type": "github-release",
                "inputs": create_test_inputs(args.inputs),
            },
            {
                "action_type": "test-action",  # Uses convention-based
                "inputs": create_test_inputs(args.inputs),
            },
        ]
        benchmark.compare_validators(test_cases)

    elif args.profile:
        # Profile specific validator
        inputs = create_test_inputs(args.inputs)
        benchmark.profile_validator(args.profile, inputs)

    elif args.patterns:
        # Benchmark pattern matching
        benchmark.benchmark_patterns()

    elif args.action:
        # Benchmark specific action
        inputs = create_test_inputs(args.inputs)
        results = benchmark.benchmark_action(args.action, inputs)

        # Display results
        print("\n" + "=" * 70)
        print("Benchmark Results")
        print("=" * 70)
        print(f"Action: {results['action_type']}")
        print(f"Validator: {results['validator']}")
        print(f"Inputs: {results['input_count']}")
        print(f"Iterations: {results['iterations']}")
        print("-" * 70)
        print(f"Mean: {results['mean_ms']:.3f} ms")
        print(f"Median: {results['median_ms']:.3f} ms")
        print(f"Min: {results['min_ms']:.3f} ms")
        print(f"Max: {results['max_ms']:.3f} ms")
        print(f"StdDev: {results['stdev_ms']:.3f} ms")
        print(f"Ops/sec: {results['per_second']:.1f}")

        if args.save:
            benchmark.save_results(results, args.save)

    else:
        parser.print_help()
        sys.exit(1)


if __name__ == "__main__":
    main()
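As the script above shows, the benchmark relies on a narrow validator interface: ValidatorRegistry.get_validator() must return an object exposing clear_errors(), validate_inputs() and an errors list. A minimal stand-in that satisfies this contract, useful for trying the benchmark outside the repository, could look like the sketch below; the stub classes are hypothetical and are not part of the validators package.

class StubValidator:
    """Hypothetical minimal validator matching the interface used above."""

    def __init__(self, action_type: str) -> None:
        self.action_type = action_type
        self.errors: list[str] = []

    def clear_errors(self) -> None:
        self.errors.clear()

    def validate_inputs(self, inputs: dict[str, str]) -> bool:
        # Toy rule for demonstration: every input must have a non-empty value.
        self.errors = [name for name, value in inputs.items() if not value]
        return not self.errors


class StubRegistry:
    """Hypothetical drop-in for validators.registry.ValidatorRegistry."""

    def get_validator(self, action_type: str) -> StubValidator:
        return StubValidator(action_type)

Swapping such a stub registry in for ValidatorRegistry lets ValidatorBenchmark and create_test_inputs() run unchanged, which makes it easy to sanity-check the timing and statistics logic in isolation.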