name: Performance Benchmarks
on:
push:
branches: [main]
  pull_request:
    branches: [main]
    types: [opened, synchronize, reopened]
workflow_dispatch:
inputs:
pr_number:
description: "PR number to benchmark (optional)"
required: false
type: string
schedule:
- cron: "0 2 * * 1" # Weekly on Mondays at 2 AM UTC
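# Cancel superseded runs for the same ref so duplicate benchmark jobs do not
# pile up (an optional sketch; the group name here is an assumption).
concurrency:
  group: benchmarks-${{ github.ref }}
  cancel-in-progress: true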
jobs:
benchmark:
name: Performance Benchmarks
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write # For commenting on PRs
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0 # Fetch full history for comparison
- name: Install uv
uses: astral-sh/setup-uv@v7
with:
version: "latest"
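          # Optional: let setup-uv cache uv's downloads between runs for a
          # modest speed-up (a sketch; drop if caching is unwanted)
          enable-cache: true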
- name: Set up Python
run: uv python install 3.12
- name: Install dependencies
run: |
uv sync --frozen # Use frozen lockfile to prevent modifications
          # Install pytest-benchmark into the project environment without
          # touching uv.lock (uv pip install does not modify the lockfile)
          uv pip install pytest-benchmark
- name: Download ZIM test data
run: |
make download-test-data
- name: Run performance benchmarks
run: |
echo "π Searching for benchmark tests..."
BENCHMARK_TESTS=$(uv run pytest --collect-only -k "benchmark" tests/ 2>/dev/null | grep -c "test session starts" || echo "0")
echo "Found benchmark test sessions: $BENCHMARK_TESTS"
# Run benchmarks and save results
echo "π Running performance benchmarks..."
if uv run pytest tests/ -k "benchmark" --benchmark-json=benchmark-results.json --benchmark-only; then
echo "β
Benchmarks completed successfully"
else
echo "β οΈ Benchmark execution completed with warnings or no tests found"
fi
# Verify and create benchmark results file
if [ -f benchmark-results.json ] && [ -s benchmark-results.json ]; then
echo "β
benchmark-results.json created successfully"
echo "File size: $(stat -c%s benchmark-results.json) bytes"
else
echo "π Creating placeholder benchmark results file..."
echo '{"benchmarks": [], "datetime": "'$(date -Iseconds)'", "version": "no-benchmark-tests", "machine_info": {"node": "github-actions", "processor": "unknown"}}' > benchmark-results.json
fi
# Validate JSON format
if uv run python -c "import json; json.load(open('benchmark-results.json'))"; then
echo "β
benchmark-results.json is valid JSON"
else
echo "β Invalid JSON detected, recreating file..."
echo '{"benchmarks": [], "datetime": "'$(date -Iseconds)'", "version": "invalid-json-fallback"}' > benchmark-results.json
fi
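      # For reference, a minimal test matched by `-k "benchmark"` above looks
      # like this (a sketch; the function name is an assumption):
      #
      #   def test_sum_benchmark(benchmark):
      #       # pytest-benchmark calls the target repeatedly and records timings
      #       result = benchmark(sum, range(10_000))
      #       assert result == 49_995_000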
- name: Store benchmark results (temporarily disabled)
if: false # Temporarily disable to avoid git conflicts during release system testing
run: |
echo "Benchmark storage temporarily disabled during release system testing"
echo "This will be re-enabled after the release system is validated"
echo "Benchmark results are still generated and available as artifacts"
- name: Verify benchmark results file
if: github.event_name == 'pull_request'
run: |
echo "Checking for benchmark results file..."
if [ -f benchmark-results.json ]; then
echo "β
benchmark-results.json found"
echo "File size: $(stat -c%s benchmark-results.json) bytes"
echo "File contents preview:"
head -n 5 benchmark-results.json
else
echo "β benchmark-results.json not found"
echo "Creating minimal benchmark file for comparison..."
echo '{"benchmarks": [], "datetime": "'$(date -Iseconds)'", "version": "no-benchmarks"}' > benchmark-results.json
fi
- name: Clean up local changes before benchmark comparison
if: github.event_name == 'pull_request'
run: |
# Stash any local changes to avoid conflicts with benchmark action
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
if ! git diff --quiet; then
echo "Stashing local changes to avoid git conflicts"
git stash push -m "Temporary stash for benchmark comparison"
fi
# Clean any untracked files that might cause conflicts
git clean -fd
- name: Compare benchmark results (PR)
uses: benchmark-action/github-action-benchmark@v1
if: github.event_name == 'pull_request' && hashFiles('benchmark-results.json') != ''
with:
tool: "pytest"
output-file-path: benchmark-results.json
github-token: ${{ secrets.GITHUB_TOKEN }}
comment-on-alert: true
alert-threshold: "150%"
fail-on-alert: false
# Disable auto-push to avoid gh-pages conflicts
auto-push: false
- name: Upload benchmark results
uses: actions/upload-artifact@v6
with:
name: benchmark-results
path: benchmark-results.json
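          # Fail loudly if the results file was never produced (optional
          # hardening; if-no-files-found is a standard upload-artifact input)
          if-no-files-found: error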
memory-profiling:
name: Memory Profiling
runs-on: ubuntu-latest
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v7
with:
version: "latest"
- name: Set up Python
run: uv python install 3.12
- name: Install dependencies
run: |
uv sync --frozen # Use frozen lockfile to prevent modifications
# Install profiling tools into the project environment
uv pip install memory-profiler psutil
- name: Download ZIM test data
run: |
make download-test-data
- name: Run memory profiling
run: |
# Create a simple memory profiling script
          cat > memory_profile.py << 'EOF'
          import time

          import psutil


          def profile_memory():
              """Measure process RSS before and after importing the package."""
              process = psutil.Process()
              initial_memory = process.memory_info().rss / 1024 / 1024  # MB
              print(f"Initial memory: {initial_memory:.2f} MB")

              # Import after the baseline so the package's own footprint is
              # captured. Expand with actual benchmark operations as they
              # become available.
              import openzim_mcp.main  # noqa: F401

              time.sleep(1)
              final_memory = process.memory_info().rss / 1024 / 1024  # MB
              print(f"Final memory: {final_memory:.2f} MB")
              print(f"Memory increase: {final_memory - initial_memory:.2f} MB")
              return {
                  "initial_memory_mb": initial_memory,
                  "final_memory_mb": final_memory,
                  "memory_increase_mb": final_memory - initial_memory,
              }


          if __name__ == "__main__":
              results = profile_memory()
              print(f"Memory profiling results: {results}")
          EOF
uv run python memory_profile.py > memory-profile-results.txt
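      # memory-profiler also supports line-by-line reports (a sketch): decorate
      # profile_memory with @profile and run
      # `uv run python -m memory_profiler memory_profile.py` instead.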
- name: Upload memory profiling results
uses: actions/upload-artifact@v6
with:
name: memory-profile-results
path: memory-profile-results.txt