---
# Quality Gate workflow: lint, security scan, tests, and coverage
# enforcement on PRs and pushes to the main branches.
name: Quality Gate

on:
  pull_request:
    branches: [main, develop]
    types: [opened, synchronize, reopened, ready_for_review]
  push:
    branches: [main, develop]

# Least-privilege default token; jobs that need more (e.g. auto-merge)
# must widen this at the job level.
permissions:
  contents: read
  pull-requests: write
  checks: write
  statuses: write

env:
  PYTHON_VERSION: "3.11"
  COVERAGE_THRESHOLD: 80
  SECURITY_THRESHOLD: "high"
jobs:
  quality-checks:
    name: Quality Checks
    runs-on: ubuntu-latest
    # Run for pushes unconditionally; for PRs, skip drafts. A bare
    # `draft == false` check is false on push events (pull_request is
    # null there), which silently skipped the whole job.
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    outputs:
      test-status: ${{ steps.tests.outcome }}
      coverage-status: ${{ steps.coverage.outcome }}
      security-status: ${{ steps.security.outcome }}
      lint-status: ${{ steps.lint.outcome }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # full history for diff-aware tooling

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: "latest"

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install dependencies
        run: |
          uv sync --dev

      - name: Code Quality - Linting
        id: lint
        # continue-on-error lets the remaining checks run so the PR
        # comment and the aggregate gate job can report every outcome.
        continue-on-error: true
        run: |
          echo "🔍 Running code quality checks..."

          # Format check
          echo "Checking code formatting..."
          uv run black --check --diff src/ tests/

          # Linting
          echo "Running flake8 linting..."
          uv run flake8 src/ tests/ --statistics --tee --output-file=flake8-report.txt

          # Type checking (if mypy is available)
          if uv run python -c "import mypy" 2>/dev/null; then
            echo "Running type checking..."
            uv run mypy src/ || echo "⚠️ Type checking found issues"
          fi

      - name: Security Analysis
        id: security
        continue-on-error: true
        run: |
          echo "🔒 Running security analysis..."

          # Install security tools
          uv add --dev safety bandit pip-audit

          # Capture exit codes without aborting: the default shell runs
          # with `bash -e`, so a bare `cmd` followed by `CODE=$?` would
          # terminate the step before $? could ever be read.
          SAFETY_EXIT_CODE=0
          BANDIT_EXIT_CODE=0
          PIP_AUDIT_EXIT_CODE=0

          echo "Running safety check..."
          uv run safety check --json --output safety-results.json || SAFETY_EXIT_CODE=$?

          echo "Running bandit security scan..."
          uv run bandit -r src/ -f json -o bandit-results.json || BANDIT_EXIT_CODE=$?

          echo "Running pip-audit..."
          uv run pip-audit --format=json --output pip-audit-results.json || PIP_AUDIT_EXIT_CODE=$?

          # Determine overall security status; fail the step (outcome
          # becomes 'failure') so the PR comment shows "Issues".
          if [ "$SAFETY_EXIT_CODE" -eq 0 ] && [ "$BANDIT_EXIT_CODE" -eq 0 ] && [ "$PIP_AUDIT_EXIT_CODE" -eq 0 ]; then
            echo "✅ All security checks passed"
          else
            echo "⚠️ Some security issues found (see artifacts)"
            exit 1
          fi

      - name: Test Execution
        id: tests
        continue-on-error: true
        run: |
          echo "🧪 Running test suite..."
          uv run pytest \
            --cov=src \
            --cov-report=xml \
            --cov-report=html \
            --cov-report=term-missing \
            --junit-xml=pytest-results.xml \
            tests/ \
            -v

      - name: Coverage Analysis
        id: coverage
        continue-on-error: true
        run: |
          echo "📊 Analyzing test coverage..."

          # Extract the overall line-rate from the Cobertura XML report.
          COVERAGE=$(uv run python -c "
          import xml.etree.ElementTree as ET
          tree = ET.parse('coverage.xml')
          root = tree.getroot()
          coverage = float(root.attrib['line-rate']) * 100
          print(f'{coverage:.1f}')
          ")

          echo "Coverage: ${COVERAGE}%"
          echo "Threshold: ${COVERAGE_THRESHOLD}%"

          # Record the percentage BEFORE any early exit so the PR comment
          # can always read it (previously it was written after `exit 1`,
          # losing the value exactly when coverage failed).
          echo "coverage-percentage=$COVERAGE" >> "$GITHUB_OUTPUT"

          if (( $(echo "$COVERAGE >= $COVERAGE_THRESHOLD" | bc -l) )); then
            echo "✅ Coverage threshold met (${COVERAGE}% >= ${COVERAGE_THRESHOLD}%)"
            echo "coverage-passed=true" >> "$GITHUB_OUTPUT"
          else
            echo "❌ Coverage below threshold (${COVERAGE}% < ${COVERAGE_THRESHOLD}%)"
            echo "coverage-passed=false" >> "$GITHUB_OUTPUT"
            exit 1
          fi

      - name: Performance Benchmarks
        id: performance
        run: |
          echo "⚡ Running performance benchmarks..."

          # Use the project environment (`uv run`), not the bare system
          # python, so the src/ package's dependencies resolve.
          uv run python -c "
          import time
          import sys
          sys.path.insert(0, 'src')
          start_time = time.time()
          try:
              from server import app
              import_time = time.time() - start_time
              print(f'Import time: {import_time:.3f}s')
              if import_time > 2.0:
                  print('⚠️ Slow import detected')
              else:
                  print('✅ Import performance acceptable')
          except Exception as e:
              print(f'❌ Import failed: {e}')
          "

      - name: Upload Test Results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: test-results
          path: |
            coverage.xml
            htmlcov/
            pytest-results.xml
            flake8-report.txt
            safety-results.json
            bandit-results.json
            pip-audit-results.json
          retention-days: 30

      - name: Comment on PR
        # always() so the comment is posted even when a gated step failed.
        if: always() && github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            // Step outcomes, interpolated by Actions before this JS runs.
            // NOTE: GitHub expressions have no ternary operator, so the
            // former `${{ x == 'success' ? ... : ... }}` was invalid —
            // the conditional must happen in JavaScript instead.
            const testsOutcome = '${{ steps.tests.outcome }}';
            const lintOutcome = '${{ steps.lint.outcome }}';
            const securityOutcome = '${{ steps.security.outcome }}';
            const coverage = '${{ steps.coverage.outputs.coverage-percentage }}';
            const coveragePassed = '${{ steps.coverage.outputs.coverage-passed }}';

            const mark = (ok, failLabel = '❌ Fail') => (ok ? '✅ Pass' : failLabel);

            // Build status summary table.
            let comment = '## 🚦 Quality Gate Results\n\n';
            comment += '| Check | Status | Details |\n';
            comment += '|-------|--------|----------|\n';
            comment += `| 🧪 Tests | ${mark(testsOutcome === 'success')} | All tests executed |\n`;
            comment += `| 📊 Coverage | ${mark(coveragePassed === 'true')} | ${coverage}% (threshold: ${{ env.COVERAGE_THRESHOLD }}%) |\n`;
            comment += `| 🎨 Linting | ${mark(lintOutcome === 'success')} | Code quality checks |\n`;
            comment += `| 🔒 Security | ${mark(securityOutcome === 'success', '⚠️ Issues')} | Security vulnerability scan |\n`;

            // Add overall status.
            const allPassed = testsOutcome === 'success' &&
              lintOutcome === 'success' &&
              coveragePassed === 'true';
            if (allPassed) {
              comment += '\n🎉 **All quality checks passed!** This PR is ready for review.\n';
            } else {
              comment += '\n⚠️ **Some quality checks failed.** Please review and fix the issues above.\n';
            }
            comment += '\n📋 Detailed reports are available in the workflow artifacts.';

            // Update the existing bot comment if present, else create one.
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const existingComment = comments.find((c) =>
              c.user.login === 'github-actions[bot]' &&
              c.body.includes('Quality Gate Results')
            );
            if (existingComment) {
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: existingComment.id,
                body: comment
              });
            } else {
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: comment
              });
            }
quality-gate-status:
name: Quality Gate Status
runs-on: ubuntu-latest
needs: quality-checks
if: always()
steps:
- name: Set final status
run: |
TEST_STATUS="${{ needs.quality-checks.outputs.test-status }}"
COVERAGE_STATUS="${{ needs.quality-checks.outputs.coverage-status }}"
LINT_STATUS="${{ needs.quality-checks.outputs.lint-status }}"
echo "Test Status: $TEST_STATUS"
echo "Coverage Status: $COVERAGE_STATUS"
echo "Lint Status: $LINT_STATUS"
if [[ "$TEST_STATUS" == "success" && "$COVERAGE_STATUS" == "success" && "$LINT_STATUS" == "success" ]]; then
echo "β
Quality gate PASSED - All checks successful"
exit 0
else
echo "β Quality gate FAILED - Some checks failed"
exit 1
fi
auto-merge-dependabot:
name: Auto-merge Dependabot PRs
runs-on: ubuntu-latest
needs: quality-checks
if: |
github.event_name == 'pull_request' &&
github.actor == 'dependabot[bot]' &&
needs.quality-checks.outputs.test-status == 'success' &&
needs.quality-checks.outputs.coverage-status == 'success' &&
needs.quality-checks.outputs.lint-status == 'success'
steps:
- name: Auto-approve Dependabot PR
run: |
gh pr review --approve "$PR_URL"
env:
PR_URL: ${{ github.event.pull_request.html_url }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Auto-merge Dependabot PR
run: |
gh pr merge --auto --squash "$PR_URL"
env:
PR_URL: ${{ github.event.pull_request.html_url }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}