name: Comprehensive Testing & Quality Assurance
# Trigger configuration - comprehensive CI/CD coverage
on:
push:
branches: [ main, develop, feature/*, hotfix/* ]
paths-ignore:
- '*.md'
- 'docs/**'
- '.gitignore'
- 'LICENSE'
pull_request:
branches: [ main, develop ]
paths-ignore:
- '*.md'
- 'docs/**'
- '.gitignore'
- 'LICENSE'
schedule:
# Run nightly at 2 AM UTC for dependency and security scanning
- cron: '0 2 * * *'
workflow_dispatch:
inputs:
test_level:
description: 'Test Level'
required: false
default: 'full'
type: choice
options:
- 'quick'
- 'full'
- 'security'
- 'performance'
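# Note: the manual trigger above can also be started from the command line with the GitHub CLI,
# e.g. `gh workflow run "Comprehensive Testing & Quality Assurance" -f test_level=security`
# (assumes an authenticated `gh` installation; the workflow can also be referenced by file name).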
# Global environment variables and configuration
env:
PYTHONPATH: ${{ github.workspace }}/src
PYTEST_ADDOPTS: "--strict-config --strict-markers"
COVERAGE_THRESHOLD: 85
MAX_COMPLEXITY: 15
SECURITY_BASELINE: bandit-baseline.json
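# SECURITY_BASELINE is assumed to point at a Bandit baseline committed at the repository root.
# Such a file is typically generated once with `bandit -r src/ -f json -o bandit-baseline.json`
# and consumed on later runs via `bandit -r src/ -b bandit-baseline.json` so that only new
# findings fail the scan (illustrative commands, not invoked by the steps below).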
# Concurrency control - cancel redundant runs
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# ============================================================================
# Pre-flight Checks & Validation
# ============================================================================
pre_flight:
name: Pre-flight Checks
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
python-versions: ${{ steps.setup.outputs.python-versions }}
test-level: ${{ steps.setup.outputs.test-level }}
skip-tests: ${{ steps.setup.outputs.skip-tests }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Full history for accurate analysis
- name: Setup test configuration
id: setup
run: |
# Determine Python versions to test
if [[ "${{ github.event_name }}" == "schedule" ]]; then
echo "python-versions=[\"3.9\", \"3.10\", \"3.11\", \"3.12\"]" >> $GITHUB_OUTPUT
echo "test-level=full" >> $GITHUB_OUTPUT
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "python-versions=[\"3.11\"]" >> $GITHUB_OUTPUT
echo "test-level=${{ github.event.inputs.test_level }}" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
echo "python-versions=[\"3.10\", \"3.11\"]" >> $GITHUB_OUTPUT
echo "test-level=full" >> $GITHUB_OUTPUT
else
echo "python-versions=[\"3.11\"]" >> $GITHUB_OUTPUT
echo "test-level=quick" >> $GITHUB_OUTPUT
fi
# Check if we should skip tests (docs-only pushes); scheduled and manual runs always test
if [[ "${{ github.event_name }}" != "push" && "${{ github.event_name }}" != "pull_request" ]]; then
echo "skip-tests=false" >> $GITHUB_OUTPUT
elif git diff --name-only HEAD~1 HEAD | grep -E '\.(py|yml|yaml|toml|cfg|ini)$' > /dev/null; then
echo "skip-tests=false" >> $GITHUB_OUTPUT
else
echo "skip-tests=true" >> $GITHUB_OUTPUT
fi
- name: Validate project structure
run: |
# Verify critical files exist
for file in pyproject.toml src/main.py tests/conftest.py; do
if [[ ! -f "$file" ]]; then
echo "β Critical file missing: $file"
exit 1
fi
done
echo "β
Project structure validation passed"
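# The structure check above assumes tests/conftest.py exists. A minimal sketch of what such a
# file might contain, given that --strict-markers is enforced globally and the integration job
# later invokes `pytest --integration` (all names here are illustrative, not the project's API):
#
#   # tests/conftest.py
#   import pytest
#
#   def pytest_addoption(parser):
#       # Hypothetical flag consumed by the integration job's `pytest --integration` call.
#       parser.addoption("--integration", action="store_true", default=False,
#                        help="run tests marked as integration")
#
#   def pytest_configure(config):
#       # Register custom markers so --strict-markers does not reject them.
#       config.addinivalue_line("markers", "integration: requires external services")
#
#   def pytest_collection_modifyitems(config, items):
#       # Skip integration-marked tests unless --integration was passed.
#       if config.getoption("--integration"):
#           return
#       skip = pytest.mark.skip(reason="pass --integration to run")
#       for item in items:
#           if "integration" in item.keywords:
#               item.add_marker(skip)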
# ============================================================================
# Code Quality & Security Analysis
# ============================================================================
quality_analysis:
name: Quality & Security Analysis
runs-on: ubuntu-latest
needs: pre_flight
if: needs.pre_flight.outputs.skip-tests == 'false'
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: "3.11"
cache: 'pip'
- name: Install analysis tools
run: |
pip install --upgrade pip
pip install bandit[toml] safety mypy black isort flake8 pylint
- name: Code formatting check (Black)
run: |
black --check --diff src/ tests/
- name: Import sorting check (isort)
run: |
isort --check-only --diff src/ tests/
- name: Code style check (flake8)
run: |
flake8 src/ tests/ --max-complexity=${{ env.MAX_COMPLEXITY }} \
--exclude=__pycache__,*.pyc,.pytest_cache \
--format='%(path)s:%(row)d:%(col)d: %(code)s %(text)s'
- name: Advanced linting (pylint)
continue-on-error: true # Pylint can be overly strict
run: |
pylint src/ --output-format=colorized --fail-under=8.0
- name: Type checking (mypy)
run: |
mypy src/ --strict --show-error-codes --pretty
- name: Security analysis (bandit)
run: |
bandit -r src/ -f json -o bandit-report.json
bandit -r src/ -f txt
- name: Dependency vulnerability scan (safety)
run: |
pip freeze | safety check --stdin --json > safety-report.json || true
pip freeze | safety check --stdin
- name: Upload security reports
if: always()
uses: actions/upload-artifact@v4
with:
name: security-reports
path: |
bandit-report.json
safety-report.json
retention-days: 30
# ============================================================================
# Comprehensive Test Matrix
# ============================================================================
test_matrix:
name: Test Suite (Python ${{ matrix.python-version }} on ${{ matrix.os }})
runs-on: ${{ matrix.os }}
needs: [pre_flight, quality_analysis]
if: needs.pre_flight.outputs.skip-tests == 'false'
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
python-version: ${{ fromJson(needs.pre_flight.outputs.python-versions) }}
exclude:
# Trim the macOS matrix to the primary Python versions to save resources
- os: macos-latest
python-version: "3.9"
- os: macos-latest
python-version: "3.12"
env:
OS: ${{ matrix.os }}
PYTHON: ${{ matrix.python-version }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install system dependencies (Ubuntu)
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y build-essential libssl-dev libffi-dev
- name: Install system dependencies (macOS)
if: matrix.os == 'macos-latest'
run: |
brew install openssl libffi
- name: Install Python dependencies
run: |
pip install --upgrade pip setuptools wheel
pip install -e .[dev,test]
# Install testing framework dependencies
pip install pytest pytest-asyncio pytest-cov pytest-xdist pytest-timeout
pip install pytest-benchmark[histogram] hypothesis[cli,ghostwriter]
pip install coverage[toml] pytest-html pytest-json-report
- name: Verify installation
run: |
python --version
pip list
python -c "import sys; print('Python path:', sys.path)"
python -c "import src; print('Package imported successfully')"
- name: Run unit tests
run: |
pytest tests/ \
--cov=src \
--cov-config=pyproject.toml \
--cov-report=xml \
--cov-report=html \
--cov-report=term-missing \
--junit-xml=test-results.xml \
--html=test-report.html \
--self-contained-html \
-v \
--tb=short \
--durations=10
- name: Run property-based tests
if: needs.pre_flight.outputs.test-level != 'quick'
run: |
pytest tests/properties/ \
--hypothesis-show-statistics \
--hypothesis-verbosity=verbose \
--hypothesis-seed=42 \
-v
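# A hedged sketch of the kind of property-based test assumed under tests/properties/ (the
# round-trip functions are stand-ins, not the project's real API):
#
#   # tests/properties/test_roundtrip.py
#   import json
#   from hypothesis import given, settings, strategies as st
#
#   def encode(data: dict) -> str:   # placeholder for project code
#       return json.dumps(data)
#
#   def decode(raw: str) -> dict:    # placeholder for project code
#       return json.loads(raw)
#
#   @given(st.dictionaries(st.text(), st.integers()))
#   @settings(max_examples=200)
#   def test_encode_decode_roundtrip(payload):
#       # Property: decoding an encoded payload returns the original value.
#       assert decode(encode(payload)) == payload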
- name: Run security tests
if: needs.pre_flight.outputs.test-level == 'full' || needs.pre_flight.outputs.test-level == 'security'
run: |
pytest tests/security/ \
--hypothesis-show-statistics \
-v \
--tb=line
- name: Run performance benchmarks
if: needs.pre_flight.outputs.test-level == 'full' || needs.pre_flight.outputs.test-level == 'performance'
run: |
pytest tests/performance/ \
--benchmark-only \
--benchmark-json=benchmark-results.json \
--benchmark-histogram=benchmark-histogram \
-v
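# A hedged sketch of a pytest-benchmark test of the sort assumed under tests/performance/
# (the benchmarked call is a stand-in; the `benchmark` fixture comes from pytest-benchmark):
#
#   # tests/performance/test_sort_benchmark.py
#   import random
#
#   def test_sort_large_list(benchmark):
#       data = [random.random() for _ in range(10_000)]
#       # benchmark() times repeated calls and records stats for --benchmark-json.
#       result = benchmark(sorted, data)
#       assert result[0] <= result[-1]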
- name: Run concurrent operation tests
if: needs.pre_flight.outputs.test-level == 'full'
run: |
pytest tests/concurrent/ \
--timeout=300 \
-v \
--tb=short
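# A hedged sketch of a concurrency test as assumed under tests/concurrent/ (the shared counter
# is a stand-in for project state; the --timeout option above is provided by pytest-timeout):
#
#   # tests/concurrent/test_thread_safety.py
#   import threading
#   from concurrent.futures import ThreadPoolExecutor
#
#   def test_concurrent_increments():
#       lock = threading.Lock()
#       counter = {"value": 0}
#
#       def bump(_):
#           with lock:
#               counter["value"] += 1
#
#       with ThreadPoolExecutor(max_workers=8) as pool:
#           list(pool.map(bump, range(1000)))
#       assert counter["value"] == 1000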
- name: Coverage analysis and quality gates
run: |
# Generate coverage reports
coverage report --show-missing --fail-under=${{ env.COVERAGE_THRESHOLD }}
coverage json
# Extract coverage percentage for badge
COVERAGE=$(python -c "import json; print(json.load(open('coverage.json'))['totals']['percent_covered'])")
echo "COVERAGE_PERCENT=$COVERAGE" >> $GITHUB_ENV
echo "Coverage: $COVERAGE%"
# Fail if coverage is below threshold
if (( $(echo "$COVERAGE < ${{ env.COVERAGE_THRESHOLD }}" | bc -l) )); then
echo "β Coverage $COVERAGE% is below threshold ${{ env.COVERAGE_THRESHOLD }}%"
exit 1
fi
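# For reference, the numeric gate above can also be expressed without bc; a minimal Python
# equivalent (illustrative only, assuming coverage.json from the previous command and the same
# 85% threshold):
#
#   # scripts/check_coverage.py
#   import json, sys
#
#   THRESHOLD = 85  # mirrors COVERAGE_THRESHOLD
#   pct = json.load(open("coverage.json"))["totals"]["percent_covered"]
#   print(f"Coverage: {pct:.1f}%")
#   sys.exit(0 if pct >= THRESHOLD else 1)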
- name: Upload test artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: test-results-${{ matrix.os }}-py${{ matrix.python-version }}
path: |
test-results.xml
test-report.html
coverage.xml
htmlcov/
benchmark-results.json
benchmark-histogram*.svg
retention-days: 30
- name: Upload coverage to Codecov
if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
uses: codecov/codecov-action@v3
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage.xml
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
verbose: true
# ============================================================================
# Integration & End-to-End Testing
# ============================================================================
integration_tests:
name: Integration Tests
runs-on: ubuntu-latest
needs: [pre_flight, test_matrix]
if: needs.pre_flight.outputs.test-level == 'full' && needs.pre_flight.outputs.skip-tests == 'false'
timeout-minutes: 20
services:
# Backing services for integration testing
redis:
image: redis:alpine
ports:
- 6379:6379
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: "3.11"
cache: 'pip'
- name: Install dependencies
run: |
pip install --upgrade pip
pip install -e .[dev,test]
pip install pytest pytest-asyncio pytest-integration pytest-timeout
- name: Setup test environment
run: |
# Create test directories and files
mkdir -p /tmp/test_sessions
mkdir -p /tmp/test_logs
# Set environment variables for integration tests
echo "TEST_SESSION_ROOT=/tmp/test_sessions" >> $GITHUB_ENV
echo "TEST_LOG_DIR=/tmp/test_logs" >> $GITHUB_ENV
echo "REDIS_URL=redis://localhost:6379" >> $GITHUB_ENV
- name: Run integration tests
run: |
pytest tests/integration/ \
--integration \
--timeout=300 \
-v \
--tb=short \
--durations=10
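# A hedged sketch of an integration test of the kind assumed under tests/integration/,
# exercising the Redis service defined above (assumes the `redis` client package is available
# via the test extras):
#
#   # tests/integration/test_redis_roundtrip.py
#   import os
#   import pytest
#
#   redis = pytest.importorskip("redis")
#
#   @pytest.mark.integration
#   def test_redis_set_get():
#       client = redis.Redis.from_url(os.environ.get("REDIS_URL", "redis://localhost:6379"))
#       client.set("ci-probe", "ok")
#       assert client.get("ci-probe") == b"ok"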
- name: Cleanup test environment
if: always()
run: |
rm -rf /tmp/test_sessions
rm -rf /tmp/test_logs
# ============================================================================
# Nightly Security & Dependency Checks
# ============================================================================
nightly_security:
name: Nightly Security Scan
runs-on: ubuntu-latest
if: github.event_name == 'schedule'
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: "3.11"
cache: 'pip'
- name: Install dependencies
run: |
pip install --upgrade pip
pip install -e .[dev,test]
pip install bandit[toml,sarif] safety pip-audit
- name: Comprehensive security scan
run: |
# Bandit security scan with detailed reporting
bandit -r src/ -f json -o bandit-detailed.json
bandit -r src/ -f sarif -o bandit.sarif
# Safety vulnerability check
safety check --json > safety-detailed.json
# Pip-audit for additional vulnerability detection
pip-audit --format=json --output=pip-audit.json
- name: Upload security artifacts
uses: actions/upload-artifact@v4
with:
name: nightly-security-scan
path: |
bandit-detailed.json
bandit.sarif
safety-detailed.json
pip-audit.json
retention-days: 90
- name: Upload SARIF results
if: always()
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: bandit.sarif
# ============================================================================
# Test Summary & Notification
# ============================================================================
test_summary:
name: Test Summary
runs-on: ubuntu-latest
needs: [pre_flight, quality_analysis, test_matrix, integration_tests]
if: always() && needs.pre_flight.outputs.skip-tests == 'false'
timeout-minutes: 5
steps:
- name: Collect test results
run: |
echo "## π§ͺ Test Suite Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Quality Analysis Results
if [[ "${{ needs.quality_analysis.result }}" == "success" ]]; then
echo "β
**Quality Analysis**: PASSED" >> $GITHUB_STEP_SUMMARY
else
echo "β **Quality Analysis**: FAILED" >> $GITHUB_STEP_SUMMARY
fi
# Test Matrix Results
if [[ "${{ needs.test_matrix.result }}" == "success" ]]; then
echo "β
**Test Matrix**: PASSED" >> $GITHUB_STEP_SUMMARY
else
echo "β **Test Matrix**: FAILED" >> $GITHUB_STEP_SUMMARY
fi
# Integration Tests Results (if run)
if [[ "${{ needs.integration_tests.result }}" == "success" ]]; then
echo "β
**Integration Tests**: PASSED" >> $GITHUB_STEP_SUMMARY
elif [[ "${{ needs.integration_tests.result }}" == "failure" ]]; then
echo "β **Integration Tests**: FAILED" >> $GITHUB_STEP_SUMMARY
elif [[ "${{ needs.integration_tests.result }}" == "skipped" ]]; then
echo "βοΈ **Integration Tests**: SKIPPED" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "### π Metrics" >> $GITHUB_STEP_SUMMARY
echo "- **Test Level**: ${{ needs.pre_flight.outputs.test-level }}" >> $GITHUB_STEP_SUMMARY
echo "- **Python Versions**: ${{ needs.pre_flight.outputs.python-versions }}" >> $GITHUB_STEP_SUMMARY
echo "- **Workflow**: ${{ github.workflow }}" >> $GITHUB_STEP_SUMMARY
echo "- **Trigger**: ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
- name: Check overall status
run: |
# Determine overall status
if [[ "${{ needs.quality_analysis.result }}" == "success" && \
"${{ needs.test_matrix.result }}" == "success" && \
("${{ needs.integration_tests.result }}" == "success" || \
"${{ needs.integration_tests.result }}" == "skipped") ]]; then
echo "π All tests passed successfully!"
echo "TEST_STATUS=success" >> $GITHUB_ENV
else
echo "π₯ Some tests failed!"
echo "TEST_STATUS=failure" >> $GITHUB_ENV
exit 1
fi
# ============================================================================
# Deployment Readiness Check
# ============================================================================
deployment_readiness:
name: Deployment Readiness
runs-on: ubuntu-latest
needs: [test_summary]
if: github.ref == 'refs/heads/main' && needs.test_summary.result == 'success'
timeout-minutes: 5
steps:
- name: Deployment readiness check
run: |
echo "## π Deployment Readiness Report" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "β
All quality gates passed" >> $GITHUB_STEP_SUMMARY
echo "β
Test coverage meets requirements" >> $GITHUB_STEP_SUMMARY
echo "β
Security scans completed" >> $GITHUB_STEP_SUMMARY
echo "β
Integration tests passed" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "π― **Ready for deployment!**" >> $GITHUB_STEP_SUMMARY
# Set deployment flag
echo "DEPLOYMENT_READY=true" >> $GITHUB_ENV
- name: Create deployment artifact
run: |
echo "{\"status\": \"ready\", \"timestamp\": \"$(date -Iseconds)\", \"commit\": \"${{ github.sha }}\"}" > deployment-readiness.json
- name: Upload deployment readiness
uses: actions/upload-artifact@v4
with:
name: deployment-readiness
path: deployment-readiness.json
retention-days: 30
# ============================================================================
# Workflow Configuration & Best Practices
# ============================================================================
# Security considerations:
# - The only secret used (CODECOV_TOKEN) is referenced via the secrets context and never echoed
# - Dependency vulnerabilities are scanned with safety and pip-audit
# - Static code security analysis runs with bandit, with SARIF results uploaded to code scanning
# Performance optimizations:
# - Parallel job execution where possible
# - Dependency caching for faster builds
# - Conditional job execution to save resources
# - Appropriate timeouts to prevent hanging
# Reliability features:
# - Per-job timeouts and a non-fail-fast matrix keep one failure from hiding others
# - Test, coverage, and security reports are collected as artifacts for debugging
# - Concurrency control cancels superseded runs on the same ref
# - Step summaries report pass/fail status for every stage