name: Comprehensive Integration Check
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [main]
workflow_dispatch:
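# Triggers: pull requests, pushes to main, and manual runs via workflow_dispatch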
env:
  PYTHON_VERSION_DEFAULT: "3.12"
  PYTEST_TIMEOUT: 300 # seconds per test; assumes pytest-timeout is in the dev dependency group
  COVERAGE_THRESHOLD: 80 # minimum acceptable coverage percentage
permissions:
  contents: read
  pull-requests: write # for PR comments (test results, coverage)
  checks: write # for check runs created by the test result publisher
  security-events: write # required to upload SARIF results to code scanning
jobs:
# Unit tests with coverage reporting
unit-tests:
name: Unit Tests (Python ${{ matrix.python-version }})
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12"]
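      # Keep the remaining Python versions running even if one fails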
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: latest
virtualenvs-create: true
virtualenvs-in-project: true
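      # Cache the in-project virtualenv, keyed on OS, Python version, and the lockfile hash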
- name: Load cached venv
id: cached-poetry-dependencies
uses: actions/cache@v4
with:
path: .venv
key: venv-${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }}
      - name: Install dependencies
        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
        run: poetry install --with dev --no-interaction --no-root
      - name: Install project
        # Always reinstall the root package so a restored cache never ships stale project code
        run: poetry install --with dev --no-interaction
- name: Run unit tests with coverage
run: |
poetry run pytest tests/unit/ \
--cov=fast_intercom_mcp \
--cov-report=xml \
--cov-report=term-missing \
--cov-report=html \
--junit-xml=junit/test-results-${{ matrix.python-version }}.xml \
-v \
--timeout=${{ env.PYTEST_TIMEOUT }}
      - name: Upload coverage reports
        uses: actions/upload-artifact@v4
        with:
          name: coverage-unit-${{ matrix.python-version }}
          # The raw .coverage data file is hidden, so it must be opted in explicitly
          include-hidden-files: true
          path: |
            .coverage
            coverage.xml
            htmlcov/
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: junit-unit-${{ matrix.python-version }}
path: junit/test-results-${{ matrix.python-version }}.xml
      - name: Check coverage threshold
        if: matrix.python-version == env.PYTHON_VERSION_DEFAULT
        run: |
          # coverage's built-in threshold check replaces the fragile grep/awk/bc parsing
          poetry run coverage report --fail-under=${{ env.COVERAGE_THRESHOLD }}
# Integration tests with real API
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
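    # Runs on pull requests and pushes to main; workflow_dispatch on other branches skips this job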
if: github.event_name == 'pull_request' || github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
- name: Install Poetry
uses: snok/install-poetry@v1
- name: Install dependencies
run: poetry install --with dev
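      # Note: repository secrets are not exposed to pull requests from forks,
      # so credentialed tests may skip or fail there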
- name: Run integration tests
env:
INTERCOM_TEST_TOKEN: ${{ secrets.INTERCOM_TEST_TOKEN }}
INTERCOM_TEST_WORKSPACE_ID: ${{ secrets.INTERCOM_TEST_WORKSPACE_ID }}
run: |
poetry run pytest tests/integration/ \
--junit-xml=junit/integration-results.xml \
-v \
--timeout=${{ env.PYTEST_TIMEOUT }} \
--tb=short
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: junit-integration
path: junit/integration-results.xml
# Performance tests with regression detection
performance-tests:
name: Performance Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # Need full history for baseline comparison
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
- name: Install Poetry
uses: snok/install-poetry@v1
- name: Install dependencies
run: poetry install --with dev
      - name: Restore benchmark baseline
        # Restore the most recent baseline saved from main; cache/restore is used
        # (rather than actions/cache) so pull request runs never save a cache
        uses: actions/cache/restore@v4
        with:
          path: .benchmarks
          key: benchmarks-${{ runner.os }}-${{ github.sha }}
          restore-keys: |
            benchmarks-${{ runner.os }}-
      - name: Run performance benchmarks
        run: |
          # On main, autosave this run into .benchmarks so it becomes the baseline
          # for later pull request comparisons
          extra_args=""
          if [ "${{ github.ref }}" = "refs/heads/main" ]; then
            extra_args="--benchmark-autosave"
          fi
          poetry run pytest tests/performance/ \
            --benchmark-only \
            --benchmark-json=benchmark_results.json \
            --benchmark-name=short \
            --benchmark-columns=min,max,mean,stddev,median \
            $extra_args \
            -v
      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: benchmark_results.json
      - name: Compare with baseline
        if: github.event_name == 'pull_request'
        run: |
          # A bare --benchmark-compare compares against the latest run saved in
          # .benchmarks; fail if mean runtime regresses by more than 10%
          if compgen -G ".benchmarks/*/*.json" > /dev/null; then
            poetry run pytest tests/performance/ \
              --benchmark-only \
              --benchmark-compare \
              --benchmark-compare-fail=mean:10% \
              --benchmark-json=benchmark_comparison.json
          else
            echo "No benchmark baseline found; skipping comparison"
          fi
      - name: Save benchmark baseline
        if: github.ref == 'refs/heads/main'
        # Caches are immutable, so the commit SHA in the key lets every push to
        # main store a fresh baseline
        uses: actions/cache/save@v4
        with:
          path: .benchmarks
          key: benchmarks-${{ runner.os }}-${{ github.sha }}
# Compatibility tests for cross-feature validation
compatibility-tests:
name: Compatibility Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
- name: Install Poetry
uses: snok/install-poetry@v1
- name: Install dependencies
run: poetry install --with dev
- name: Run compatibility tests
run: |
poetry run pytest tests/compatibility/ \
--junit-xml=junit/compatibility-results.xml \
-v \
--timeout=${{ env.PYTEST_TIMEOUT }}
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: junit-compatibility
path: junit/compatibility-results.xml
# Linting and code quality checks
code-quality:
name: Code Quality Checks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
- name: Install Poetry
uses: snok/install-poetry@v1
- name: Install dependencies
run: poetry install --with dev
- name: Run ruff linting
run: |
poetry run ruff check . --output-format=github
- name: Run ruff formatting check
run: |
poetry run ruff format . --check
- name: Run mypy type checking
run: |
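          # --ignore-missing-imports suppresses errors from third-party packages that lack type stubs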
poetry run mypy fast_intercom_mcp/ --ignore-missing-imports
# Security scanning
security-scan:
name: Security Scan
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
      - name: Run Trivy vulnerability scanner
        # Consider pinning to a tagged release instead of a moving branch for reproducible scans
        uses: aquasecurity/trivy-action@master
with:
scan-type: 'fs'
scan-ref: '.'
format: 'sarif'
output: 'trivy-results.sarif'
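      # Publishes findings to the repository's code scanning (Security) tab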
- name: Upload Trivy scan results
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: 'trivy-results.sarif'
# Test results reporting
test-report:
name: Test Results Summary
runs-on: ubuntu-latest
needs: [unit-tests, integration-tests, performance-tests, compatibility-tests]
if: always()
steps:
- uses: actions/checkout@v4
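      # Gather the junit XML from every test job into a single directory for the publisher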
- name: Download all test results
uses: actions/download-artifact@v4
with:
pattern: junit-*
merge-multiple: true
path: test-results/
      - name: Download coverage results
        uses: actions/download-artifact@v4
        with:
          pattern: coverage-*
          # Keep each artifact in its own directory; merging would let the
          # per-version coverage.xml files overwrite one another
          merge-multiple: false
          path: coverage-results/
- name: Publish test results
uses: EnricoMi/publish-unit-test-result-action@v2
if: github.event_name == 'pull_request'
with:
files: |
test-results/**/*.xml
check_name: Test Results
comment_title: Test Results Summary
comment_mode: always
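      # Note: this action works from the raw .coverage data file, so it may need
      # to be pointed at the downloaded artifact directory (see the action's docs)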
- name: Generate coverage comment
if: github.event_name == 'pull_request'
uses: py-cov-action/python-coverage-comment-action@v3
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
MINIMUM_GREEN: ${{ env.COVERAGE_THRESHOLD }}
MINIMUM_ORANGE: 70
# Final status check
integration-check-status:
name: Integration Check Status
runs-on: ubuntu-latest
needs: [unit-tests, integration-tests, performance-tests, compatibility-tests, code-quality, security-scan]
if: always()
steps:
      - name: Check all job statuses
        run: |
          # integration-tests may legitimately be skipped (workflow_dispatch on a
          # non-main branch), so accept 'skipped' for that job only
          integration="${{ needs.integration-tests.result }}"
          if [ "${{ needs.unit-tests.result }}" != "success" ] || \
             { [ "$integration" != "success" ] && [ "$integration" != "skipped" ]; } || \
             [ "${{ needs.performance-tests.result }}" != "success" ] || \
             [ "${{ needs.compatibility-tests.result }}" != "success" ] || \
             [ "${{ needs.code-quality.result }}" != "success" ] || \
             [ "${{ needs.security-scan.result }}" != "success" ]; then
            echo "One or more required checks failed"
            exit 1
          fi
          echo "All checks passed!"