name: Collective Intelligence CI/CD
on:
push:
branches: [ main, develop ]
pull_request:
branches: [ main ]
schedule:
# Run tests daily at 2 AM UTC
- cron: '0 2 * * *'
jobs:
test:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: ['3.9', '3.10', '3.11', '3.12']
exclude:
# Reduce matrix size for efficiency
- os: macos-latest
python-version: '3.9'
- os: windows-latest
python-version: '3.9'
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
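# Cache pip downloads and pre-commit's hook environments together; the key
# hashes the files that determine their contents, and restore-keys falls back
# to the newest cache for the same OS/Python pair on a miss.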
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
~/.cache/pip
~/.cache/pre-commit
key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ hashFiles('**/requirements*.txt', '.pre-commit-config.yaml') }}
restore-keys: |
${{ runner.os }}-python-${{ matrix.python-version }}-
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-dev.txt
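# NOTE: the "unit", "integration", and "performance" markers used below are
# assumed to be registered in the project's pytest config, e.g.:
#   [tool.pytest.ini_options]
#   markers = [
#       "unit: fast, isolated tests",
#       "integration: tests that span multiple components",
#       "performance: benchmark-style tests",
#   ]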
- name: Lint with pre-commit
run: |
pre-commit run --all-files
continue-on-error: true
- name: Run unit tests
run: |
pytest tests/test_collective_intelligence/ -m "unit" --cov=src/openrouter_mcp/collective_intelligence --cov-report=xml --cov-report=term-missing -v
- name: Run integration tests
run: |
pytest tests/test_collective_intelligence/ -m "integration" --cov=src/openrouter_mcp/collective_intelligence --cov-append --cov-report=xml --cov-report=term-missing -v
- name: Run performance tests
run: |
pytest tests/test_collective_intelligence/ -m "performance" --cov=src/openrouter_mcp/collective_intelligence --cov-append --cov-report=xml --cov-report=term-missing -v --maxfail=3
continue-on-error: true # Performance tests can be flaky on shared runners
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
files: ./coverage.xml
flags: unittests
name: codecov-umbrella
fail_ci_if_error: true
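# Tokenless upload is assumed here (public repository); a private repo would
# need a CODECOV_TOKEN secret passed via the `token:` input. The --html and
# --self-contained-html flags below come from the pytest-html plugin, assumed
# to be listed in requirements-dev.txt.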
- name: Generate test report
if: always()
run: |
pytest tests/test_collective_intelligence/ --junitxml=test-results.xml --html=test-report.html --self-contained-html
continue-on-error: true
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: test-results-${{ matrix.os }}-${{ matrix.python-version }}
path: |
test-results.xml
test-report.html
htmlcov/
security-scan:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install safety bandit semgrep
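# Three complementary scanners: safety checks installed dependencies against
# a known-vulnerability database, bandit statically analyzes our own source,
# and semgrep applies its auto-selected community ruleset.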
- name: Run safety check
run: |
safety check --json > safety-report.json
continue-on-error: true
- name: Run bandit security scan
run: |
bandit -r src/ -f json -o bandit-report.json
continue-on-error: true
- name: Run semgrep scan
run: |
semgrep --config=auto src/ --json --output=semgrep-report.json
continue-on-error: true
- name: Upload security reports
uses: actions/upload-artifact@v4
if: always()
with:
name: security-reports
path: |
safety-report.json
bandit-report.json
semgrep-report.json
quality-checks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-dev.txt
pip install mypy pylint flake8 black isort
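# black and isort are enforced (no continue-on-error), so formatting drift
# fails the job; flake8, pylint, and mypy are advisory and only feed the
# uploaded reports.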
- name: Check code formatting with black
run: |
black --check --diff src/ tests/
- name: Check import sorting with isort
run: |
isort --check-only --diff src/ tests/
- name: Run flake8
run: |
flake8 src/ tests/ --output-file=flake8-report.txt
continue-on-error: true
- name: Run pylint
run: |
pylint src/ --output-format=json --output=pylint-report.json
continue-on-error: true
- name: Run mypy type checking
run: |
mkdir -p mypy-report
mypy src/ > mypy-report/mypy.txt
continue-on-error: true
- name: Upload quality reports
uses: actions/upload-artifact@v4
if: always()
with:
name: quality-reports
path: |
flake8-report.txt
pylint-report.json
mypy-report/
benchmark:
runs-on: ubuntu-latest
# head_commit only exists on push events, so let pull requests opt in via a
# "[benchmark]" tag in the PR title; otherwise the PR-comment step below
# could never run.
if: github.event_name == 'schedule' || contains(github.event.head_commit.message, '[benchmark]') || (github.event_name == 'pull_request' && contains(github.event.pull_request.title, '[benchmark]'))
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-dev.txt
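# --benchmark-json below comes from the pytest-benchmark plugin, assumed to
# be listed in requirements-dev.txt.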
- name: Run comprehensive benchmarks
run: |
pytest tests/test_collective_intelligence/test_performance_benchmarks.py -v --benchmark-json=benchmark-results.json
timeout-minutes: 30
- name: Generate benchmark report
run: |
# A heredoc avoids the quoting pitfalls of a multi-line python -c; the
# "benchmarks"/"stats" fields follow the pytest-benchmark JSON schema.
python - << 'EOF'
import json
import datetime

with open('benchmark-results.json') as f:
    data = json.load(f)

with open('benchmark-summary.md', 'w') as f:
    f.write('# Benchmark Results\n\n')
    f.write(f'Generated: {datetime.datetime.now().isoformat()}\n\n')
    f.write('## Performance Summary\n\n')
    f.write('| Benchmark | Mean (s) | Stddev (s) |\n')
    f.write('|---|---|---|\n')
    for bench in data.get('benchmarks', []):
        stats = bench['stats']
        f.write(f"| {bench['name']} | {stats['mean']:.6f} | {stats['stddev']:.6f} |\n")
EOF
- name: Upload benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark-results
path: |
benchmark-results.json
benchmark-summary.md
- name: Comment benchmark results on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v6
with:
script: |
const fs = require('fs');
if (fs.existsSync('benchmark-summary.md')) {
const summary = fs.readFileSync('benchmark-summary.md', 'utf8');
await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: summary
});
}
documentation:
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install sphinx sphinx-rtd-theme sphinx-autodoc-typehints
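# A Sphinx project (conf.py, index.rst) is assumed to already exist under
# docs/; sphinx-apidoc only regenerates the API stubs in docs/api.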
- name: Generate API documentation
run: |
sphinx-apidoc -o docs/api src/openrouter_mcp/collective_intelligence
sphinx-build -b html docs docs/_build/html
- name: Upload documentation
uses: actions/upload-artifact@v4
with:
name: documentation
path: docs/_build/html
- name: Deploy to GitHub Pages
if: github.ref == 'refs/heads/main'
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./docs/_build/html
notify:
runs-on: ubuntu-latest
needs: [test, security-scan, quality-checks]
if: always()
steps:
- name: Notify success
if: needs.test.result == 'success' && needs.security-scan.result == 'success' && needs.quality-checks.result == 'success'
run: |
echo "All CI checks passed successfully!"
- name: Notify failure
if: needs.test.result == 'failure' || needs.security-scan.result == 'failure' || needs.quality-checks.result == 'failure'
run: |
echo "Some CI checks failed. Please review the results."
exit 1