# python-test-e2e.yml
name: Manual E2E Tests

on:
  workflow_dispatch:
    inputs:
      models:
        description: "Comma-separated list of models to test"
        required: false
        default: "gpt-4.1-mini,gpt-4o-mini"
        type: string
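        # The comma-separated list above is forwarded to pytest via the
        # MODELS_TO_TEST environment variable in the test step below. A
        # minimal sketch of how the suite might consume it (an assumption,
        # not this repo's actual code):
        #
        #   import os
        #   models = os.environ.get("MODELS_TO_TEST", "").split(",")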

permissions:
  contents: read
  actions: write
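# Least-privilege GITHUB_TOKEN: repository contents are read-only here.
# (`actions: write` is broader than checkout or artifact upload strictly
# need; it is presumably kept for run/artifact management.)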

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Harden the runner (Audit all outbound calls)
        uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
        with:
          egress-policy: audit
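          # `audit` only logs outbound calls without blocking them. Once the
          # baseline is known, the policy could be tightened (hedged example;
          # the endpoint list is illustrative, not exhaustive):
          #
          #   egress-policy: block
          #   allowed-endpoints: >
          #     api.openai.com:443
          #     pypi.org:443
          #     files.pythonhosted.org:443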

      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8

      - name: Set up Python 3.12
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065
        with:
          python-version: "3.12"
          cache: "pip"

      - name: Install the latest version of uv
        uses: astral-sh/setup-uv@39eb6c9dde236bbc368681611e63120a6eb4afac
        with:
          version: "latest"
          activate-environment: true
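          # `activate-environment: true` puts the uv-created virtual
          # environment on PATH for later steps, so the bare `pytest` and
          # `python` invocations below resolve to it.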

      - name: Install dependencies
        run: |
          uv sync --extra dev
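      # `uv sync --extra dev` installs the locked dependencies plus the
      # `dev` optional-dependency group from pyproject.toml.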

      - name: Test with pytest
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          MODELS_TO_TEST: ${{ inputs.models }}
          MCP_USE_ANONYMIZED_TELEMETRY: false
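          # MCP_USE_ANONYMIZED_TELEMETRY opts the mcp-use library out of
          # sending anonymized usage telemetry during the run.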
        run: |
          pytest --run-e2e
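      # `--run-e2e` is not a built-in pytest flag; it is presumably a custom
      # option registered in the project's conftest.py. A minimal sketch of
      # the usual pattern (an assumption, not this repo's actual code):
      #
      #   import pytest
      #
      #   def pytest_addoption(parser):
      #       parser.addoption("--run-e2e", action="store_true", default=False,
      #                        help="run end-to-end tests")
      #
      #   def pytest_collection_modifyitems(config, items):
      #       if config.getoption("--run-e2e"):
      #           return
      #       skip_e2e = pytest.mark.skip(reason="needs --run-e2e")
      #       for item in items:
      #           if "e2e" in item.keywords:
      #               item.add_marker(skip_e2e)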

      - name: Generate HTML report
        if: always()
        run: |
          python scripts/generate_e2e_report.py
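      # Runs even when tests fail (`if: always()`) so a report is still
      # produced; the script presumably renders the pytest results into the
      # static_test_report.html consumed by the upload step below.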

      - name: Upload HTML report artifact
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
        if: always()
        with:
          name: e2e-test-report
          path: ./static_test_report.html
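          # upload-artifact also accepts an optional `retention-days` input
          # if the report should expire sooner than the repository default.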