# Testing Guide for Amicus MCP Server
This guide provides practical test implementation patterns for the Amicus MCP Server.
## Test Structure
```
tests/
├── unit/
│   ├── test_state_operations.py   # Basic state read/write
│   ├── test_locking.py            # Lock management
│   ├── test_path_resolution.py    # Path validation
│   └── test_tracking.py           # Tracking toggle
├── integration/
│   ├── test_workflows.py          # Complete workflows
│   ├── test_mcp_protocol.py       # MCP integration
│   └── test_environment.py        # Env var handling
├── concurrency/
│   ├── test_race_conditions.py    # Multi-process tests
│   ├── test_atomic_writes.py      # Atomicity verification
│   └── test_lock_contention.py    # Lock stress tests
├── multi_agent/
│   ├── test_handoff.py            # Agent coordination
│   ├── test_elicitation.py        # ask_user pattern
│   └── test_concurrent_agents.py  # Multiple agents
└── fixtures/
    ├── conftest.py                # Shared fixtures
    └── mock_agents.py             # Agent simulators
```
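The `temp_state_dir` fixture used throughout this guide is a good candidate for the shared `conftest.py`. A minimal sketch, assuming the server resolves its state directory from the `CONTEXT_BUS_DIR` environment variable (as the unit test examples below do); note that pytest only auto-discovers a `conftest.py` for tests at or below its directory, so in practice this file usually lives at `tests/conftest.py` rather than `tests/fixtures/`:

```python
# tests/conftest.py — a minimal sketch of shared fixtures.
# Assumes the server reads its state directory from CONTEXT_BUS_DIR,
# matching the unit test examples in this guide.
import pytest


@pytest.fixture
def temp_state_dir(monkeypatch, tmp_path):
    """Point the server at an isolated, throwaway state directory."""
    monkeypatch.setenv("CONTEXT_BUS_DIR", str(tmp_path))
    return tmp_path
```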
## Unit Test Examples
### Testing State Operations
```python
import json
import time

import pytest
from server import (
    write_with_lock,
    read_with_lock,
    get_state_file,
    atomic_write,
)


@pytest.fixture
def temp_state_dir(monkeypatch, tmp_path):
    """Provide a temporary state directory."""
    monkeypatch.setenv("CONTEXT_BUS_DIR", str(tmp_path))
    return tmp_path


def test_atomic_write_creates_file(temp_state_dir):
    """Test that atomic write creates a file."""
    state_file = temp_state_dir / "test.json"
    data = {"test": "value"}

    atomic_write(state_file, data)

    assert state_file.exists()
    with open(state_file) as f:
        assert json.load(f) == data


def test_read_nonexistent_file_returns_empty(temp_state_dir):
    """Test reading a file that doesn't exist."""
    state_file = temp_state_dir / "missing.json"
    result = read_with_lock(state_file)
    assert result == {}


def test_write_read_roundtrip(temp_state_dir):
    """Test writing and reading state."""
    state_file = get_state_file()
    data = {
        "summary": "Test summary",
        "next_steps": "Test steps",
        "active_files": ["file1.py"],
        "timestamp": time.time(),
    }
    write_with_lock(state_file, data)
    result = read_with_lock(state_file)
    assert result == data
```
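Because `atomic_write` serializes whatever payload it is handed, a parametrized test covers edge cases cheaply. A sketch that continues the module above and uses only functions already imported there; the payloads are illustrative:

```python
import json

import pytest
from server import atomic_write


@pytest.mark.parametrize("payload", [
    {},                                   # empty state
    {"unicode": "héllo wörld"},           # non-ASCII content
    {"nested": {"a": [1, 2, {"b": 3}]}},  # nested structures
    {"empty_list": [], "none": None},     # falsy values
])
def test_atomic_write_roundtrip(temp_state_dir, payload):
    """Any JSON-serializable payload survives an atomic write unchanged."""
    state_file = temp_state_dir / "roundtrip.json"
    atomic_write(state_file, payload)
    with open(state_file) as f:
        assert json.load(f) == payload
```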
### Testing Lock Management
```python
import os
import time

from server import remove_stale_lock, get_lock_file


def test_stale_lock_removal(temp_state_dir):
    """Test that stale locks are removed."""
    lock_file = get_lock_file()

    # Create a lock, then backdate its mtime so it looks stale
    lock_file.touch()
    old_time = time.time() - 15
    os.utime(lock_file, (old_time, old_time))

    remove_stale_lock(lock_file, timeout=10)
    assert not lock_file.exists()


def test_fresh_lock_not_removed(temp_state_dir):
    """Test that fresh locks are not removed."""
    lock_file = get_lock_file()
    lock_file.touch()
    remove_stale_lock(lock_file, timeout=10)
    assert lock_file.exists()
```
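For `tests/concurrency/test_lock_contention.py`, a thread-based stress test surfaces most contention bugs faster than full multi-process runs. A sketch, assuming `write_with_lock` is safe to call from multiple threads within one process (if the underlying lock is process-scoped rather than thread-scoped, adapt it to processes as in the concurrency examples below):

```python
import threading

from server import write_with_lock, read_with_lock, get_state_file


def test_threaded_lock_contention(temp_state_dir):
    """Many threads hammering the same state file never corrupt it."""
    state_file = get_state_file()
    errors = []

    def hammer(thread_id):
        try:
            for i in range(50):
                write_with_lock(state_file, {"thread": thread_id, "i": i})
        except Exception as exc:  # collect failures instead of swallowing them
            errors.append(exc)

    threads = [threading.Thread(target=hammer, args=(t,)) for t in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    assert not errors
    final = read_with_lock(state_file)
    assert "thread" in final  # last write wins, but the file stays valid
```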
## Integration Test Examples
### Testing Complete Workflows
```python
from server import update_state, read_state


def test_update_read_workflow(temp_state_dir):
    """Test a complete update and read workflow."""
    # Update state
    result = update_state(
        summary="Implemented feature X",
        next_steps="Write tests for feature X",
        active_files=["feature_x.py"],
        ask_user=False,
    )
    assert "success" in result.lower()

    # Read state
    state_output = read_state()
    assert "Implemented feature X" in state_output
    assert "Write tests for feature X" in state_output
    assert "feature_x.py" in state_output


def test_ask_user_flag_in_output(temp_state_dir):
    """Test that ask_user flag appears in output."""
    update_state(
        summary="Need input",
        next_steps="Wait for user",
        active_files=[],
        ask_user=True,
    )
    state_output = read_state()
    assert "HUMAN INPUT" in state_output
```
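For `tests/integration/test_environment.py`, verify that state actually lands under the directory named by `CONTEXT_BUS_DIR`. A sketch, assuming `get_state_file` (used in the unit tests above) derives its path from that variable:

```python
from server import get_state_file, update_state


def test_state_file_respects_env_dir(temp_state_dir):
    """The state file is created inside the CONTEXT_BUS_DIR directory."""
    update_state(
        summary="Env check",
        next_steps="None",
        active_files=[],
        ask_user=False,
    )
    state_file = get_state_file()
    assert state_file.exists()
    # The resolved path should live under the temporary directory
    assert state_file.is_relative_to(temp_state_dir)
```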
## Concurrency Test Examples
### Testing Race Conditions
```python
import multiprocessing
import time

from server import write_with_lock, read_with_lock, get_state_file


def writer_process(process_id, iterations, state_file):
    """Simulated writer process."""
    for i in range(iterations):
        data = {
            "process": process_id,
            "iteration": i,
            "timestamp": time.time(),
        }
        write_with_lock(state_file, data)
        time.sleep(0.01)


def test_concurrent_writers(temp_state_dir):
    """Test multiple processes writing concurrently."""
    state_file = get_state_file()
    num_processes = 5
    iterations = 20

    processes = []
    for i in range(num_processes):
        p = multiprocessing.Process(
            target=writer_process,
            args=(i, iterations, state_file),
        )
        processes.append(p)
        p.start()

    for p in processes:
        p.join()

    # Verify file is valid and not corrupted
    final_state = read_with_lock(state_file)
    assert "process" in final_state
    assert "timestamp" in final_state
```
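For `tests/concurrency/test_atomic_writes.py`, the key property is that a reader never observes a half-written file. A sketch that pairs a concurrent reader with the `writer_process` above, assuming `read_with_lock` surfaces `json.JSONDecodeError` on a torn read; the `pytest-timeout` marker (already in the dev dependencies) keeps a deadlock from hanging CI:

```python
import json
import multiprocessing
import time

import pytest
from server import read_with_lock, get_state_file


def reader_process(iterations, state_file, error_queue):
    """Repeatedly read the state file, reporting any partial/corrupt JSON."""
    for _ in range(iterations):
        if state_file.exists():
            try:
                read_with_lock(state_file)
            except json.JSONDecodeError as exc:
                error_queue.put(str(exc))
        time.sleep(0.005)


@pytest.mark.timeout(30)
def test_readers_never_see_partial_writes(temp_state_dir):
    """Interleaved readers and writers always see complete JSON documents."""
    state_file = get_state_file()
    errors = multiprocessing.Queue()

    # Reuses writer_process from the race-condition example above
    writer = multiprocessing.Process(
        target=writer_process, args=(0, 50, state_file)
    )
    reader = multiprocessing.Process(
        target=reader_process, args=(100, state_file, errors)
    )
    writer.start()
    reader.start()
    writer.join()
    reader.join()

    assert errors.empty(), f"reader saw corrupt state: {errors.get()}"
```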
## Multi-Agent Test Examples
### Testing Agent Handoff
```python
def test_agent_handoff(temp_state_dir):
    """Test handoff between two agents."""
    # Agent 1 updates state
    update_state(
        summary="Agent 1 completed authentication",
        next_steps="Agent 2 should add authorization",
        active_files=["auth.py"],
        ask_user=False,
    )

    # Agent 2 reads state
    state = read_state()
    assert "Agent 1" in state
    assert "authorization" in state

    # Agent 2 continues work
    update_state(
        summary="Agent 2 added authorization",
        next_steps="Ready for testing",
        active_files=["auth.py", "authz.py"],
        ask_user=False,
    )

    # Verify final state
    final_state = read_state()
    assert "Agent 2" in final_state
    assert "authz.py" in final_state
```
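For `tests/multi_agent/test_elicitation.py`, the ask_user pattern from the integration examples extends naturally to a handoff: one agent raises the flag, the next clears it after the (simulated) user responds. A sketch built on `update_state`/`read_state`, assuming each update replaces the previous state, as the round-trip unit test implies:

```python
from server import update_state, read_state


def test_elicitation_flag_cleared_after_response(temp_state_dir):
    """An ask_user request is visible to the next agent and clears afterward."""
    # Agent 1 needs a decision from the human
    update_state(
        summary="Two auth schemes are viable",
        next_steps="Ask the user: JWT or sessions?",
        active_files=["auth.py"],
        ask_user=True,
    )
    assert "HUMAN INPUT" in read_state()

    # Agent 2 records the user's answer and lowers the flag
    update_state(
        summary="User chose JWT",
        next_steps="Implement JWT tokens",
        active_files=["auth.py"],
        ask_user=False,
    )
    assert "HUMAN INPUT" not in read_state()
```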
## MCP Protocol Tests
### Testing Tool Invocations
```python
import pytest
from fastmcp.client import MCPClient


@pytest.mark.asyncio
async def test_mcp_update_state():
    """Test update_state via MCP protocol."""
    async with MCPClient("amicus-mcp") as client:
        result = await client.call_tool(
            "update_state",
            {
                "summary": "Test via MCP",
                "next_steps": "Verify MCP works",
                "active_files": ["test.py"],
                "ask_user": False,
            },
        )
        assert "success" in result.lower()


@pytest.mark.asyncio
async def test_mcp_read_state():
    """Test read_state via MCP protocol."""
    async with MCPClient("amicus-mcp") as client:
        result = await client.call_tool("read_state", {})
        assert isinstance(result, str)
```
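For `tests/multi_agent/test_concurrent_agents.py`, the same client pattern can drive two sessions at once. A sketch reusing the `MCPClient` interface from the examples above; last-writer-wins semantics are assumed:

```python
import asyncio

import pytest
from fastmcp.client import MCPClient


@pytest.mark.asyncio
async def test_two_clients_interleave():
    """Two MCP sessions can update state concurrently without errors."""
    async def agent(name):
        async with MCPClient("amicus-mcp") as client:
            return await client.call_tool(
                "update_state",
                {
                    "summary": f"{name} checkpoint",
                    "next_steps": "Continue",
                    "active_files": [],
                    "ask_user": False,
                },
            )

    results = await asyncio.gather(agent("agent-1"), agent("agent-2"))
    assert all("success" in r.lower() for r in results)
```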
## Running Tests
### Using pytest
```bash
# Run all tests
pytest tests/
# Run specific test file
pytest tests/unit/test_state_operations.py
# Run with coverage
pytest --cov=server --cov-report=html tests/
# Run concurrency tests with verbose output
pytest -v tests/concurrency/
# Run specific test
pytest tests/unit/test_state_operations.py::test_atomic_write_creates_file
```
### Using uv
```bash
# Install test dependencies
uv pip install pytest pytest-cov pytest-asyncio
# Run tests
uv run pytest tests/
```
## Test Dependencies
Add to `pyproject.toml`:
```toml
[dependency-groups]
dev = [
"pytest>=7.4.0",
"pytest-cov>=4.1.0",
"pytest-asyncio>=0.21.0",
"pytest-timeout>=2.1.0",
]
```
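If you prefer not to mark every coroutine test with `@pytest.mark.asyncio`, pytest-asyncio's auto mode can be enabled in the same file:

```toml
[tool.pytest.ini_options]
asyncio_mode = "auto"
```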
## CI/CD Integration
### GitHub Actions Example
```yaml
name: Tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v3
      - name: Install uv
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        # --system installs into the runner's Python, since CI has no venv
        run: |
          uv pip install --system -e .
          uv pip install --system pytest pytest-cov pytest-asyncio
      - name: Run tests
        run: pytest --cov=server --cov-report=xml tests/
      - name: Upload coverage
        uses: codecov/codecov-action@v3
```
## Performance Testing
### Benchmarking State Operations
```python
import statistics
import time

from server import update_state


def benchmark_write_operations(iterations=1000):
    """Benchmark state write performance."""
    times = []
    for i in range(iterations):
        start = time.perf_counter()
        update_state(
            summary=f"Iteration {i}",
            next_steps="Continue",
            active_files=["test.py"],
            ask_user=False,
        )
        times.append(time.perf_counter() - start)

    print(f"Write Operations ({iterations} iterations):")
    print(f"  Mean:   {statistics.mean(times) * 1000:.2f}ms")
    print(f"  Median: {statistics.median(times) * 1000:.2f}ms")
    print(f"  P95:    {statistics.quantiles(times, n=20)[18] * 1000:.2f}ms")
    print(f"  P99:    {statistics.quantiles(times, n=100)[98] * 1000:.2f}ms")
```
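To keep performance honest in CI, wrap the measurement in a test with a generous latency budget rather than only printing numbers. A sketch; the 50 ms median budget is an illustrative figure, not a measured target for this server:

```python
import statistics
import time

from server import update_state


def test_write_latency_budget(temp_state_dir):
    """Median write latency stays under an illustrative 50 ms budget."""
    times = []
    for i in range(100):
        start = time.perf_counter()
        update_state(
            summary=f"Benchmark {i}",
            next_steps="Continue",
            active_files=["test.py"],
            ask_user=False,
        )
        times.append(time.perf_counter() - start)
    assert statistics.median(times) < 0.050  # hypothetical budget
```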
## Next Steps
1. Implement test files based on these patterns
2. Run `amicus-mcp --audit-prompt` for comprehensive analysis
3. Use audit recommendations to prioritize test development
4. Set up CI/CD pipeline
5. Achieve >90% code coverage
6. Add performance benchmarks to CI