"""
Comprehensive domain test fixtures for Agent Orchestration Platform.
This module provides test fixtures for all domain types and objects including:
- Agent state and configuration fixtures
- Session state and management fixtures
- Security context and authentication fixtures
- MCP tool request/response fixtures
- iTerm2 integration fixtures
- Claude Code configuration fixtures
- File system and directory structure fixtures
All fixtures are designed to work with ADDER+ techniques and provide realistic test data.
Author: Adder_1 | Created: 2025-06-26 | Testing Infrastructure Task
"""
import pytest
from typing import Dict, List, Any, Optional, Union
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
import json
import uuid
import secrets
# ============================================================================
# Domain Type Mock Classes (Placeholders for actual implementations)
# ============================================================================
@dataclass
class MockAgentState:
    """Mock agent state for testing until real implementation is available."""
    agent_id: str  # unique agent identifier, e.g. "agent_test_001"
    session_id: str  # identifier of the owning session
    name: str  # display name; fixtures use the "Agent_<n>" convention
    status: str = "ACTIVE"  # lifecycle status; fixtures use "ACTIVE", "IDLE", "ERROR"
    specialization: str = ""  # free-text role description, e.g. "Security Testing"
    system_prompt_suffix: str = ""  # extra text appended to the agent's system prompt
    iterm_tab_id: Optional[str] = None  # iTerm2 tab hosting the agent; None when not running
    process_id: Optional[int] = None  # OS pid of the Claude Code process; None when not running
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())  # ISO timestamp (naive local time)
    last_heartbeat: str = field(default_factory=lambda: datetime.now().isoformat())  # ISO timestamp of last liveness signal
    resource_usage: Dict[str, float] = field(default_factory=dict)  # e.g. memory_mb, cpu_percent, files_open
    conversation_history: List[Dict[str, str]] = field(default_factory=list)  # role/content/timestamp message dicts
    claude_config: Dict[str, Any] = field(default_factory=dict)  # Claude Code CLI options, e.g. model, verbose
def create_test_agent_id() -> str:
    """Return a synthetic agent ID of the form ``Agent_<n>`` with n in [0, 1000)."""
    # secrets.randbelow keeps the suffix non-predictable across test runs.
    suffix = secrets.randbelow(1000)
    return "Agent_" + str(suffix)
def create_test_session_id() -> str:
    """Return a synthetic session ID of the form ``session_<8 lowercase hex chars>``."""
    token = uuid.uuid4().hex[:8]
    return "session_" + token
@dataclass
class MockSessionState:
    """Mock session state for testing until real implementation is available."""
    session_id: str  # unique session identifier
    name: str  # human-readable session name
    root_path: str  # filesystem root of the session workspace
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())  # ISO timestamp (naive local time)
    agents: Dict[str, MockAgentState] = field(default_factory=dict)  # agent_id -> agent state
    security_context: Dict[str, Any] = field(default_factory=dict)  # e.g. encryption_enabled, audit_level, max_agents
    task_files: Dict[str, str] = field(default_factory=dict)  # task file name -> absolute path
    git_integration: Dict[str, Any] = field(default_factory=dict)  # git-related metadata (unused by fixtures here)
    performance_metrics: Dict[str, float] = field(default_factory=dict)  # counters/timings, e.g. active_agents, error_count
@dataclass
class MockSecurityContext:
    """Mock security context for testing until real implementation is available."""
    session_encryption_key: bytes  # key material for session state encryption (32 random bytes in fixtures)
    agent_state_encryption: bytes  # key material for per-agent state encryption
    audit_signing_key: bytes  # key material for signing audit entries
    filesystem_boundaries: List[str]  # absolute paths the session is confined to
    resource_limits: Dict[str, Any]  # e.g. max_memory_mb, max_cpu_percent, max_processes, max_files
    permission_model: Dict[str, Any] = field(default_factory=dict)  # read/write/execute access lists and restricted paths
@dataclass
class MockClaudeConfig:
    """Mock Claude Code configuration for testing."""
    model: str = "sonnet-3.5"  # model identifier passed to the CLI
    no_color: bool = True  # disable ANSI color output
    skip_permissions: bool = False  # skip interactive permission prompts
    verbose: bool = False  # enable verbose CLI logging
    output_format: str = "text"  # fixtures use "text" or "json"
    working_directory: Optional[str] = None  # cwd for the spawned process; None = inherit
    custom_commands: List[str] = field(default_factory=list)  # extra CLI flags, e.g. ["--debug"]
    resource_limits: Dict[str, Any] = field(default_factory=dict)  # e.g. memory_mb, timeout_seconds, max_files
# ============================================================================
# Agent State Fixtures
# ============================================================================
@pytest.fixture
def basic_agent_state() -> MockAgentState:
    """Provide basic agent state for simple testing."""
    # Nested payloads are named locally so the constructor call stays flat.
    usage = {
        "memory_mb": 256.0,
        "cpu_percent": 15.5,
        "files_open": 10,
    }
    cli_config = {
        "model": "sonnet-3.5",
        "no_color": True,
        "verbose": False,
    }
    return MockAgentState(
        agent_id="agent_test_001",
        session_id="session_test_001",
        name="Agent_1",
        status="ACTIVE",
        specialization="ADDER+ Implementation",
        iterm_tab_id="tab_001",
        process_id=12345,
        resource_usage=usage,
        claude_config=cli_config,
    )
@pytest.fixture
def idle_agent_state() -> MockAgentState:
    """Provide idle agent state for testing inactive agents."""
    # An idle agent holds no tab or process and consumes no resources.
    zero_usage = {"memory_mb": 0.0, "cpu_percent": 0.0, "files_open": 0}
    return MockAgentState(
        agent_id="agent_test_002",
        session_id="session_test_001",
        name="Agent_2",
        status="IDLE",
        specialization="Testing & Validation",
        iterm_tab_id=None,
        process_id=None,
        resource_usage=zero_usage,
    )
@pytest.fixture
def error_agent_state() -> MockAgentState:
    """Provide agent state in error condition for testing error handling."""
    # A heartbeat 30 minutes in the past marks the agent as stale, and the
    # resource numbers sit at/near exhaustion to exercise limit handling.
    stale_heartbeat = (datetime.now() - timedelta(minutes=30)).isoformat()
    exhausted_usage = {"memory_mb": 512.0, "cpu_percent": 95.0, "files_open": 1000}
    return MockAgentState(
        agent_id="agent_test_003",
        session_id="session_test_001",
        name="Agent_3",
        status="ERROR",
        specialization="Security Testing",
        iterm_tab_id="tab_003",
        process_id=12347,
        last_heartbeat=stale_heartbeat,
        resource_usage=exhausted_usage,
    )
@pytest.fixture
def multiple_agent_states() -> List[MockAgentState]:
    """Provide multiple agent states for concurrent testing."""
    specializations = [
        "ADDER+ Implementation", "Security Testing", "Performance Optimization",
        "Documentation", "Code Review", "Integration Testing",
        "Bug Fixing", "Architecture Design",
    ]
    agents: List[MockAgentState] = []
    # Odd-numbered agents are ACTIVE (with tab + process), even-numbered are IDLE.
    for index, specialization in enumerate(specializations, start=1):
        is_active = index % 2 == 1
        agents.append(
            MockAgentState(
                agent_id=f"agent_test_{index:03d}",
                session_id="session_test_001",
                name=f"Agent_{index}",
                status="ACTIVE" if is_active else "IDLE",
                specialization=specialization,
                iterm_tab_id=f"tab_{index:03d}" if is_active else None,
                process_id=12340 + index if is_active else None,
                resource_usage={
                    "memory_mb": 128.0 + (index * 32),
                    "cpu_percent": 5.0 + (index * 2.5),
                    "files_open": 5 + index,
                },
            )
        )
    return agents
@pytest.fixture
def agent_with_conversation_history() -> MockAgentState:
    """Provide agent with conversation history for testing message handling."""
    now = datetime.now()
    # (role, content, minutes-ago) triples, oldest message first.
    raw_messages = [
        ("system", "You are Agent_1\n\n# ELITE CODE AGENT: ADDER+...", 60),
        ("user", "Please analyze the TODO.md file and assign yourself the next available task.", 30),
        ("assistant", "I'll analyze the TODO.md file and assign myself to TASK_2...", 25),
        ("user", "Great! Please implement the security framework with comprehensive contracts.", 5),
    ]
    conversation = [
        {
            "role": role,
            "content": content,
            "timestamp": (now - timedelta(minutes=minutes_ago)).isoformat(),
        }
        for role, content, minutes_ago in raw_messages
    ]
    return MockAgentState(
        agent_id="agent_test_004",
        session_id="session_test_001",
        name="Agent_4",
        status="ACTIVE",
        specialization="Security Framework Implementation",
        conversation_history=conversation,
        iterm_tab_id="tab_004",
        process_id=12348,
    )
# ============================================================================
# Session State Fixtures
# ============================================================================
@pytest.fixture
def basic_session_state(temp_session_root: Path) -> MockSessionState:
    """Provide basic session state for simple testing."""
    dev_dir = temp_session_root / "development"
    lone_agent = MockAgentState(
        agent_id="agent_test_001",
        session_id="session_test_001",
        name="Agent_1",
        status="ACTIVE",
    )
    return MockSessionState(
        session_id="session_test_001",
        name="Test Session",
        root_path=str(temp_session_root),
        agents={lone_agent.agent_id: lone_agent},
        security_context={
            "encryption_enabled": True,
            "audit_level": "INFO",
            "max_agents": 8,
        },
        task_files={
            "TODO.md": str(dev_dir / "TODO.md"),
            "TASK_1.md": str(dev_dir / "tasks" / "TASK_1.md"),
        },
        performance_metrics={
            "total_agents_created": 1,
            "active_agents": 1,
            "average_response_time": 1.5,
            "error_count": 0,
        },
    )
@pytest.fixture
def multi_agent_session_state(temp_session_root: Path, multiple_agent_states: List[MockAgentState]) -> MockSessionState:
    """Provide session state with multiple agents for concurrency testing."""
    # Index agents by ID and count active ones in a single pass.
    by_id: Dict[str, MockAgentState] = {}
    active_count = 0
    for agent in multiple_agent_states:
        by_id[agent.agent_id] = agent
        if agent.status == "ACTIVE":
            active_count += 1
    return MockSessionState(
        session_id="session_test_001",
        name="Multi-Agent Test Session",
        root_path=str(temp_session_root),
        agents=by_id,
        security_context={
            "encryption_enabled": True,
            "audit_level": "DEBUG",
            "max_agents": 16,
            "require_authentication": True,
        },
        performance_metrics={
            "total_agents_created": len(multiple_agent_states),
            "active_agents": active_count,
            "average_response_time": 2.1,
            "error_count": 1,
        },
    )
@pytest.fixture
def empty_session_state(temp_session_root: Path) -> MockSessionState:
    """Provide empty session state for testing session initialization."""
    # Fresh session: no agents registered and all metrics zeroed.
    zeroed_metrics = {
        "total_agents_created": 0,
        "active_agents": 0,
        "average_response_time": 0.0,
        "error_count": 0,
    }
    return MockSessionState(
        session_id="session_test_empty",
        name="Empty Test Session",
        root_path=str(temp_session_root),
        agents={},
        performance_metrics=zeroed_metrics,
    )
# ============================================================================
# Security Context Fixtures
# ============================================================================
@pytest.fixture
def basic_security_context(temp_session_root: Path) -> MockSecurityContext:
    """Provide basic security context for testing."""
    boundaries = [
        str(temp_session_root),
        str(temp_session_root / "development"),
        str(temp_session_root / "src"),
    ]
    limits = {
        "max_memory_mb": 512,
        "max_cpu_percent": 25.0,
        "max_processes": 1,
        "max_files": 1000,
    }
    permissions = {
        "read_access": ["development", "src", "tests"],
        "write_access": ["development/tasks", "logs"],
        "execute_access": ["scripts"],
        "restricted_paths": [".git", ".claude_session"],
    }
    # Fresh random key material per fixture instantiation.
    return MockSecurityContext(
        session_encryption_key=secrets.token_bytes(32),
        agent_state_encryption=secrets.token_bytes(32),
        audit_signing_key=secrets.token_bytes(32),
        filesystem_boundaries=boundaries,
        resource_limits=limits,
        permission_model=permissions,
    )
@pytest.fixture
def high_security_context(temp_session_root: Path) -> MockSecurityContext:
    """Provide high security context for security testing."""
    # Access is deliberately narrowed to the development tree only.
    restricted_boundaries = [str(temp_session_root / "development")]
    tight_limits = {
        "max_memory_mb": 256,
        "max_cpu_percent": 10.0,
        "max_processes": 1,
        "max_files": 100,
    }
    strict_permissions = {
        "read_access": ["development/protocols"],
        "write_access": ["development/tasks"],
        "execute_access": [],
        "restricted_paths": [".git", ".claude_session", "src", "tests"],
        "audit_all_operations": True,
        "require_signed_requests": True,
    }
    return MockSecurityContext(
        session_encryption_key=secrets.token_bytes(32),
        agent_state_encryption=secrets.token_bytes(32),
        audit_signing_key=secrets.token_bytes(32),
        filesystem_boundaries=restricted_boundaries,
        resource_limits=tight_limits,
        permission_model=strict_permissions,
    )
# ============================================================================
# Claude Code Configuration Fixtures
# ============================================================================
@pytest.fixture
def default_claude_config() -> MockClaudeConfig:
    """Provide default Claude Code configuration."""
    standard_limits = {
        "memory_mb": 512,
        "timeout_seconds": 300,
        "max_files": 1000,
    }
    return MockClaudeConfig(
        model="sonnet-3.5",
        no_color=True,
        skip_permissions=False,
        verbose=False,
        output_format="text",
        working_directory=None,
        custom_commands=[],
        resource_limits=standard_limits,
    )
@pytest.fixture
def performance_claude_config() -> MockClaudeConfig:
    """Provide Claude Code configuration optimized for performance."""
    # Trades verbosity and interactive safety for speed: faster model,
    # skipped permission prompts, tighter resource caps.
    lean_limits = {
        "memory_mb": 256,
        "timeout_seconds": 60,
        "max_files": 500,
    }
    return MockClaudeConfig(
        model="haiku-3",
        no_color=True,
        skip_permissions=True,
        verbose=False,
        output_format="json",
        custom_commands=["--fast", "--optimize"],
        resource_limits=lean_limits,
    )
@pytest.fixture
def development_claude_config(temp_session_root: Path) -> MockClaudeConfig:
    """Provide Claude Code configuration for development testing."""
    # Colors and verbose tracing enabled, with generous limits for debugging.
    generous_limits = {
        "memory_mb": 1024,
        "timeout_seconds": 600,
        "max_files": 2000,
    }
    return MockClaudeConfig(
        model="sonnet-3.5",
        no_color=False,
        skip_permissions=False,
        verbose=True,
        output_format="text",
        working_directory=str(temp_session_root),
        custom_commands=["--debug", "--trace"],
        resource_limits=generous_limits,
    )
# ============================================================================
# MCP Tool Request/Response Fixtures
# ============================================================================
@pytest.fixture
def create_agent_request() -> Dict[str, Any]:
    """Provide create_agent MCP tool request."""
    params = {
        "session_id": "session_test_001",
        "agent_name": "Agent_5",
        "specialization": "Testing Framework",
        "system_prompt_suffix": "Focus on comprehensive testing and validation.",
        "claude_config": {"model": "sonnet-3.5", "verbose": True},
    }
    # Request metadata: fresh request_id and timestamp on every instantiation.
    request_context = {
        "client_id": "claude_desktop_test",
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
    }
    return {
        "tool_name": "create_agent",
        "parameters": params,
        "context": request_context,
    }
@pytest.fixture
def create_agent_response() -> Dict[str, Any]:
    """Provide create_agent MCP tool response."""
    response: Dict[str, Any] = {"success": True}
    response.update(
        agent_id="agent_test_005",
        agent_name="Agent_5",
        iterm_tab_id="tab_005",
        process_id=12349,
        session_id="session_test_001",
        specialization="Testing Framework",
        status="ACTIVE",
        created_at=datetime.now().isoformat(),
    )
    return response
@pytest.fixture
def send_message_request() -> Dict[str, Any]:
    """Provide send_message_to_agent MCP tool request."""
    message_params = {
        "agent_name": "Agent_1",
        "message": "Please analyze the current task status and provide recommendations.",
        "prepend_adder": True,
        "wait_for_response": False,
    }
    request_context = {
        "client_id": "claude_desktop_test",
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
    }
    return {
        "tool_name": "send_message_to_agent",
        "parameters": message_params,
        "context": request_context,
    }
@pytest.fixture
def get_session_status_response() -> Dict[str, Any]:
    """Provide get_session_status MCP tool response."""
    now = datetime.now()
    running_agent = {
        "agent_id": "agent_test_001",
        "name": "Agent_1",
        "status": "ACTIVE",
        "specialization": "ADDER+ Implementation",
        "last_heartbeat": now.isoformat(),
        "resource_usage": {"memory_mb": 256.0, "cpu_percent": 15.5},
    }
    # Idle agent last reported ten minutes ago with zero resource usage.
    dormant_agent = {
        "agent_id": "agent_test_002",
        "name": "Agent_2",
        "status": "IDLE",
        "specialization": "Testing & Validation",
        "last_heartbeat": (now - timedelta(minutes=10)).isoformat(),
        "resource_usage": {"memory_mb": 0.0, "cpu_percent": 0.0},
    }
    return {
        "session_id": "session_test_001",
        "session_name": "Test Session",
        "status": "ACTIVE",
        "agents": [running_agent, dormant_agent],
        "performance_metrics": {
            "total_agents": 2,
            "active_agents": 1,
            "average_response_time": 1.8,
            "uptime_hours": 2.5,
        },
        "system_health": {
            "memory_usage_percent": 45.2,
            "cpu_usage_percent": 23.1,
            "disk_usage_percent": 15.8,
        },
    }
# ============================================================================
# File System and Task File Fixtures
# ============================================================================
@pytest.fixture
def sample_todo_file_content() -> str:
    """Provide comprehensive TODO.md content for testing."""
    # NOTE(review): the status markers below ("π΄", "π‘", "β", ...) look like
    # mojibake of the original emoji indicators — confirm against the canonical
    # TODO.md template before writing exact-match assertions on this text.
    return '''# Project Task Management Dashboard
**Project**: Agent Orchestration Platform - Claude Code MCP Server
**Last Updated**: 2025-06-26 by Adder_1
**Overall Progress**: 3/13 tasks complete
## Task Status Overview
- **NOT_STARTED**: 8 π΄
- **IN_PROGRESS**: 2 π‘
- **REVIEW_READY**: 0 π
- **COMPLETE**: 3 β
- **BLOCKED**: 0 β
## Current Assignments
| Task | Status | Agent | Priority | Dependencies |
|------|--------|-------|----------|--------------|
| TASK_1 | COMPLETE | Adder_3 | HIGH | None |
| TASK_2 | COMPLETE | Adder_4 | HIGH | None |
| TASK_3 | COMPLETE | Adder_2 | HIGH | TASK_1, TASK_2 |
| TASK_4 | IN_PROGRESS | Adder_1 | HIGH | TASK_3 |
| TASK_5 | IN_PROGRESS | Adder_5 | MEDIUM | TASK_4 |
| TASK_6 | NOT_STARTED | Unassigned | MEDIUM | TASK_4 |
| TASK_7 | NOT_STARTED | Unassigned | MEDIUM | TASK_4 |
| TASK_8 | NOT_STARTED | Unassigned | MEDIUM | TASK_4 |
| TASK_9 | NOT_STARTED | Unassigned | MEDIUM | TASK_4 |
| TASK_10 | NOT_STARTED | Unassigned | MEDIUM | TASK_4 |
| TASK_11 | NOT_STARTED | Unassigned | MEDIUM | TASK_4 |
| TASK_12 | NOT_STARTED | Unassigned | LOW | TASK_5-11 |
| TASK_13 | NOT_STARTED | Unassigned | HIGH | None |
## Architecture Overview
The system consists of:
- **8 Core MCP Tools**: Agent and session management via FastMCP
- **iTerm2 Integration**: Tab-based agent orchestration
- **Claude Code Management**: Process spawning and message injection
- **Security Model**: Maximum isolation with encrypted state persistence
- **ADDER+ Integration**: Comprehensive system prompt prepending
## Next Priorities
1. **TASK_4**: Agent & Session Management - Core infrastructure
2. **TASK_5**: create_agent MCP Tool - Essential functionality
3. **TASK_13**: Testing Infrastructure - Quality assurance
'''
@pytest.fixture
def sample_task_file_content() -> str:
    """Provide sample TASK_X.md content for testing."""
    # NOTE(review): "π¦" and "β" below look like mojibake of emoji section
    # markers — confirm against the canonical task template. The literal must
    # stay byte-identical; task_file_directory_structure rewrites "TASK_4"
    # in this text to generate TASK_1..TASK_5 files.
    return '''# TASK_4: Agent & Session Management Core
**Created By**: OUTLINER | **Priority**: HIGH | **Duration**: 6 hours
**Technique Focus**: Contracts + Defensive Programming + Types + Testing
**Size Constraint**: Target <250 lines/module, Max 400 if splitting awkward
## π¦ Status & Assignment
**Status**: IN_PROGRESS
**Assigned**: Adder_1
**Dependencies**: TASK_3 (FastMCP Server Foundation)
**Blocking**: TASK_5-11 (All MCP tools)
## β
Implementation Subtasks (Sequential completion)
### Phase 1: Core Management Infrastructure
- [x] **Subtask 1.1**: Design agent lifecycle state machine with contracts
- [x] **Subtask 1.2**: Implement session management with security boundaries
- [ ] **Subtask 1.3**: Create iTerm2 integration manager with health monitoring
- [ ] **Subtask 1.4**: Add Claude Code process orchestration
### Phase 2: State Persistence & Recovery
- [ ] **Subtask 2.1**: Implement encrypted state storage with rollback
- [ ] **Subtask 2.2**: Add session recovery and consistency checking
- [ ] **Subtask 2.3**: Create audit trail with cryptographic signatures
- [ ] **Subtask 2.4**: Implement health monitoring and auto-restart
## Success Criteria
- [ ] Complete agent lifecycle management with state persistence
- [ ] Session management with security boundary enforcement
- [ ] iTerm2 integration with health monitoring and recovery
- [ ] Comprehensive testing with property-based scenarios
'''
@pytest.fixture
def task_file_directory_structure(temp_session_root: Path, sample_todo_file_content: str, sample_task_file_content: str) -> Dict[str, Path]:
    """Create complete task file directory structure with content.

    Writes TODO.md, TASK_1..TASK_5 files, and two protocol documents under
    ``temp_session_root`` and returns a mapping of the created paths.

    Returns:
        Dict with keys "todo" (Path), "tasks" (Dict[str, Path]) and
        "protocols" (Dict[str, Path]).
    """
    dev_dir = temp_session_root / "development"
    tasks_dir = dev_dir / "tasks"
    protocol_dir = dev_dir / "protocols"
    # Fix: Path.write_text raises FileNotFoundError when the parent directory
    # is missing, so create the layout defensively instead of relying on
    # temp_session_root having pre-created it.
    for directory in (dev_dir, tasks_dir, protocol_dir):
        directory.mkdir(parents=True, exist_ok=True)
    # Create TODO.md
    todo_path = dev_dir / "TODO.md"
    todo_path.write_text(sample_todo_file_content)
    # Create TASK_1 through TASK_5, each derived from the sample task template
    # by rewriting its "TASK_4" references.
    task_files: Dict[str, Path] = {}
    for i in range(1, 6):
        task_path = tasks_dir / f"TASK_{i}.md"
        task_path.write_text(sample_task_file_content.replace("TASK_4", f"TASK_{i}"))
        task_files[f"TASK_{i}"] = task_path
    # Create protocol files
    fastmcp_protocol = protocol_dir / "FASTMCP_PYTHON_PROTOCOL.md"
    fastmcp_protocol.write_text("# FastMCP Python Protocol\n\nComprehensive protocol documentation...")
    iterm_protocol = protocol_dir / "iterm_protocol.md"
    iterm_protocol.write_text("# iTerm2 Integration Protocol\n\nDetailed iTerm2 integration instructions...")
    return {
        "todo": todo_path,
        "tasks": task_files,
        "protocols": {
            "fastmcp": fastmcp_protocol,
            "iterm": iterm_protocol
        }
    }
# ============================================================================
# Audit and Logging Fixtures
# ============================================================================
@pytest.fixture
def sample_audit_entries() -> List[Dict[str, Any]]:
    """Provide sample audit log entries for testing."""
    base_time = datetime.now() - timedelta(hours=1)

    def entry(offset_minutes: int, event_type: str, details: Dict[str, Any],
              level: str, signature: str, agent_id: Optional[str] = None) -> Dict[str, Any]:
        # All entries share the session; agent_id appears only when relevant.
        record: Dict[str, Any] = {
            "timestamp": (base_time + timedelta(minutes=offset_minutes)).isoformat(),
            "event_type": event_type,
            "session_id": "session_test_001",
        }
        if agent_id is not None:
            record["agent_id"] = agent_id
        record["details"] = details
        record["security_level"] = level
        record["signature"] = signature
        return record

    return [
        entry(
            0,
            "session_created",
            {
                "session_name": "Test Session",
                "root_path": "/test/session",
                "created_by": "system",
            },
            "INFO",
            "mock_signature_1",
        ),
        entry(
            5,
            "agent_created",
            {
                "agent_name": "Agent_1",
                "specialization": "ADDER+ Implementation",
                "iterm_tab_id": "tab_001",
            },
            "INFO",
            "mock_signature_2",
            agent_id="agent_test_001",
        ),
        entry(
            10,
            "message_sent",
            {
                "message_length": 156,
                "prepend_adder": True,
                "response_received": True,
            },
            "DEBUG",
            "mock_signature_3",
            agent_id="agent_test_001",
        ),
        entry(
            30,
            "security_violation_attempt",
            {
                "violation_type": "path_traversal",
                "attempted_path": "../../../etc/passwd",
                "blocked": True,
                "source_ip": "127.0.0.1",
            },
            "WARNING",
            "mock_signature_4",
        ),
        entry(
            45,
            "agent_error",
            {
                "error_type": "ResourceExhausted",
                "error_message": "Memory limit exceeded",
                "recovery_action": "agent_restart_scheduled",
            },
            "ERROR",
            "mock_signature_5",
            agent_id="agent_test_003",
        ),
    ]
# ============================================================================
# Error and Exception Fixtures
# ============================================================================
@pytest.fixture
def validation_error_scenarios() -> List[Dict[str, Any]]:
    """Provide validation error scenarios for testing error handling."""
    # (scenario name, input payload, expected exception name, expected message)
    cases = [
        ("invalid_agent_name",
         {"agent_name": "invalid_name"},
         "ValidationError", "Agent name must follow 'Agent_#' format"),
        ("missing_session_id",
         {"agent_name": "Agent_1"},
         "ValidationError", "session_id is required"),
        ("invalid_session_path",
         {"session_id": "session_test_001", "root_path": "../../../etc/passwd"},
         "SecurityError", "Invalid session path"),
        ("resource_limit_exceeded",
         {"memory_limit": 99999, "cpu_limit": 200.0},
         "ResourceError", "Resource limits exceeded"),
        ("malicious_input",
         {"specialization": "<script>alert('xss')</script>"},
         "SecurityError", "Malicious input detected"),
    ]
    return [
        {
            "scenario": scenario,
            "input": payload,
            "expected_error": error,
            "expected_message": message,
        }
        for scenario, payload, error, message in cases
    ]
# Export all fixtures for easy importing
__all__ = [
    # Agent fixtures
    'basic_agent_state', 'idle_agent_state', 'error_agent_state',
    'multiple_agent_states', 'agent_with_conversation_history',
    # Session fixtures
    'basic_session_state', 'multi_agent_session_state', 'empty_session_state',
    # Security fixtures
    'basic_security_context', 'high_security_context',
    # Configuration fixtures
    'default_claude_config', 'performance_claude_config', 'development_claude_config',
    # MCP tool fixtures
    'create_agent_request', 'create_agent_response', 'send_message_request',
    'get_session_status_response',
    # File system fixtures
    'sample_todo_file_content', 'sample_task_file_content', 'task_file_directory_structure',
    # Audit and logging fixtures
    'sample_audit_entries',
    # Error testing fixtures
    'validation_error_scenarios'
]
# Aliases for backwards compatibility
# NOTE(review): assigning a fixture-decorated function to a new name does not
# register a second pytest fixture under that name — confirm these aliases are
# only used as plain references (e.g. imports), not as fixture parameters.
create_test_agent_state = basic_agent_state
create_test_security_context = basic_security_context