"""
Test Configuration and Runners for Type System - Agent Orchestration Platform
Comprehensive test configuration with property-based testing settings,
coverage requirements, and performance benchmarks.
Author: Adder_3 | Created: 2025-06-26 | Last Modified: 2025-06-26
"""
import os
import sys
from pathlib import Path

import pytest

# Add the project root to sys.path so that `src.*` imports resolve during tests
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

# Hypothesis configuration for property-based testing
from hypothesis import settings, Verbosity

# Register profiles so test thoroughness can be tuned per environment
settings.register_profile("default", max_examples=100, deadline=None)
settings.register_profile("thorough", max_examples=1000, deadline=None, verbosity=Verbosity.verbose)
settings.register_profile("quick", max_examples=10, deadline=None)
settings.register_profile("ci", max_examples=200, deadline=5000)  # 5-second deadline per example in CI

# Load the profile selected via the HYPOTHESIS_PROFILE environment variable
profile = os.environ.get("HYPOTHESIS_PROFILE", "default")
settings.load_profile(profile)
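# Usage note: the active profile is chosen per run via the environment variable, e.g.
#   HYPOTHESIS_PROFILE=thorough pytest tests/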


# Pytest configuration
def pytest_configure(config):
    """Configure pytest with custom markers and settings."""
    config.addinivalue_line(
        "markers",
        "property: mark test as property-based test using Hypothesis"
    )
    config.addinivalue_line(
        "markers",
        "security: mark test as security-focused test"
    )
    config.addinivalue_line(
        "markers",
        "performance: mark test as performance/benchmark test"
    )
    config.addinivalue_line(
        "markers",
        "integration: mark test as integration test"
    )
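
# The markers registered above support selective runs with standard -m expressions, e.g.:
#   pytest -m security
#   pytest -m "property and not performance"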


# Test collection configuration
def pytest_collection_modifyitems(config, items):
    """Modify test collection to add markers automatically."""
    for item in items:
        # Mark property-based tests
        if "properties" in item.nodeid or "property" in item.name.lower():
            item.add_marker(pytest.mark.property)
        # Mark security tests
        if "security" in item.nodeid or "security" in item.name.lower():
            item.add_marker(pytest.mark.security)
        # Mark performance tests
        if "performance" in item.nodeid or "benchmark" in item.name.lower():
            item.add_marker(pytest.mark.performance)
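        # Mark integration tests: the "integration" marker is registered above but was
        # not auto-applied; this mirrors the other checks (the path/name convention
        # used here is an assumption)
        if "integration" in item.nodeid or "integration" in item.name.lower():
            item.add_marker(pytest.mark.integration)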


# Fixtures for testing
@pytest.fixture(scope="session")
def test_data_directory():
    """Provide test data directory path."""
    return Path(__file__).parent / "data"


@pytest.fixture
def sample_agent_id():
    """Provide sample agent ID for testing."""
    from src.models.ids import create_agent_id
    return create_agent_id()


@pytest.fixture
def sample_session_id():
    """Provide sample session ID for testing."""
    from src.models.ids import create_session_id
    return create_session_id()


@pytest.fixture
def sample_security_context():
    """Provide sample security context for testing."""
    from src.models.security import SecurityContext, Permission, SecurityLevel
    from datetime import datetime, timedelta

    return SecurityContext(
        user_id="test_user",
        permissions={Permission.READ_AGENT_STATUS, Permission.CREATE_AGENT},
        session_permissions={},
        token_issued_at=datetime.utcnow() - timedelta(minutes=5),
        token_expires_at=datetime.utcnow() + timedelta(hours=1),
        security_level=SecurityLevel.INTERNAL
    )


@pytest.fixture
def sample_agent_state(sample_agent_id, sample_session_id):
    """Provide sample agent state for testing."""
    from src.models.agent import AgentState, AgentStatus, AgentSpecialization, ClaudeConfig, ResourceMetrics
    from datetime import datetime

    return AgentState(
        agent_id=sample_agent_id,
        session_id=sample_session_id,
        name="Agent_1",
        process_id=None,
        iterm_tab_id=None,
        status=AgentStatus.CREATED,
        specialization=AgentSpecialization.GENERAL,
        system_prompt_suffix="",
        claude_config=ClaudeConfig(),
        created_at=datetime.utcnow(),
        last_heartbeat=datetime.utcnow(),
        resource_metrics=ResourceMetrics(
            cpu_percent=0.0,
            memory_mb=0,
            file_descriptors=0,
            uptime_seconds=0,
            last_activity=datetime.utcnow()
        )
    )
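
# Example of how these fixtures compose in a test (illustrative sketch; it relies only
# on the fields set in sample_agent_state above):
#
#   def test_new_agent_starts_in_created_state(sample_agent_state):
#       from src.models.agent import AgentStatus
#       assert sample_agent_state.status == AgentStatus.CREATED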


# Test reporting hooks
def pytest_runtest_makereport(item, call):
    """Custom test reporting for enhanced output."""
    if "property" in item.keywords:
        # Enhanced reporting for property-based tests
        if call.excinfo is not None:
            # Placeholder: Hypothesis statistics could be attached to the failure
            # report here; currently no extra reporting is performed.
            pass


# Performance testing configuration
def pytest_benchmark_group_stats(config, benchmarks, group_by):
    """Group benchmark statistics for analysis (pytest-benchmark hook)."""
    # Currently returns the grouping key unchanged; customise here if benchmarks
    # should be regrouped for analysis.
    return group_by
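
# Example invocations (assuming pytest-cov and pytest-benchmark are installed):
#   pytest --cov=src --cov-report=term-missing     # coverage run
#   pytest -m performance --benchmark-only         # benchmarks only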


if __name__ == "__main__":
    # Run the tests in this directory directly
    pytest.main([
        str(Path(__file__).parent),
        "-v",
        "--tb=short",
        "--durations=10"
    ])