"""
End-to-end integration tests for Percepta MCP server.
Tests the complete workflow including browser automation, test generation, and DevTools analysis.
"""
import asyncio
import pytest
import pytest_asyncio
from unittest.mock import AsyncMock, patch
from src.percepta_mcp.server import PerceptaMCPServer
from src.percepta_mcp.config import Settings, AIProviderConfig, BrowserConfig
@pytest.fixture
def integration_settings():
"""Create settings for integration testing."""
return Settings(
ai_providers=[
AIProviderConfig(
name="test-provider",
type="openai",
api_key="test-key",
model="gpt-3.5-turbo",
priority=1,
enabled=True
)
],
default_provider="test-provider",
browser=BrowserConfig(
headless=True,
timeout=30000,
viewport_width=1920,
viewport_height=1080
)
)
# Async generator fixtures require pytest_asyncio.fixture under pytest-asyncio's strict mode.
@pytest_asyncio.fixture
async def mcp_server(integration_settings):
"""Create MCP server instance for testing."""
with patch('src.percepta_mcp.server.get_ai_router') as mock_router:
mock_ai_router = AsyncMock()
mock_router.return_value = mock_ai_router
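# Patch the router factory before building the server so it wires in the
# AsyncMock rather than a real AI provider client.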
server = PerceptaMCPServer(integration_settings)
yield server, mock_ai_router
try:
await server.stop()
except Exception:
pass
class TestEndToEndIntegration:
"""End-to-end integration tests."""
@pytest.mark.asyncio
async def test_complete_page_analysis_workflow(self, mcp_server):
"""Test complete workflow: navigate -> monitor -> analyze -> generate report."""
server, mock_ai_router = mcp_server
# Mock browser responses
with patch.object(server.browser_automation, 'navigate') as mock_navigate, \
patch.object(server.browser_automation, 'evaluate_script') as mock_script:
# Setup navigation success
mock_navigate.return_value = {"success": True, "url": "https://example.com"}
# Setup script evaluation for DevTools monitoring
mock_script.return_value = { # Always return data collection result
"success": True,
"data": {
"consoleLogs": [
{"type": "error", "message": "JS Error", "timestamp": 123456},
{"type": "log", "message": "Page loaded", "timestamp": 123457}
],
"networkRequests": [
{"url": "https://example.com/api", "status": 500, "duration": 100},
{"url": "https://example.com/asset", "status": 200, "duration": 50}
],
"performanceMetrics": {
"domContentLoaded": 4000, # Slow
"loadComplete": 6000, # Slow
"failedResources": 1
}
}
}
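# Every evaluate_script call in the steps below returns this same canned
# DevTools payload.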
# Step 1: Start DevTools monitoring
monitor_result = await server._execute_tool("start_devtools_monitoring", {
"url": "https://example.com"
})
assert monitor_result["success"] is True
assert monitor_result["monitoring_active"] is True
# Step 2: Collect DevTools data
data_result = await server._execute_tool("collect_devtools_data", {})
assert data_result["success"] is True
assert data_result["data"]["total_console_entries"] == 2
assert data_result["data"]["total_network_requests"] == 2
# Step 3: Analyze anomalies
anomaly_result = await server._execute_tool("analyze_devtools_anomalies", {})
assert anomaly_result["success"] is True
anomalies = anomaly_result["anomalies"]
# Verify anomalies were detected
anomaly_types = [a["type"] for a in anomalies]
assert "console_errors" in anomaly_types
assert "network_failures" in anomaly_types
assert "slow_dom_load" in anomaly_types
assert "slow_page_load" in anomaly_types
# Step 4: Generate comprehensive report
report_result = await server._execute_tool("generate_devtools_report", {
"include_recommendations": True
})
assert report_result["success"] is True
report = report_result["report"]
# Verify report structure
assert "summary" in report
assert "anomalies" in report
assert "recommendations" in report
assert report["summary"]["overall_health"] == "poor" # Due to multiple issues
# Verify recommendations were generated
recommendations = report["recommendations"]
assert len(recommendations) > 0
categories = [rec["category"] for rec in recommendations]
assert "JavaScript Errors" in categories
assert "Network Issues" in categories
assert "Performance" in categories
@pytest.mark.asyncio
async def test_test_generation_and_execution_workflow(self, mcp_server):
"""Test complete test generation and execution workflow."""
server, mock_ai_router = mcp_server
# Mock AI response for test generation
from src.percepta_mcp.ai_router import AIResponse
test_script = """
import pytest
from playwright.async_api import async_playwright
@pytest.mark.asyncio
async def test_example_page():
async with async_playwright() as p:
browser = await p.chromium.launch()
page = await browser.new_page()
await page.goto("https://example.com")
assert "Example" in await page.title()
await browser.close()
"""
mock_ai_response = AIResponse(
content=test_script,
provider="test-provider",
model="gpt-3.5-turbo",
tokens_used=200,
cost=0.002,
response_time=1.5,
error=None
)
mock_ai_router.generate.return_value = mock_ai_response
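# The mocked router returns the canned Playwright script above for any prompt.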
# Mock browser responses
with patch.object(server.browser_automation, 'navigate') as mock_navigate, \
patch.object(server.browser_automation, 'get_page_info') as mock_page_info, \
patch.object(server.browser_automation, 'extract_text') as mock_extract:
mock_navigate.return_value = {"success": True}
mock_page_info.return_value = {"title": "Example Page", "url": "https://example.com"}
mock_extract.return_value = "Welcome to Example Page"
# Step 1: Generate test case
generation_result = await server._execute_tool("generate_test_case", {
"url": "https://example.com",
"description": "Test navigation to example page",
"test_type": "navigation"
})
assert generation_result["success"] is True
test_case = generation_result["test_case"]
assert test_case["url"] == "https://example.com"
assert test_case["type"] == "navigation"
assert "import pytest" in test_case["script"]
# Step 2: Mock test execution
with patch('asyncio.create_subprocess_shell') as mock_subprocess:
mock_process = AsyncMock()
mock_process.communicate.return_value = (
b"test_example_page PASSED [100%]\n1 passed in 2.34s",
b""
)
mock_process.returncode = 0
mock_subprocess.return_value = mock_process
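# The subprocess mock simulates a passing pytest run, so no real test process
# is spawned during execution.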
execution_result = await server._execute_tool("execute_test_case", {
"test_case_path": test_case["path"]
})
assert execution_result["success"] is True
assert execution_result["exit_code"] == 0
assert "PASSED" in execution_result["output"]
@pytest.mark.asyncio
async def test_browser_automation_chain(self, mcp_server):
"""Test chained browser automation operations."""
server, mock_ai_router = mcp_server
# Mock browser operations
with patch.object(server.browser_automation, 'navigate') as mock_navigate, \
patch.object(server.browser_automation, 'click') as mock_click, \
patch.object(server.browser_automation, 'fill') as mock_fill, \
patch.object(server.browser_automation, 'screenshot') as mock_screenshot, \
patch.object(server.browser_automation, 'extract_text') as mock_extract:
# Setup mock responses
mock_navigate.return_value = {"success": True, "url": "https://example.com/login"}
mock_fill.return_value = {"success": True, "value": "test_user"}
mock_click.return_value = {"success": True, "element": "button#login"}
mock_screenshot.return_value = {"success": True, "screenshot": "base64_image_data"}
mock_extract.return_value = "Welcome, test_user!"
# Step 1: Navigate to login page
nav_result = await server._execute_tool("browser_navigate", {
"url": "https://example.com/login"
})
assert nav_result["success"] is True
# Step 2: Fill username field
fill_result = await server._execute_tool("browser_fill", {
"selector": "#username",
"value": "test_user"
})
assert fill_result["success"] is True
# Step 3: Fill password field
pass_result = await server._execute_tool("browser_fill", {
"selector": "#password",
"value": "test_password"
})
assert pass_result["success"] is True
# Step 4: Click login button
click_result = await server._execute_tool("browser_click", {
"selector": "button#login"
})
assert click_result["success"] is True
# Step 5: Take screenshot
screenshot_result = await server._execute_tool("browser_screenshot", {
"full_page": True
})
assert screenshot_result["success"] is True
assert "screenshot" in screenshot_result
# Step 6: Extract welcome message
extract_result = await server._execute_tool("browser_extract_text", {
"selector": ".welcome-message"
})
assert "Welcome, test_user!" in extract_result
@pytest.mark.asyncio
async def test_ai_analysis_integration(self, mcp_server):
"""Test AI analysis tools integration."""
server, mock_ai_router = mcp_server
# Mock AI responses
from src.percepta_mcp.ai_router import AIResponse
analysis_response = AIResponse(
content="The page appears to be a standard login form with good usability.",
provider="test-provider",
model="gpt-3.5-turbo",
tokens_used=50,
cost=0.001,
response_time=0.8,
error=None
)
generation_response = AIResponse(
content="Here's a user-friendly error message: 'Please check your credentials and try again.'",
provider="test-provider",
model="gpt-3.5-turbo",
tokens_used=30,
cost=0.0005,
response_time=0.6,
error=None
)
chat_response = AIResponse(
content="I can help you analyze web pages and generate automated tests. What would you like to test?",
provider="test-provider",
model="gpt-3.5-turbo",
tokens_used=25,
cost=0.0003,
response_time=0.5,
error=None
)
mock_ai_router.generate.side_effect = [analysis_response, generation_response, chat_response]
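# side_effect hands these responses out in call order: analyze, generate, chat.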
# Test AI analysis
analysis_result = await server._execute_tool("ai_analyze", {
"content": "Login form HTML content",
"task": "Analyze the usability of this login form"
})
assert analysis_result["analysis"] == analysis_response.content
assert analysis_result["provider"] == "test-provider"
# Test AI generation
generation_result = await server._execute_tool("ai_generate", {
"prompt": "Generate a user-friendly error message for failed login"
})
assert generation_result["content"] == generation_response.content
# Test AI chat
chat_result = await server._execute_tool("ai_chat", {
"message": "Hello, what can you help me with?",
"conversation_id": "test-session"
})
assert chat_result["reply"] == chat_response.content
@pytest.mark.asyncio
async def test_web_scraping_integration(self, mcp_server):
"""Test web scraping tools integration."""
server, mock_ai_router = mcp_server
# Mock web scraper responses
with patch.object(server.web_scraper, 'scrape') as mock_scrape:
mock_scrape.side_effect = [
{ # Text extraction
"success": True,
"data": "Sample page content with important information",
"url": "https://example.com",
"title": "Example Page"
},
{ # Structured data extraction
"success": True,
"data": {
"json_ld": [{"@type": "WebPage", "name": "Example"}],
"microdata": [{"type": "Product", "name": "Sample Product"}]
},
"url": "https://example.com"
}
]
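# The first scrape call serves the plain-text request, the second the
# structured-data request.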
# Test web scraping
scrape_result = await server._execute_tool("scrape_website", {
"url": "https://example.com",
"selectors": [".content"],
"max_pages": 1
})
assert scrape_result["success"] is True
assert "Sample page content" in scrape_result["data"]
# Test structured data extraction
structured_result = await server._execute_tool("extract_structured_data", {
"url": "https://example.com"
})
assert structured_result["success"] is True
assert "json_ld" in structured_result["data"]
assert "microdata" in structured_result["data"]
@pytest.mark.asyncio
async def test_error_handling_and_recovery(self, mcp_server):
"""Test error handling and recovery mechanisms."""
server, mock_ai_router = mcp_server
# Test browser automation error handling
with patch.object(server.browser_automation, 'navigate') as mock_navigate:
mock_navigate.side_effect = Exception("Network timeout")
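# A failure in the underlying automation layer is expected to propagate out of
# _execute_tool rather than being swallowed.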
with pytest.raises(Exception, match="Network timeout"):
await server._execute_tool("browser_navigate", {
"url": "https://unreachable.com"
})
# Test AI router error handling
from src.percepta_mcp.ai_router import AIResponse
error_response = AIResponse(
content="",
provider="test-provider",
model="gpt-3.5-turbo",
tokens_used=0,
cost=0.0,
response_time=0.0,
error="API rate limit exceeded"
)
mock_ai_router.generate.return_value = error_response
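# The router signals failure via the error field instead of raising, so the
# tool is expected to surface it in its result payload.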
analysis_result = await server._execute_tool("ai_analyze", {
"content": "Test content",
"task": "Analyze this content"
})
assert analysis_result["error"] == "API rate limit exceeded"
# Test unknown tool handling
with pytest.raises(ValueError, match="Unknown tool"):
await server._execute_tool("nonexistent_tool", {})
@pytest.mark.asyncio
async def test_concurrent_operations(self, mcp_server):
"""Test concurrent tool execution."""
server, mock_ai_router = mcp_server
# Mock multiple operations
with patch.object(server.browser_automation, 'extract_text') as mock_extract, \
patch.object(server.web_scraper, 'scrape') as mock_scrape:
mock_extract.return_value = "Browser extracted text"
mock_scrape.return_value = {
"success": True,
"data": "Scraper extracted content",
"url": "https://example.com"
}
# Execute multiple operations concurrently
tasks = [
server._execute_tool("browser_extract_text", {"selector": ".content"}),
server._execute_tool("scrape_website", {"url": "https://example.com"}),
server._execute_tool("browser_extract_text", {"selector": ".header"}),
]
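# return_exceptions=True keeps asyncio.gather from aborting the batch if any
# single tool call raises.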
results = await asyncio.gather(*tasks, return_exceptions=True)
# Verify all operations completed
assert len(results) == 3
assert all(not isinstance(r, Exception) for r in results)
assert "Browser extracted text" in str(results[0])
assert results[1]["success"] is True
@pytest.mark.asyncio
async def test_performance_monitoring(self, mcp_server):
"""Test performance monitoring capabilities."""
server, mock_ai_router = mcp_server
# Mock DevTools monitoring with performance data
with patch.object(server.devtools_analyzer, 'monitor_and_analyze') as mock_monitor:
mock_monitor.return_value = {
"success": True,
"monitoring_duration": 30,
"url": "https://example.com",
"report": {
"summary": {
"overall_health": "fair",
"total_anomalies": 3,
"high_severity": 1,
"medium_severity": 2
},
"anomalies": [
{
"type": "slow_page_load",
"severity": "medium",
"value": 6000,
"description": "Page load completed slowly: 6000ms"
},
{
"type": "failed_resources",
"severity": "high",
"count": 2,
"description": "Found 2 failed resource loads"
}
],
"recommendations": [
{
"category": "Performance",
"priority": "medium",
"suggestion": "Optimize page load performance"
}
]
}
}
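# monitor_and_analyze is mocked, so these assertions check that the tool
# forwards the analyzer's report unchanged.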
# Execute performance monitoring
result = await server._execute_tool("monitor_and_analyze_page", {
"url": "https://example.com",
"duration_seconds": 30,
"include_performance": True
})
assert result["success"] is True
assert result["monitoring_duration"] == 30
report = result["report"]
assert report["summary"]["overall_health"] == "fair"
assert len(report["anomalies"]) == 2
assert len(report["recommendations"]) == 1
# Verify performance metrics are captured
anomaly_types = [a["type"] for a in report["anomalies"]]
assert "slow_page_load" in anomaly_types
assert "failed_resources" in anomaly_types