"""
Tests for the DevTools analyzer module.
"""
import pytest
from unittest.mock import AsyncMock, Mock, patch
from src.percepta_mcp.tools.devtools_analyzer import DevToolsAnalyzer
from src.percepta_mcp.config import Settings
@pytest.fixture
def mock_settings():
"""Create mock settings."""
return Mock(spec=Settings)
@pytest.fixture
def mock_browser_automation():
"""Create mock browser automation."""
browser = AsyncMock()
browser.navigate = AsyncMock()
browser.evaluate_script = AsyncMock()
browser.page = Mock()
browser.page.url = "https://example.com"
return browser
@pytest.fixture
def devtools_analyzer(mock_settings, mock_browser_automation):
"""Create DevToolsAnalyzer instance."""
return DevToolsAnalyzer(mock_settings, mock_browser_automation)
@pytest.fixture
def sample_console_logs():
"""Sample console logs for testing."""
return [
{
"type": "log",
"message": "Page loaded successfully",
"timestamp": 1640995200000,
"url": "https://example.com"
},
{
"type": "error",
"message": "TypeError: Cannot read property 'foo' of undefined",
"timestamp": 1640995210000,
"url": "https://example.com"
},
{
"type": "warn",
"message": "Deprecated API usage detected",
"timestamp": 1640995220000,
"url": "https://example.com"
}
]
@pytest.fixture
def sample_network_logs():
"""Sample network logs for testing."""
return [
{
"url": "https://example.com/api/data",
"method": "GET",
"status": 200,
"statusText": "OK",
"duration": 150,
"timestamp": 1640995200000
},
{
"url": "https://example.com/api/slow",
"method": "GET",
"status": 200,
"statusText": "OK",
"duration": 6000, # Slow request
"timestamp": 1640995210000
},
{
"url": "https://example.com/api/error",
"method": "POST",
"status": 500,
"statusText": "Internal Server Error",
"duration": 100,
"timestamp": 1640995220000
}
]
@pytest.fixture
def sample_performance_metrics():
"""Sample performance metrics for testing."""
return {
"domContentLoaded": 2000,
"loadComplete": 4000,
"totalResources": 25,
"slowResources": 2,
"failedResources": 1,
"timestamp": 1640995200000
}
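# Convenience fixture sketch: test_collect_devtools_data_success expects the
# injected collection script to return its data under the "consoleLogs",
# "networkRequests", and "performanceMetrics" keys. Bundling the sample
# fixtures into that assumed payload shape lets future tests reuse it instead
# of rebuilding the dict by hand; it is not yet wired into the tests below.
@pytest.fixture
def sample_devtools_payload(sample_console_logs, sample_network_logs, sample_performance_metrics):
    """Assumed shape of the payload returned by the injected monitoring script."""
    return {
        "consoleLogs": sample_console_logs,
        "networkRequests": sample_network_logs,
        "performanceMetrics": sample_performance_metrics
    }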
class TestDevToolsAnalyzer:
"""Test cases for DevToolsAnalyzer class."""
def test_init(self, mock_settings):
"""Test DevToolsAnalyzer initialization."""
analyzer = DevToolsAnalyzer(mock_settings)
assert analyzer.settings == mock_settings
assert analyzer.browser_automation is not None
assert analyzer.console_logs == []
assert analyzer.network_logs == []
assert analyzer.performance_metrics == {}
assert analyzer.anomalies == []
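    # Companion sketch to test_init: the devtools_analyzer fixture constructs
    # the analyzer with an explicit browser automation object, and this assumes
    # that injected instance is the one stored on .browser_automation.
    def test_init_with_injected_browser(self, mock_settings, mock_browser_automation):
        """Sketch: initialization with an injected browser automation instance."""
        analyzer = DevToolsAnalyzer(mock_settings, mock_browser_automation)
        assert analyzer.settings == mock_settings
        assert analyzer.browser_automation is mock_browser_automation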
@pytest.mark.asyncio
async def test_start_monitoring_success(self, devtools_analyzer, mock_browser_automation):
"""Test successful monitoring startup."""
        # Set up successful navigation
mock_browser_automation.navigate.return_value = {"success": True}
mock_browser_automation.evaluate_script.return_value = {"success": True}
# Execute test
result = await devtools_analyzer.start_monitoring("https://example.com")
# Verify results
assert result["success"] is True
assert result["monitoring_active"] is True
assert result["url"] == "https://example.com"
assert "timestamp" in result
# Verify mocks were called
mock_browser_automation.navigate.assert_called_once_with("https://example.com")
mock_browser_automation.evaluate_script.assert_called_once()
@pytest.mark.asyncio
async def test_start_monitoring_navigation_failure(self, devtools_analyzer, mock_browser_automation):
"""Test monitoring startup when navigation fails."""
        # Set up navigation failure
mock_browser_automation.navigate.return_value = {
"success": False,
"error": "Failed to load page"
}
# Execute test
result = await devtools_analyzer.start_monitoring("https://invalid-url.com")
# Verify failure
assert result["success"] is False
assert result["monitoring_active"] is False
assert "Failed to navigate to https://invalid-url.com" in result["error"]
@pytest.mark.asyncio
async def test_start_monitoring_script_injection_failure(self, devtools_analyzer, mock_browser_automation):
"""Test monitoring startup when script injection fails."""
        # Set up successful navigation but failed script injection
mock_browser_automation.navigate.return_value = {"success": True}
mock_browser_automation.evaluate_script.side_effect = Exception("Script injection failed")
# Execute test
result = await devtools_analyzer.start_monitoring("https://example.com")
# Verify failure
assert result["success"] is False
assert result["monitoring_active"] is False
assert "Failed to start monitoring" in result["error"]
@pytest.mark.asyncio
async def test_collect_devtools_data_success(self, devtools_analyzer, mock_browser_automation,
sample_console_logs, sample_network_logs, sample_performance_metrics):
"""Test successful DevTools data collection."""
        # Set up mock data
mock_devtools_data = {
"consoleLogs": sample_console_logs,
"networkRequests": sample_network_logs,
"performanceMetrics": sample_performance_metrics
}
mock_browser_automation.evaluate_script.return_value = {
"success": True,
"data": mock_devtools_data
}
# Execute test
result = await devtools_analyzer.collect_devtools_data()
# Verify results
assert result["success"] is True
assert "data" in result
data = result["data"]
assert data["total_console_entries"] == 3
assert data["total_network_requests"] == 3
assert len(data["console_logs"]) == 3
assert len(data["network_logs"]) == 3
assert data["performance_metrics"] == sample_performance_metrics
# Verify internal state was updated
assert devtools_analyzer.console_logs == sample_console_logs
assert devtools_analyzer.network_logs == sample_network_logs
assert devtools_analyzer.performance_metrics == sample_performance_metrics
@pytest.mark.asyncio
async def test_collect_devtools_data_no_data(self, devtools_analyzer, mock_browser_automation):
"""Test DevTools data collection when no data is available."""
        # Set up no data available
mock_browser_automation.evaluate_script.return_value = {
"success": False,
"data": None
}
# Execute test
result = await devtools_analyzer.collect_devtools_data()
# Verify failure
assert result["success"] is False
assert "No DevTools data available" in result["error"]
assert result["data"] is None
@pytest.mark.asyncio
async def test_collect_devtools_data_script_error(self, devtools_analyzer, mock_browser_automation):
"""Test DevTools data collection when script execution fails."""
        # Set up script execution failure
mock_browser_automation.evaluate_script.side_effect = Exception("Script execution failed")
# Execute test
result = await devtools_analyzer.collect_devtools_data()
# Verify failure
assert result["success"] is False
assert "Failed to collect DevTools data" in result["error"]
@pytest.mark.asyncio
async def test_analyze_anomalies_console_errors(self, devtools_analyzer, sample_console_logs):
"""Test anomaly analysis for console errors."""
# Set up test data
devtools_analyzer.console_logs = sample_console_logs
devtools_analyzer.network_logs = []
devtools_analyzer.performance_metrics = {}
# Execute test
result = await devtools_analyzer.analyze_anomalies()
# Verify results
assert result["success"] is True
anomalies = result["anomalies"]
# Find console error anomaly
error_anomaly = next((a for a in anomalies if a["type"] == "console_errors"), None)
assert error_anomaly is not None
assert error_anomaly["severity"] == "high"
assert error_anomaly["count"] == 1
# Find console warning anomaly
warn_anomaly = next((a for a in anomalies if a["type"] == "console_warnings"), None)
assert warn_anomaly is not None
assert warn_anomaly["severity"] == "medium"
assert warn_anomaly["count"] == 1
# Verify summary
summary = result["summary"]
assert summary["total_anomalies"] > 0
assert summary["high_severity"] >= 1
assert summary["medium_severity"] >= 1
@pytest.mark.asyncio
async def test_analyze_anomalies_network_issues(self, devtools_analyzer, sample_network_logs):
"""Test anomaly analysis for network issues."""
# Set up test data
devtools_analyzer.console_logs = []
devtools_analyzer.network_logs = sample_network_logs
devtools_analyzer.performance_metrics = {}
# Execute test
result = await devtools_analyzer.analyze_anomalies()
# Verify results
assert result["success"] is True
anomalies = result["anomalies"]
# Find network failure anomaly
failure_anomaly = next((a for a in anomalies if a["type"] == "network_failures"), None)
assert failure_anomaly is not None
assert failure_anomaly["severity"] == "high"
assert failure_anomaly["count"] == 1
# Find slow request anomaly
slow_anomaly = next((a for a in anomalies if a["type"] == "slow_requests"), None)
assert slow_anomaly is not None
assert slow_anomaly["severity"] == "medium"
assert slow_anomaly["count"] == 1
@pytest.mark.asyncio
async def test_analyze_anomalies_performance_issues(self, devtools_analyzer):
"""Test anomaly analysis for performance issues."""
# Set up test data with performance issues
devtools_analyzer.console_logs = []
devtools_analyzer.network_logs = []
devtools_analyzer.performance_metrics = {
"domContentLoaded": 4000, # Slow DOM load (>3s)
"loadComplete": 8000, # Slow page load (>5s)
"failedResources": 3 # Failed resources
}
# Execute test
result = await devtools_analyzer.analyze_anomalies()
# Verify results
assert result["success"] is True
anomalies = result["anomalies"]
# Check for performance anomalies
dom_anomaly = next((a for a in anomalies if a["type"] == "slow_dom_load"), None)
assert dom_anomaly is not None
assert dom_anomaly["severity"] == "medium"
assert dom_anomaly["value"] == 4000
load_anomaly = next((a for a in anomalies if a["type"] == "slow_page_load"), None)
assert load_anomaly is not None
assert load_anomaly["severity"] == "medium"
assert load_anomaly["value"] == 8000
resource_anomaly = next((a for a in anomalies if a["type"] == "failed_resources"), None)
assert resource_anomaly is not None
assert resource_anomaly["severity"] == "high"
assert resource_anomaly["count"] == 3
@pytest.mark.asyncio
async def test_analyze_anomalies_no_issues(self, devtools_analyzer):
"""Test anomaly analysis when no issues are found."""
# Set up clean test data
devtools_analyzer.console_logs = [
{"type": "log", "message": "All good", "timestamp": 123456}
]
devtools_analyzer.network_logs = [
{"status": 200, "duration": 100}
]
devtools_analyzer.performance_metrics = {
"domContentLoaded": 1000, # Fast
"loadComplete": 2000, # Fast
"failedResources": 0 # No failures
}
# Execute test
result = await devtools_analyzer.analyze_anomalies()
# Verify results
assert result["success"] is True
anomalies = result["anomalies"]
assert len(anomalies) == 0
summary = result["summary"]
assert summary["total_anomalies"] == 0
assert summary["high_severity"] == 0
assert summary["medium_severity"] == 0
@pytest.mark.asyncio
async def test_analyze_anomalies_error_handling(self, devtools_analyzer):
"""Test error handling in anomaly analysis."""
# Set invalid data to trigger an exception during processing
devtools_analyzer.console_logs = "invalid_data" # Should be a list
result = await devtools_analyzer.analyze_anomalies()
assert result["success"] is False
assert "Failed to analyze anomalies" in result["error"]
assert result["anomalies"] == []
assert "summary" in result
@pytest.mark.asyncio
async def test_generate_feedback_report_success(self, devtools_analyzer, mock_browser_automation,
sample_console_logs, sample_network_logs):
"""Test successful feedback report generation."""
        # Set up test data
devtools_analyzer.console_logs = sample_console_logs
devtools_analyzer.network_logs = sample_network_logs
devtools_analyzer.performance_metrics = {"domContentLoaded": 2000}
# Mock the collect and analyze methods
with patch.object(devtools_analyzer, 'collect_devtools_data') as mock_collect, \
patch.object(devtools_analyzer, 'analyze_anomalies') as mock_analyze:
mock_collect.return_value = {"success": True, "data": {}}
mock_analyze.return_value = {
"success": True,
"anomalies": [
{"type": "console_errors", "severity": "high", "count": 1}
]
}
# Execute test
result = await devtools_analyzer.generate_feedback_report()
# Verify results
assert result["success"] is True
assert "report" in result
report = result["report"]
assert "summary" in report
assert "anomalies" in report
assert "recommendations" in report
assert "raw_data" in report
# Check summary
summary = report["summary"]
assert summary["url"] == "https://example.com"
assert "scan_timestamp" in summary
assert summary["overall_health"] in ["good", "fair", "poor"]
# Check recommendations are generated
recommendations = report["recommendations"]
assert len(recommendations) > 0
assert any("JavaScript Errors" in rec["category"] for rec in recommendations)
@pytest.mark.asyncio
async def test_generate_feedback_report_collection_failure(self, devtools_analyzer):
"""Test feedback report generation when data collection fails."""
with patch.object(devtools_analyzer, 'collect_devtools_data') as mock_collect:
mock_collect.return_value = {"success": False, "error": "Collection failed"}
result = await devtools_analyzer.generate_feedback_report()
assert result["success"] is False
assert "Failed to collect DevTools data" in result["error"]
assert result["report"] is None
@pytest.mark.asyncio
async def test_monitor_and_analyze_complete_workflow(self, devtools_analyzer, mock_browser_automation):
"""Test complete monitoring and analysis workflow."""
# Mock all the required methods
with patch.object(devtools_analyzer, 'start_monitoring') as mock_start, \
patch.object(devtools_analyzer, 'generate_feedback_report') as mock_report, \
patch('asyncio.sleep') as mock_sleep:
mock_start.return_value = {"success": True, "monitoring_active": True}
mock_report.return_value = {
"success": True,
"report": {"summary": {"overall_health": "good"}}
}
# Execute test
result = await devtools_analyzer.monitor_and_analyze("https://example.com", 10)
# Verify results
assert result["success"] is True
assert result["monitoring_duration"] == 10
assert result["url"] == "https://example.com"
assert "report" in result
# Verify methods were called
mock_start.assert_called_once_with("https://example.com")
mock_sleep.assert_called_once_with(10)
mock_report.assert_called_once()
@pytest.mark.asyncio
async def test_monitor_and_analyze_monitoring_failure(self, devtools_analyzer):
"""Test monitoring and analysis when monitoring startup fails."""
with patch.object(devtools_analyzer, 'start_monitoring') as mock_start:
mock_start.return_value = {"success": False, "error": "Monitoring failed"}
result = await devtools_analyzer.monitor_and_analyze("https://example.com", 5)
assert result["success"] is False
assert "Monitoring failed" in result["error"]
@pytest.mark.asyncio
async def test_monitor_and_analyze_error_handling(self, devtools_analyzer):
"""Test error handling in complete workflow."""
with patch.object(devtools_analyzer, 'start_monitoring', side_effect=Exception("Unexpected error")):
result = await devtools_analyzer.monitor_and_analyze("https://example.com", 5)
assert result["success"] is False
assert "Failed to complete monitoring and analysis" in result["error"]
assert "Unexpected error" in result["error"]
@pytest.mark.asyncio
async def test_recommendation_generation(self, devtools_analyzer):
"""Test that appropriate recommendations are generated for different anomaly types."""
# Set up various anomalies
devtools_analyzer.anomalies = [
{"type": "console_errors", "severity": "high", "count": 5},
{"type": "network_failures", "severity": "high", "count": 3},
{"type": "slow_requests", "severity": "medium", "count": 2},
{"type": "slow_page_load", "severity": "medium", "value": 6000}
]
# Mock data collection and analysis
with patch.object(devtools_analyzer, 'collect_devtools_data') as mock_collect, \
patch.object(devtools_analyzer, 'analyze_anomalies') as mock_analyze:
mock_collect.return_value = {"success": True, "data": {}}
mock_analyze.return_value = {"success": True, "anomalies": devtools_analyzer.anomalies}
result = await devtools_analyzer.generate_feedback_report()
assert result["success"] is True
recommendations = result["report"]["recommendations"]
# Check that recommendations cover different categories
categories = [rec["category"] for rec in recommendations]
assert "JavaScript Errors" in categories
assert "Network Issues" in categories
assert "Performance" in categories
# Check priority levels
priorities = [rec["priority"] for rec in recommendations]
assert "high" in priorities
assert "medium" in priorities
@pytest.mark.asyncio
async def test_performance_thresholds(self, devtools_analyzer):
"""Test that performance thresholds are correctly applied."""
test_cases = [
# (dom_time, load_time, expected_anomalies)
(2000, 3000, []), # Good performance
(4000, 3000, ["slow_dom_load"]), # Slow DOM
(2000, 6000, ["slow_page_load"]), # Slow load
(4000, 6000, ["slow_dom_load", "slow_page_load"]) # Both slow
]
for dom_time, load_time, expected in test_cases:
devtools_analyzer.console_logs = []
devtools_analyzer.network_logs = []
devtools_analyzer.performance_metrics = {
"domContentLoaded": dom_time,
"loadComplete": load_time,
"failedResources": 0
}
result = await devtools_analyzer.analyze_anomalies()
anomaly_types = [a["type"] for a in result["anomalies"]]
for expected_type in expected:
assert expected_type in anomaly_types, f"Expected {expected_type} for DOM={dom_time}, Load={load_time}"
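    # The loop-based test above re-runs analyze_anomalies for each case by
    # hand; the same cases can be expressed with pytest.mark.parametrize so
    # each combination reports as its own test. This is a stylistic sketch
    # using the same thresholds implied by the fixtures (DOM > 3s, load > 5s);
    # it duplicates test_performance_thresholds rather than replacing it.
    @pytest.mark.asyncio
    @pytest.mark.parametrize("dom_time,load_time,expected", [
        (2000, 3000, []),
        (4000, 3000, ["slow_dom_load"]),
        (2000, 6000, ["slow_page_load"]),
        (4000, 6000, ["slow_dom_load", "slow_page_load"]),
    ])
    async def test_performance_thresholds_parametrized(self, devtools_analyzer, dom_time, load_time, expected):
        """Parametrized variant of the threshold cases above."""
        devtools_analyzer.console_logs = []
        devtools_analyzer.network_logs = []
        devtools_analyzer.performance_metrics = {
            "domContentLoaded": dom_time,
            "loadComplete": load_time,
            "failedResources": 0
        }
        result = await devtools_analyzer.analyze_anomalies()
        anomaly_types = [a["type"] for a in result["anomalies"]]
        assert set(expected) <= set(anomaly_types), f"Expected {expected} for DOM={dom_time}, Load={load_time}"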