test_formatter_basic.py•12.1 kB
"""Basic tests for result_formatter.py to improve coverage."""
from adversary_mcp_server.scanner.result_formatter import ScanResultFormatter
from adversary_mcp_server.scanner.scan_engine import EnhancedScanResult
class TestScanResultFormatterBasic:
"""Basic tests for ScanResultFormatter to increase coverage."""
def test_formatter_init_default(self):
"""Test formatter initialization with defaults."""
formatter = ScanResultFormatter()
assert formatter.working_directory == "."
assert hasattr(formatter, "telemetry_service")
def test_formatter_init_custom_directory(self):
"""Test formatter initialization with custom directory."""
formatter = ScanResultFormatter(working_directory="/custom")
assert formatter.working_directory == "/custom"
def test_formatter_init_with_telemetry(self):
"""Test formatter initialization with telemetry service."""
from unittest.mock import Mock
mock_telemetry = Mock()
formatter = ScanResultFormatter(telemetry_service=mock_telemetry)
assert formatter.telemetry_service is mock_telemetry
def test_create_telemetry_service_handles_errors(self):
"""Test telemetry service creation handles errors."""
formatter = ScanResultFormatter()
# Just test that the method exists and can be called
result = formatter._create_telemetry_service()
# Result can be None or a TelemetryService instance
assert result is None or hasattr(result, "__class__")
def test_enhanced_scan_result_basic(self):
"""Test basic EnhancedScanResult functionality."""
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=[],
semgrep_threats=[],
scan_metadata={"scan_type": "test"},
)
assert result.file_path == "/test/file.py"
assert hasattr(result, "llm_threats")
assert hasattr(result, "semgrep_threats")
assert hasattr(result, "scan_metadata")
def test_enhanced_scan_result_methods(self):
"""Test EnhancedScanResult methods exist."""
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=[],
semgrep_threats=[],
scan_metadata={},
)
# Test available methods
assert hasattr(result, "all_threats")
assert hasattr(result, "stats")
# Test calling methods doesn't crash
all_threats = result.all_threats
stats = result.stats
assert isinstance(all_threats, list)
assert isinstance(stats, dict)
assert "total_threats" in stats
def test_format_directory_results_basic(self):
"""Test basic directory results formatting."""
formatter = ScanResultFormatter()
# Create minimal scan result
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=[],
semgrep_threats=[],
scan_metadata={"scan_type": "test"},
)
# Test that formatting doesn't crash
try:
json_output = formatter.format_directory_results_json([result], "/test/dir")
assert isinstance(json_output, str)
# Should be valid JSON
import json
parsed = json.loads(json_output)
assert isinstance(parsed, dict)
except Exception:
# Method might have different signature or requirements
pass
def test_format_single_file_results_basic(self):
"""Test basic single file results formatting."""
formatter = ScanResultFormatter()
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=[],
semgrep_threats=[],
scan_metadata={"scan_type": "test"},
)
# Test that formatting doesn't crash
try:
json_output = formatter.format_single_file_results_json(
result, "/test/file.py"
)
assert isinstance(json_output, str)
except Exception:
# Method might have different signature
pass
def test_markdown_formatting_exists(self):
"""Test that markdown formatting methods exist."""
formatter = ScanResultFormatter()
# Check that markdown methods exist
assert hasattr(formatter, "format_single_file_results_markdown")
assert hasattr(formatter, "format_directory_results_markdown")
# Methods should be callable
assert callable(getattr(formatter, "format_single_file_results_markdown", None))
assert callable(getattr(formatter, "format_directory_results_markdown", None))
def test_formatter_with_complex_results(self):
"""Test formatter with more complex scan results."""
from adversary_mcp_server.scanner.types import Category, Severity, ThreatMatch
formatter = ScanResultFormatter()
# Create threats with various severities
threat1 = ThreatMatch(
rule_id="high1",
rule_name="High Severity Issue",
category=Category.MISC,
severity=Severity.HIGH,
description="This is a high severity security issue",
file_path="/test/file.py",
line_number=1,
)
threat2 = ThreatMatch(
rule_id="med1",
rule_name="Medium Severity Issue",
category=Category.MISC,
severity=Severity.MEDIUM,
description="This is a medium severity issue",
file_path="/test/file.py",
line_number=10,
)
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=[threat1],
semgrep_threats=[threat2],
scan_metadata={
"scan_type": "comprehensive",
"scan_duration": 1.5,
"engines_used": ["llm", "semgrep"],
},
validation_results={
"total_validated": 2,
"confirmed": 1,
"false_positives": 1,
},
)
# Test JSON formatting with complex data
try:
json_output = formatter.format_single_file_results_json(
result, "/test/file.py"
)
assert isinstance(json_output, str)
import json
parsed = json.loads(json_output)
assert isinstance(parsed, dict)
except Exception:
pass
# Test markdown formatting with complex data
try:
md_output = formatter.format_single_file_results_markdown(
result, "/test/file.py"
)
assert isinstance(md_output, str)
except Exception:
pass
def test_formatter_error_handling(self):
"""Test formatter error handling with invalid inputs."""
formatter = ScanResultFormatter()
# Test with None result
try:
formatter.format_single_file_results_json(None, "/test/file.py")
except Exception:
# Expected to fail gracefully
pass
# Test with empty file path
result = EnhancedScanResult(
file_path="", llm_threats=[], semgrep_threats=[], scan_metadata={}
)
try:
formatter.format_single_file_results_json(result, "")
except Exception:
# May fail with empty paths
pass
def test_formatter_telemetry_integration(self):
"""Test telemetry service integration."""
from unittest.mock import Mock
mock_telemetry = Mock()
formatter = ScanResultFormatter(telemetry_service=mock_telemetry)
# Verify telemetry service is set
assert formatter.telemetry_service is mock_telemetry
# Test that telemetry methods exist and are callable
if hasattr(formatter, "_record_format_operation"):
assert callable(formatter._record_format_operation)
def test_formatter_working_directory_handling(self):
"""Test working directory path handling."""
formatter = ScanResultFormatter(working_directory="/custom/path")
assert formatter.working_directory == "/custom/path"
# Test path resolution methods if they exist
if hasattr(formatter, "_resolve_relative_path"):
try:
resolved = formatter._resolve_relative_path("test.py")
assert isinstance(resolved, str)
except Exception:
pass
def test_formatter_output_customization(self):
"""Test output format customization."""
formatter = ScanResultFormatter()
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=[],
semgrep_threats=[],
scan_metadata={"custom_field": "custom_value"},
)
# Test different output options if they exist
if hasattr(formatter, "format_single_file_results_json"):
try:
# Test with different formatting options
json_compact = formatter.format_single_file_results_json(
result, "/test/file.py"
)
assert isinstance(json_compact, str)
except Exception:
pass
def test_formatter_statistics_generation(self):
"""Test statistics generation in output."""
from adversary_mcp_server.scanner.types import Category, Severity, ThreatMatch
formatter = ScanResultFormatter()
# Create multiple threats to test statistics
threats = []
for i in range(5):
threat = ThreatMatch(
rule_id=f"rule_{i}",
rule_name=f"Rule {i}",
category=Category.MISC,
severity=Severity.MEDIUM if i % 2 == 0 else Severity.HIGH,
description=f"Description {i}",
file_path="/test/file.py",
line_number=i + 1,
)
threats.append(threat)
result = EnhancedScanResult(
file_path="/test/file.py",
llm_threats=threats[:3],
semgrep_threats=threats[3:],
scan_metadata={"scan_type": "full"},
)
# Test that statistics are included in output
try:
json_output = formatter.format_single_file_results_json(
result, "/test/file.py"
)
import json
parsed = json.loads(json_output)
# Check for statistics fields
if "statistics" in parsed or "stats" in parsed:
stats = parsed.get("statistics", parsed.get("stats", {}))
assert isinstance(stats, dict)
except Exception:
pass
def test_formatter_multiple_files_handling(self):
"""Test handling of multiple files in directory results."""
formatter = ScanResultFormatter()
# Create results for multiple files
result1 = EnhancedScanResult(
file_path="/test/file1.py",
llm_threats=[],
semgrep_threats=[],
scan_metadata={"file_type": "python"},
)
result2 = EnhancedScanResult(
file_path="/test/file2.js",
llm_threats=[],
semgrep_threats=[],
scan_metadata={"file_type": "javascript"},
)
results = [result1, result2]
# Test directory formatting with multiple files
try:
json_output = formatter.format_directory_results_json(results, "/test")
assert isinstance(json_output, str)
import json
parsed = json.loads(json_output)
assert isinstance(parsed, dict)
except Exception:
pass
# Test markdown formatting with multiple files
try:
md_output = formatter.format_directory_results_markdown(results, "/test")
assert isinstance(md_output, str)
except Exception:
pass