"""
Capture Tool Integration Tests using mcp-pytest fixtures.
Tests the editor_capture_* tools via the mcp-pytest plugin.
Requires a UE5 project with a valid level to capture.
Usage:
pytest tests/test_capture_mcp.py -v -s
Note: These tests require UE5 to be installed and will launch the editor.
"""
import json
import tempfile
from pathlib import Path
from typing import Any
import pytest
from mcp_pytest import ToolCaller, ToolCallResult
# Use the same test level path as conftest.py for consistency
TEST_LEVEL_PATH = "/Game/Tests/AutoGeneratedTestLevel"
def parse_tool_result(result: ToolCallResult) -> dict[str, Any]:
    """Parse a tool result's text content as JSON, always returning a dict.

    Return shapes:
    - the parsed JSON object, when the text is a valid JSON object;
    - ``{"raw_text": <text>}`` when the text is not valid JSON *or* parses
      to a non-object value (list, string, number) -- callers rely on
      being able to call ``.get()`` on the result;
    - ``{"is_error": ..., "content": ...}`` when there is no text content.
    """
    text = result.text_content
    if not text:
        return {"is_error": result.is_error, "content": str(result.result.content)}
    try:
        parsed = json.loads(text)
    except json.JSONDecodeError:
        return {"raw_text": text}
    # json.loads may yield a non-dict (e.g. a JSON array); normalize so
    # every caller can safely use dict methods without a TypeError.
    if isinstance(parsed, dict):
        return parsed
    return {"raw_text": text}
@pytest.mark.integration
class TestCaptureTools:
    """Integration tests for capture tools.

    These tests drive the editor via the mcp-pytest fixtures:
    - ``tool_caller``: raw caller, no editor required.
    - ``initialized_tool_caller``: caller with an initialized session.
    - ``running_editor``: caller with the UE5 editor already launched.
    """

    @pytest.mark.asyncio
    async def test_list_tools_includes_capture(self, tool_caller: ToolCaller):
        """Test that capture tools are listed."""
        tools = await tool_caller.list_tools()
        # Check capture tools are present
        assert "editor_capture_pie" in tools
        assert "editor_capture_window" in tools
        assert "editor_level_screenshot" in tools

    @pytest.mark.asyncio
    async def test_capture_pie_with_editor(self, initialized_tool_caller: ToolCaller):
        """Test capture.pie with editor running."""
        pytest.skip(
            "PIE capture test skipped - PIE screenshots may not be saved to test directories"
        )

    @pytest.mark.asyncio
    async def test_capture_window_with_editor(
        self, running_editor: ToolCaller, test_output_dir: Path, test_level_path: str
    ):
        """Test capture.window with editor running."""
        # Editor is already running via running_editor fixture
        # Create test-specific output directory
        window_dir = test_output_dir / "window"
        window_dir.mkdir(exist_ok=True)
        output_file = window_dir / "test_window_capture.png"
        result = await running_editor.call(
            "editor_capture_window",
            {
                "level": test_level_path,
                "output_file": str(output_file),
                "mode": "window",
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        print("\n=== Window Capture Debug ===")
        print(f"Output file path: {output_file}")
        print(f"Output file exists: {output_file.exists()}")
        print(f"Return data: {data}")
        print("=============================\n")
        assert data.get("success"), f"Capture window failed: {data}"
        # Check if capture was successful and file exists
        if data.get("captured"):
            assert output_file.exists(), (
                f"Captured flag is True but file does not exist: {output_file}"
            )
            # Anything at or under 1 KiB is almost certainly not a real PNG capture.
            file_size = output_file.stat().st_size
            assert file_size > 1024, (
                f"Captured file is empty or too small (size: {file_size} bytes)"
            )
        else:
            # If captured is False, file should not exist
            assert not output_file.exists(), f"Captured is False but file exists: {output_file}"
            print("Note: captured=False, skipping file verification")

    @pytest.mark.asyncio
    async def test_capture_window_batch_mode_with_editor(self, initialized_tool_caller: ToolCaller):
        """Test capture.window batch mode with editor running."""
        pytest.skip("Batch capture test skipped - requires investigation of batch mode")

    @pytest.mark.asyncio
    async def test_level_screenshot_default(
        self, running_editor: ToolCaller, test_output_dir: Path
    ):
        """Test editor_level_screenshot with default parameters."""
        # Create test-specific output directory
        screenshot_dir = test_output_dir / "level_screenshot"
        screenshot_dir.mkdir(exist_ok=True)
        result = await running_editor.call(
            "editor_level_screenshot",
            {
                "output_dir": str(screenshot_dir),
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        print("\n=== Level Screenshot Default Debug ===")
        print(f"Output dir: {screenshot_dir}")
        print(f"Return data: {data}")
        print("======================================\n")
        assert data.get("success"), f"Level screenshot failed: {data}"
        assert data.get("screenshot_count", 0) >= 1, "Expected at least 1 screenshot"
        # Verify that screenshot files were created
        screenshot_files = list(screenshot_dir.glob("**/*.png"))
        assert len(screenshot_files) >= 1, f"No screenshot files were created in {screenshot_dir}"

    @pytest.mark.asyncio
    async def test_level_screenshot_custom_cameras(
        self, running_editor: ToolCaller, test_output_dir: Path
    ):
        """Test editor_level_screenshot with custom camera positions."""
        # Create test-specific output directory
        screenshot_dir = test_output_dir / "level_screenshot_custom"
        screenshot_dir.mkdir(exist_ok=True)
        # Camera spec format: "<name>@<x>,<y>,<z>" with a shared look-at target.
        result = await running_editor.call(
            "editor_level_screenshot",
            {
                "cameras": ["front@500,0,300", "back@-500,0,300"],
                "target": "0,0,100",
                "resolution": "640x480",
                "output_dir": str(screenshot_dir),
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        print("\n=== Level Screenshot Custom Cameras Debug ===")
        print(f"Output dir: {screenshot_dir}")
        print(f"Return data: {data}")
        print("==============================================\n")
        assert data.get("success"), f"Level screenshot failed: {data}"
        assert data.get("screenshot_count") == 2, f"Expected 2 screenshots, got {data.get('screenshot_count')}"
        # Verify that screenshot files were created
        screenshot_files = list(screenshot_dir.glob("**/*.png"))
        assert len(screenshot_files) == 2, f"Expected 2 screenshot files, found {len(screenshot_files)} in {screenshot_dir}"
        # Verify file sizes
        for img_file in screenshot_files:
            file_size = img_file.stat().st_size
            assert file_size > 1024, (
                f"Screenshot {img_file.name} is empty or too small (size: {file_size} bytes)"
            )

    @pytest.mark.asyncio
    async def test_level_screenshot_with_level_parameter(
        self, running_editor: ToolCaller, test_output_dir: Path
    ):
        """Test editor_level_screenshot with level parameter to load a specific level."""
        # Use existing level that we know exists in the test project
        level_path = "/Game/BlueSkyLevel"
        # Create test-specific output directory
        screenshot_dir = test_output_dir / "level_screenshot_with_level"
        screenshot_dir.mkdir(exist_ok=True)
        result = await running_editor.call(
            "editor_level_screenshot",
            {
                "level": level_path,
                "cameras": ["front@500,0,500"],
                "target": "0,0,0",
                "output_dir": str(screenshot_dir),
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        print("\n=== Level Screenshot With Level Parameter Debug ===")
        print(f"Level path: {level_path}")
        print(f"Output dir: {screenshot_dir}")
        print(f"Return data: {data}")
        print("===================================================\n")
        assert data.get("success"), f"Level screenshot with level parameter failed: {data}"
        # Default of 0 keeps a missing key a clean assertion failure instead of
        # a TypeError from comparing None >= 1 (consistent with the default test).
        assert data.get("screenshot_count", 0) >= 1, "Expected at least 1 screenshot"
        # Verify that screenshot files were created
        screenshot_files = list(screenshot_dir.glob("**/*.png"))
        assert len(screenshot_files) >= 1, f"No screenshot files were created in {screenshot_dir}"
@pytest.mark.integration
class TestCaptureToolValidation:
    """Test input validation for capture tools.

    Each test omits a required parameter for a given capture mode and
    asserts that the tool reports a validation error rather than running.
    """

    @pytest.mark.asyncio
    async def test_capture_window_mode_validation(self, initialized_tool_caller: ToolCaller):
        """Test capture.window validates output_file for window mode."""
        # window mode without output_file should fail
        result = await initialized_tool_caller.call(
            "editor_capture_window",
            {
                "level": TEST_LEVEL_PATH,
                "mode": "window",
                # output_file is missing
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        assert data.get("success") is False
        assert "output_file" in data.get("error", "").lower()

    @pytest.mark.asyncio
    async def test_capture_window_asset_mode_validation(self, initialized_tool_caller: ToolCaller):
        """Test capture.window validates asset_path for asset mode."""
        # asset mode without asset_path should fail
        result = await initialized_tool_caller.call(
            "editor_capture_window",
            {
                "level": TEST_LEVEL_PATH,
                "mode": "asset",
                "output_file": "/tmp/test.png",
                # asset_path is missing
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        assert data.get("success") is False
        assert "asset_path" in data.get("error", "").lower()

    @pytest.mark.asyncio
    async def test_capture_window_batch_mode_validation(self, initialized_tool_caller: ToolCaller):
        """Test capture.window validates asset_list and output_dir for batch mode."""
        # batch mode without asset_list should fail
        result = await initialized_tool_caller.call(
            "editor_capture_window",
            {
                "level": TEST_LEVEL_PATH,
                "mode": "batch",
                "output_dir": "/tmp/output",
                # asset_list is missing
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        assert data.get("success") is False
        # The error message wording may mention either required batch parameter.
        assert (
            "asset_list" in data.get("error", "").lower()
            or "output_dir" in data.get("error", "").lower()
        )

    @pytest.mark.asyncio
    async def test_capture_pie_missing_level(self, tool_caller: ToolCaller):
        """Test capture.pie with missing level parameter."""
        # Missing required 'level' parameter should cause error
        try:
            result = await tool_caller.call(
                "editor_capture_pie",
                {
                    "output_dir": "/tmp/output",
                    # "level" is missing
                    "duration_seconds": 5.0,
                },
                timeout=120,
            )
            data = parse_tool_result(result)
            # If we get here, check for error in result
            assert "error" in data or "raw_text" in data or data.get("success") is False
        except Exception as e:
            # Expected - missing required parameter
            assert "level" in str(e).lower() or "required" in str(e).lower()

    @pytest.mark.asyncio
    async def test_capture_pie_missing_output_dir(self, tool_caller: ToolCaller):
        """Test capture.pie with missing output_dir parameter."""
        # Missing required 'output_dir' parameter should cause error
        try:
            result = await tool_caller.call(
                "editor_capture_pie",
                {
                    "level": TEST_LEVEL_PATH,
                    # "output_dir" is missing
                    "duration_seconds": 5.0,
                },
                timeout=120,
            )
            data = parse_tool_result(result)
            # If we get here, check for error in result
            assert "error" in data or "raw_text" in data or data.get("success") is False
        except Exception as e:
            # Expected - missing required parameter
            assert "output_dir" in str(e).lower() or "required" in str(e).lower()

    @pytest.mark.asyncio
    async def test_level_screenshot_invalid_camera_format(
        self, running_editor: ToolCaller, test_output_dir: Path
    ):
        """Test editor_level_screenshot handles invalid camera format gracefully."""
        screenshot_dir = test_output_dir / "level_screenshot_invalid"
        screenshot_dir.mkdir(exist_ok=True)
        # Invalid camera format (missing @)
        result = await running_editor.call(
            "editor_level_screenshot",
            {
                "cameras": ["invalid_camera_format"],
                "output_dir": str(screenshot_dir),
            },
            timeout=120,
        )
        data = parse_tool_result(result)
        # Should still succeed but with 0 screenshots (invalid camera skipped)
        # or fail gracefully
        print("\n=== Invalid Camera Format Debug ===")
        print(f"Return data: {data}")
        print("===================================\n")
        # The script should handle invalid format and skip it
        # Result depends on implementation: either success=False or success=True with 0 screenshots
        if data.get("success"):
            assert data.get("screenshot_count", 0) == 0, "Invalid camera should be skipped"
        # If success is False, that's also acceptable behavior