TwT23333
by TwT23333

run_tests

Execute tests using auto-detected frameworks such as pytest, Jest, or Maven. Optionally target a specific test file or directory, pass additional arguments to the test runner, or only detect available frameworks without running any tests.

Instructions

Run tests using detected testing frameworks (pytest, jest, maven, etc.)

Input Schema

| Name        | Required | Description                                                | Default |
|-------------|----------|------------------------------------------------------------|---------|
| args        | No       | Additional arguments to pass to the test runner            |         |
| detect_only | No       | Only detect available frameworks without running tests     | false   |
| framework   | No       | Testing framework to use (auto-detected if not specified)  |         |
| test_path   | No       | Specific test file or directory to run                     |         |
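
The schema translates into argument payloads like the ones below. This is an illustrative sketch: the keys come from the schema above, while the specific values (file path, flags) are made-up examples.

    # Detect available frameworks without running anything:
    detect_args = {"detect_only": True}

    # Run one pytest file with extra runner flags (hypothetical path and flags):
    run_args = {
        "framework": "pytest",
        "test_path": "tests/test_example.py",
        "args": ["-v", "--maxfail=1"],
    }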

Implementation Reference

  • RunTestsTool class: MCPTool implementation with name 'run_tests', input schema, description, and execute method that handles test detection and execution via TestingFramework.
    class RunTestsTool(MCPTool):
        """Tool for running tests using various testing frameworks."""

        @property
        def name(self) -> str:
            return "run_tests"

        @property
        def description(self) -> str:
            return "Run tests using detected testing frameworks (pytest, jest, maven, etc.)"

        @property
        def input_schema(self) -> Dict[str, Any]:
            return {
                "type": "object",
                "properties": {
                    "framework": {
                        "type": "string",
                        "description": "Testing framework to use (auto-detected if not specified)"
                    },
                    "test_path": {
                        "type": "string",
                        "description": "Specific test file or directory to run"
                    },
                    "args": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Additional arguments to pass to the test runner"
                    },
                    "detect_only": {
                        "type": "boolean",
                        "description": "Only detect available frameworks without running tests",
                        "default": False
                    }
                }
            }

        async def execute(self, arguments: Dict[str, Any]) -> ToolResult:
            """Execute the run_tests tool."""
            try:
                framework = arguments.get("framework")
                test_path = arguments.get("test_path")
                args = arguments.get("args", [])
                detect_only = arguments.get("detect_only", False)

                testing_framework = TestingFramework(self.workspace.config, self.workspace.workspace_path)

                # If detect_only is True, just return framework detection
                if detect_only:
                    result = await testing_framework.detect_test_framework()
                    message = "Detected testing frameworks:\n\n"
                    if result['detected_frameworks']:
                        for fw_name in result['detected_frameworks']:
                            fw_info = result['frameworks'][fw_name]
                            message += f"✓ {fw_name.title()}\n"
                            if fw_info.get('config_files'):
                                message += f"  Config files: {', '.join(fw_info['config_files'])}\n"
                            if fw_info.get('test_command'):
                                message += f"  Test command: {fw_info['test_command']}\n"
                            if fw_info.get('test_files'):
                                message += f"  Test files: {len(fw_info['test_files'])} found\n"
                            message += "\n"
                    else:
                        message += "No testing frameworks detected."
                    return ToolResult(
                        success=True,
                        message=message,
                        properties=result
                    )

                # Run tests
                result = await testing_framework.run_tests(framework, test_path, args)

                if "error" in result:
                    return self.format_error(result["error"])

                # Format the response
                message = f"Test Results (Framework: {result['framework']})\n"
                message += f"Command: {result['command']}\n"
                message += f"Exit Code: {result['return_code']}\n\n"

                summary = result.get('summary', {})
                if summary:
                    message += "Summary:\n"
                    if summary.get('total'):
                        message += f"  Total tests: {summary['total']}\n"
                    if summary.get('passed'):
                        message += f"  ✓ Passed: {summary['passed']}\n"
                    if summary.get('failed'):
                        message += f"  ✗ Failed: {summary['failed']}\n"
                    if summary.get('skipped'):
                        message += f"  ⊝ Skipped: {summary['skipped']}\n"
                    if summary.get('errors'):
                        message += f"  ⚠ Errors: {summary['errors']}\n"
                    if summary.get('duration'):
                        message += f"  Duration: {summary['duration']}s\n"
                    message += "\n"

                # Add output if tests failed or if verbose
                if not result['success'] or result.get('stderr'):
                    message += "Output:\n"
                    if result.get('stderr'):
                        message += f"STDERR:\n{result['stderr']}\n"
                    if result.get('stdout'):
                        # Truncate long output
                        stdout = result['stdout']
                        if len(stdout) > 2000:
                            stdout = stdout[:2000] + "\n... (output truncated)"
                        message += f"STDOUT:\n{stdout}\n"

                return ToolResult(
                    success=result['success'],
                    message=message,
                    properties=result
                )

            except Exception as e:
                logger.error(f"Error in run_tests: {e}")
                return self.format_error(str(e))
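  • The class above assumes an MCPTool base class and a ToolResult container defined elsewhere in the package. The following is a minimal sketch of the contract they appear to provide, inferred from how RunTestsTool uses them; any detail beyond the names and fields used above is an assumption.
    from abc import ABC, abstractmethod
    from dataclasses import dataclass, field
    from typing import Any, Dict

    @dataclass
    class ToolResult:
        """Result container; RunTestsTool passes success, message, and properties."""
        success: bool
        message: str
        properties: Dict[str, Any] = field(default_factory=dict)

    class MCPTool(ABC):
        """Assumed base contract: name/description/input_schema plus async execute."""

        def __init__(self, workspace):
            self.workspace = workspace  # tools read config and paths from here

        @property
        @abstractmethod
        def name(self) -> str: ...

        @property
        @abstractmethod
        def description(self) -> str: ...

        @property
        @abstractmethod
        def input_schema(self) -> Dict[str, Any]: ...

        @abstractmethod
        async def execute(self, arguments: Dict[str, Any]) -> ToolResult: ...

        def format_error(self, error: str) -> ToolResult:
            # Assumed helper: wrap an error string in a failed ToolResult
            return ToolResult(success=False, message=f"Error: {error}")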
  • ToolRegistry registers a RunTestsTool instance in its _register_default_tools method.
    from moatless_mcp.tools.advanced_tools import (
        FindClassTool,
        FindFunctionTool,
        ViewCodeTool,
        SemanticSearchTool,
        RunTestsTool
    )
    from moatless_mcp.tools.vector_tools import (
        BuildVectorIndexTool,
        VectorIndexStatusTool,
        ClearVectorIndexTool
    )
    from moatless_mcp.tools.project_understand import ProjectUnderstandTool

    logger = logging.getLogger(__name__)

    class ToolRegistry:
        """Registry for managing MCP tools"""

        def __init__(self, workspace):
            self.workspace = workspace
            self.tools: Dict[str, MCPTool] = {}
            self._register_default_tools()

        def _register_default_tools(self):
            """Register all default tools"""
            # File operation tools
            tools = [
                ReadFileTool(self.workspace),
                WriteFileTool(self.workspace),
                ListFilesTool(self.workspace),
                StringReplaceTool(self.workspace),

                # Search tools
                GrepTool(self.workspace),
                FindFilesTool(self.workspace),
                WorkspaceInfoTool(self.workspace),

                # Advanced tools
                FindClassTool(self.workspace),
                FindFunctionTool(self.workspace),
                ViewCodeTool(self.workspace),
                SemanticSearchTool(self.workspace),
                RunTestsTool(self.workspace),

                # Vector database tools
                BuildVectorIndexTool(self.workspace),
                VectorIndexStatusTool(self.workspace),
                ClearVectorIndexTool(self.workspace),

                # Project Understand tools
                ProjectUnderstandTool(self.workspace),
            ]

            for tool in tools:
                self.register_tool(tool)

            logger.info(f"Registered {len(self.tools)} tools")

        def register_tool(self, tool: MCPTool):
            """Register a new tool"""
            self.tools[tool.name] = tool
            logger.debug(f"Registered tool: {tool.name}")
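  • With the registry in place, a server can dispatch incoming MCP tool calls by name. A minimal usage sketch follows; the handle_tool_call function and its call site are assumptions, not part of the source.
    from typing import Any, Dict

    async def handle_tool_call(registry: "ToolRegistry", name: str,
                               arguments: Dict[str, Any]):
        """Hypothetical dispatcher: look up a registered tool and execute it."""
        tool = registry.tools.get(name)
        if tool is None:
            raise ValueError(f"Unknown tool: {name}")
        return await tool.execute(arguments)

    # e.g.: result = await handle_tool_call(registry, "run_tests", {"detect_only": True})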
  • TestingFramework.run_tests: core method that detects the framework, builds the command, executes the subprocess, and parses the output; called by the RunTestsTool handler.
    async def run_tests(self, framework: Optional[str] = None, test_path: Optional[str] = None,
                        args: Optional[List[str]] = None) -> Dict[str, Any]:
        """Run tests using the specified framework.

        Args:
            framework: Testing framework to use (pytest, jest, maven, etc.)
            test_path: Specific test file or directory to run
            args: Additional arguments to pass to the test runner

        Returns:
            Dictionary with test results and output
        """
        try:
            # Auto-detect framework if not specified
            if not framework:
                detected = await self.detect_test_framework()
                available_frameworks = detected["detected_frameworks"]
                if not available_frameworks:
                    return {"error": "No testing framework detected"}
                framework = available_frameworks[0]  # Use first detected framework

            # Build test command
            command = await self._build_test_command(framework, test_path, args)
            if not command:
                return {"error": f"Unsupported or unconfigured framework: {framework}"}

            # Execute tests
            logger.info(f"Running tests with command: {' '.join(command)}")

            try:
                process = await asyncio.create_subprocess_exec(
                    *command,
                    cwd=str(self.workspace_root),
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                    env=None  # Use current environment
                )
                stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=300)  # 5 minute timeout
                return_code = process.returncode
            except asyncio.TimeoutError:
                return {"error": "Test execution timed out after 5 minutes"}
            except Exception as e:
                return {"error": f"Failed to execute tests: {str(e)}"}

            # Parse test results
            result = await self._parse_test_output(framework, stdout.decode('utf-8'), stderr.decode('utf-8'), return_code)
            result.update({
                "framework": framework,
                "command": ' '.join(command),
                "return_code": return_code
            })

            return result

        except Exception as e:
            logger.error(f"Error in run_tests: {e}")
            return {"error": f"Test execution failed: {str(e)}"}
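  • run_tests delegates command construction to _build_test_command, which is not shown on this page. Below is a plausible module-level stand-in for that helper, assuming a simple framework-to-command mapping; this is entirely a sketch, and the real implementation may consult project configuration instead.
    from typing import List, Optional

    async def build_test_command(framework: str,
                                 test_path: Optional[str] = None,
                                 args: Optional[List[str]] = None) -> Optional[List[str]]:
        """Illustrative stand-in for TestingFramework._build_test_command."""
        base_commands = {
            "pytest": ["python", "-m", "pytest"],
            "jest": ["npx", "jest"],
            "maven": ["mvn", "test"],
        }
        command = base_commands.get(framework)
        if command is None:
            return None  # run_tests then reports an unsupported framework
        if test_path:
            command = command + [test_path]
        if args:
            command = command + list(args)
        return command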