run_tests_very_verbose
Execute all tests with maximum verbosity (level 2) to generate detailed outputs, enabling thorough analysis and debugging of test results in the Log Analyzer MCP environment.
Instructions
Run all tests with very verbose output (verbosity level 2).
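Since the tool takes no arguments, invoking it only requires the tool name. Below is a minimal client-side sketch using the official MCP Python SDK over stdio; the server launch command (`log-analyzer-mcp`) is an assumption, so substitute however the Log Analyzer MCP server is actually started in your setup.

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Assumption: the server is exposed as a console script named "log-analyzer-mcp".
    server_params = StdioServerParameters(command="log-analyzer-mcp", args=[])
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # run_tests_very_verbose takes no arguments, so pass an empty mapping.
            result = await session.call_tool("run_tests_very_verbose", arguments={})
            for item in result.content:
                print(item)


asyncio.run(main())
```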
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| No arguments | | | |
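Because the handler declares no parameters, the advertised input schema is essentially an empty object schema. The sketch below is illustrative only; the exact schema is generated automatically by the MCP framework from the handler signature.

```python
# Illustrative shape of an empty input schema for a no-argument tool.
RUN_TESTS_VERY_VERBOSE_INPUT_SCHEMA = {
    "type": "object",
    "properties": {},
    "required": [],
}
```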
Implementation Reference
- The handler function, decorated with `@mcp.tool()`, registers and implements the tool by calling the `_run_tests` helper with verbosity level 2 and coverage enabled:

```python
@mcp.tool()
async def run_tests_very_verbose() -> dict[str, Any]:
    """Run all tests with very verbose output (verbosity level 2)."""
    logger.info("Running tests with verbosity 2...")
    return await _run_tests(verbosity=2, run_with_coverage=True)
```
- The core helper `_run_tests` executes the test suite via `hatch test`: it builds the command with the requested verbosity, always ignores the server integration tests to prevent recursion, optionally enables coverage, captures the combined output, saves it to a log file, and returns structured results including the success status and the log path:

```python
async def _run_tests(
    verbosity: Any | None = None,
    agent: str | None = None,
    pattern: str | None = None,
    run_with_coverage: bool = False,
) -> dict[str, Any]:
    """Internal helper function to run tests using hatch.

    Args:
        verbosity: Optional verbosity level (0=minimal, 1=normal, 2=detailed for pytest)
        agent: Optional agent name to run only tests for that agent (e.g., 'qa_agent')
        pattern: Optional pattern to filter test files (e.g., 'test_qa_*.py')
        run_with_coverage: Whether to run tests with coverage enabled via 'hatch test --cover'.
    """
    logger.info(
        "Preparing to run tests via hatch (verbosity=%s, agent=%s, pattern=%s, coverage=%s)...",
        verbosity,
        agent,
        pattern,
        run_with_coverage,
    )

    hatch_base_cmd = ["hatch", "test"]
    pytest_args = []

    # ALWAYS add arguments to ignore the server integration tests to prevent recursion
    # when tests are run *by this tool*.
    pytest_args.extend(
        [
            "--ignore=tests/log_analyzer_mcp/test_log_analyzer_mcp_server.py",
            "--ignore=tests/log_analyzer_mcp/test_analyze_runtime_errors.py",
        ]
    )
    logger.debug("Added ignore patterns for server integration tests (tool-invoked run).")

    if run_with_coverage:
        hatch_base_cmd.append("--cover")
        logger.debug("Coverage enabled for hatch test run.")
        # Tell pytest not to activate its own coverage plugin, as 'coverage run' is handling it.
        pytest_args.append("-p")
        pytest_args.append("no:cov")
        logger.debug("Added '-p no:cov' to pytest arguments for coverage run.")

    # Verbosity for pytest: -q (0), (1), -v (2), -vv (3+)
    if verbosity is not None:
        try:
            v_int = int(verbosity)
            if v_int == 0:
                pytest_args.append("-q")
            elif v_int == 2:
                pytest_args.append("-v")
            elif v_int >= 3:
                pytest_args.append("-vv")
            # Default (verbosity=1) means no specific pytest verbosity arg, relies on hatch default
        except ValueError:
            logger.warning("Invalid verbosity value '%s', using default.", verbosity)

    # Construct pytest -k argument if agent or pattern is specified
    k_expressions = []
    if agent:
        # Assuming agent name can be part of test names like test_agent_... or ..._agent_...
        k_expressions.append(f"{agent}")  # This f-string is for constructing a command argument, not direct logging.
        logger.debug("Added agent '%s' to -k filter expressions.", agent)
    if pattern:
        k_expressions.append(pattern)
        logger.debug("Added pattern '%s' to -k filter expressions.", pattern)

    if k_expressions:
        pytest_args.extend(["-k", " or ".join(k_expressions)])  # pytest -k "expr1 or expr2"

    hatch_cmd = hatch_base_cmd
    if pytest_args:
        # Pass pytest arguments after --
        hatch_cmd.extend(["--"] + pytest_args)

    logger.info("Constructed hatch command: %s", " ".join(hatch_cmd))

    # Ensure the log file is cleared or managed before test run if it's always written to the same path
    # For now, assuming log_analyzer.py handles this or we analyze the latest run.
    test_log_output_path = os.path.join(logs_base_dir, "run_all_tests.log")  # RESTORED logs_base_dir
    logger.debug("Expected test output log path for analysis: %s", test_log_output_path)

    try:
        # Run the command using anyio.to_thread to avoid blocking asyncio event loop
        # Ensure text=True for automatic decoding of stdout/stderr to string
        process = await anyio.to_thread.run_sync(  # type: ignore[attr-defined]
            functools.partial(
                subprocess.run,
                hatch_cmd,
                capture_output=True,
                text=True,  # Decode stdout/stderr as text (usually UTF-8)
                check=False,  # Don't raise exception for non-zero exit, handle manually
                timeout=120,  # Add timeout
            )
        )

        stdout_output: str = process.stdout
        stderr_output: str = process.stderr
        rc = process.returncode

        if rc not in [0, 1, 5]:
            logger.error("Hatch test command failed with unexpected pytest return code: %s", rc)
            logger.error("STDOUT:\n%s", stdout_output)
            logger.error("STDERR:\n%s", stderr_output)
            return {
                "success": False,
                "error": f"Test execution failed with code {rc}",
                "test_output": stdout_output + "\n" + stderr_output,
                "analysis_log_path": None,
            }

        logger.debug("Saving combined stdout/stderr from hatch test to %s", test_log_output_path)
        with open(test_log_output_path, "w", encoding="utf-8") as f:
            f.write(stdout_output)
            f.write("\n")
            f.write(stderr_output)
        logger.debug("Content saved to %s", test_log_output_path)

        # _run_tests now only runs tests and saves the log.
        # Analysis is done by the analyze_tests tool or by the caller if needed.
        # The old log_analyzer.main() call is removed.

        # If an agent was specified, the caller of _run_tests might want to know.
        # We can still populate this in the result.
        if agent:
            # analysis_to_return is None, so we can create a small dict or add to a base dict
            # For now, let's just focus on returning the essential info
            pass

        return {
            "success": True,
            "return_code": rc,
            "test_output": stdout_output + "\n" + stderr_output,
            "analysis_log_path": test_log_output_path,  # Provide path to the log for analysis
            # "analysis" field is removed from here as _run_tests no longer parses.
        }

    except subprocess.TimeoutExpired as e:
        stdout_output = e.stdout.decode("utf-8", errors="replace") if e.stdout else ""
        stderr_output = e.stderr.decode("utf-8", errors="replace") if e.stderr else ""
        stderr_output += "\nError: Test execution timed out after 120 seconds."
        rc = 1  # Indicate failure
        logger.error("Test execution in _run_tests timed out: %s", e)
        return {
            "success": False,
            "error": stderr_output,
            "test_output": stdout_output + "\n" + stderr_output,
            "analysis_log_path": None,
        }
    except Exception as e:  # pylint: disable=broad-exception-caught
        logger.error("An unexpected error occurred in _run_tests: %s", e, exc_info=True)
        # Capture output if process started
        final_stdout = ""
        final_stderr = ""
        if "stdout_output" in locals() and "stderr_output" in locals():  # Check if communicate() was reached
            final_stdout = stdout_output
            final_stderr = stderr_output
        # else: process might not have been initialized or communicate not called.
        # No direct access to process.stdout/stderr here as it's out of 'with' scope.
        return {
            "success": False,
            "error": f"Unexpected error: {e}",
            "test_output": final_stdout + "\n" + final_stderr,
            "analysis_log_path": None,
        }
```
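For this tool's fixed arguments (`verbosity=2`, `run_with_coverage=True`), tracing the construction logic above yields the following argument list passed to `subprocess.run`:

```python
# Command assembled by _run_tests for run_tests_very_verbose (verbosity=2, coverage enabled).
expected_cmd = [
    "hatch", "test", "--cover",
    "--",
    "--ignore=tests/log_analyzer_mcp/test_log_analyzer_mcp_server.py",
    "--ignore=tests/log_analyzer_mcp/test_analyze_runtime_errors.py",
    "-p", "no:cov",
    "-v",
]
```

And a hypothetical sketch of how a direct caller might consume the returned dictionary (the field names come from `_run_tests` above; the handling itself is illustrative):

```python
# Inside an async context, e.g. a test that imports the handler directly.
result = await run_tests_very_verbose()
if result["success"]:
    # "success" covers pytest return codes 0, 1, and 5; a return_code of 1 still means failing tests.
    print(f"Test run completed with return code {result['return_code']}")
    print(f"Combined stdout/stderr saved to: {result['analysis_log_path']}")
else:
    print(f"Test execution error: {result['error']}")
```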