# run_tests_no_verbosity
Execute all tests with minimal output to quickly verify functionality without detailed logs.
## Instructions
Run all tests with minimal output (verbosity level 0).
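For context, a client invokes this tool with no arguments. Below is a minimal sketch assuming the standard `mcp` Python SDK client; the server launch command (`python log_analyzer_mcp_server.py`) is a hypothetical placeholder and should be adjusted to however this server is actually started.

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Hypothetical launch command -- replace with the real server entry point.
    server = StdioServerParameters(command="python", args=["log_analyzer_mcp_server.py"])
    async with stdio_client(server) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # run_tests_no_verbosity takes no arguments (see Input Schema below).
            result = await session.call_tool("run_tests_no_verbosity", {})
            print(result.content)


asyncio.run(main())
```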
## Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| No arguments | | | |
## Implementation Reference
- The main handler function for the `run_tests_no_verbosity` tool. It is registered via the `@mcp.tool()` decorator and delegates to the `_run_tests` helper with verbosity set to `"0"` for minimal output.

  ```python
  @mcp.tool()
  async def run_tests_no_verbosity() -> dict[str, Any]:
      """Run all tests with minimal output (verbosity level 0)."""
      return await _run_tests("0")
  ```
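  For this tool the helper receives `verbosity="0"` and no other options, so the constructed command resolves to `hatch test -- --ignore=tests/log_analyzer_mcp/test_log_analyzer_mcp_server.py --ignore=tests/log_analyzer_mcp/test_analyze_runtime_errors.py -q`, as the helper below shows.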
- The supporting utility function `_run_tests` contains the core logic: it executes tests via the `hatch test` command, configures verbosity, ignores specific test files to prevent recursion, captures output, saves it to a log file, and handles errors and timeouts.

  ```python
  async def _run_tests(
      verbosity: Any | None = None,
      agent: str | None = None,
      pattern: str | None = None,
      run_with_coverage: bool = False,
  ) -> dict[str, Any]:
      """Internal helper function to run tests using hatch.

      Args:
          verbosity: Optional verbosity level (0=minimal, 1=normal, 2=detailed for pytest)
          agent: Optional agent name to run only tests for that agent (e.g., 'qa_agent')
          pattern: Optional pattern to filter test files (e.g., 'test_qa_*.py')
          run_with_coverage: Whether to run tests with coverage enabled via 'hatch test --cover'.
      """
      logger.info(
          "Preparing to run tests via hatch (verbosity=%s, agent=%s, pattern=%s, coverage=%s)...",
          verbosity,
          agent,
          pattern,
          run_with_coverage,
      )

      hatch_base_cmd = ["hatch", "test"]
      pytest_args = []

      # ALWAYS add arguments to ignore the server integration tests to prevent recursion
      # when tests are run *by this tool*.
      pytest_args.extend(
          [
              "--ignore=tests/log_analyzer_mcp/test_log_analyzer_mcp_server.py",
              "--ignore=tests/log_analyzer_mcp/test_analyze_runtime_errors.py",
          ]
      )
      logger.debug("Added ignore patterns for server integration tests (tool-invoked run).")

      if run_with_coverage:
          hatch_base_cmd.append("--cover")
          logger.debug("Coverage enabled for hatch test run.")
          # Tell pytest not to activate its own coverage plugin, as 'coverage run' is handling it.
          pytest_args.append("-p")
          pytest_args.append("no:cov")
          logger.debug("Added '-p no:cov' to pytest arguments for coverage run.")

      # Verbosity for pytest: -q (0), default (1), -v (2), -vv (3+)
      if verbosity is not None:
          try:
              v_int = int(verbosity)
              if v_int == 0:
                  pytest_args.append("-q")
              elif v_int == 2:
                  pytest_args.append("-v")
              elif v_int >= 3:
                  pytest_args.append("-vv")
              # Default (verbosity=1) adds no pytest verbosity arg; relies on hatch default.
          except ValueError:
              logger.warning("Invalid verbosity value '%s', using default.", verbosity)

      # Construct a pytest -k argument if agent or pattern is specified.
      k_expressions = []
      if agent:
          # Assuming the agent name can be part of test names like test_agent_... or ..._agent_...
          k_expressions.append(f"{agent}")  # Constructing a command argument, not direct logging.
          logger.debug("Added agent '%s' to -k filter expressions.", agent)
      if pattern:
          k_expressions.append(pattern)
          logger.debug("Added pattern '%s' to -k filter expressions.", pattern)

      if k_expressions:
          pytest_args.extend(["-k", " or ".join(k_expressions)])  # pytest -k "expr1 or expr2"

      hatch_cmd = hatch_base_cmd
      if pytest_args:
          # Pass pytest arguments after --
          hatch_cmd.extend(["--"] + pytest_args)

      logger.info("Constructed hatch command: %s", " ".join(hatch_cmd))

      # Ensure the log file is cleared or managed before the test run if it is always
      # written to the same path. For now, assuming log_analyzer.py handles this or we
      # analyze the latest run.
      test_log_output_path = os.path.join(logs_base_dir, "run_all_tests.log")
      logger.debug("Expected test output log path for analysis: %s", test_log_output_path)

      try:
          # Run the command via anyio.to_thread to avoid blocking the asyncio event loop.
          # text=True decodes stdout/stderr to str automatically.
          process = await anyio.to_thread.run_sync(  # type: ignore[attr-defined]
              functools.partial(
                  subprocess.run,
                  hatch_cmd,
                  capture_output=True,
                  text=True,  # Decode stdout/stderr as text (usually UTF-8)
                  check=False,  # Don't raise for non-zero exit; handle manually
                  timeout=120,
              )
          )

          stdout_output: str = process.stdout
          stderr_output: str = process.stderr
          rc = process.returncode

          if rc not in [0, 1, 5]:
              logger.error("Hatch test command failed with unexpected pytest return code: %s", rc)
              logger.error("STDOUT:\n%s", stdout_output)
              logger.error("STDERR:\n%s", stderr_output)
              return {
                  "success": False,
                  "error": f"Test execution failed with code {rc}",
                  "test_output": stdout_output + "\n" + stderr_output,
                  "analysis_log_path": None,
              }

          logger.debug("Saving combined stdout/stderr from hatch test to %s", test_log_output_path)
          with open(test_log_output_path, "w", encoding="utf-8") as f:
              f.write(stdout_output)
              f.write("\n")
              f.write(stderr_output)
          logger.debug("Content saved to %s", test_log_output_path)

          # _run_tests now only runs tests and saves the log; analysis is done by the
          # analyze_tests tool or by the caller if needed. If an agent was specified,
          # the caller might want to know, but for now only the essential info is returned.
          return {
              "success": True,
              "return_code": rc,
              "test_output": stdout_output + "\n" + stderr_output,
              "analysis_log_path": test_log_output_path,  # Path to the log for analysis
          }

      except subprocess.TimeoutExpired as e:
          # On timeout, TimeoutExpired carries the output captured so far as bytes.
          stdout_output = e.stdout.decode("utf-8", errors="replace") if e.stdout else ""
          stderr_output = e.stderr.decode("utf-8", errors="replace") if e.stderr else ""
          stderr_output += "\nError: Test execution timed out after 120 seconds."
          rc = 1  # Indicate failure
          logger.error("Test execution in _run_tests timed out: %s", e)
          return {
              "success": False,
              "error": stderr_output,
              "test_output": stdout_output + "\n" + stderr_output,
              "analysis_log_path": None,
          }
      except Exception as e:  # pylint: disable=broad-exception-caught
          logger.error("An unexpected error occurred in _run_tests: %s", e, exc_info=True)
          # Recover output only if the subprocess completed before the failure;
          # otherwise no output is available at this point.
          final_stdout = ""
          final_stderr = ""
          if "stdout_output" in locals() and "stderr_output" in locals():
              final_stdout = stdout_output
              final_stderr = stderr_output
          return {
              "success": False,
              "error": f"Unexpected error: {e}",
              "test_output": final_stdout + "\n" + final_stderr,
              "analysis_log_path": None,
          }
  ```
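  Based on the return statements above, a successful run yields a payload of the following shape (values are illustrative; the actual `analysis_log_path` is built from `logs_base_dir`):

  ```python
  {
      "success": True,
      "return_code": 0,  # pytest exit codes 0, 1, and 5 are all treated as a completed run
      "test_output": "<combined stdout and stderr from 'hatch test'>",
      "analysis_log_path": "logs/run_all_tests.log",  # illustrative path
  }
  ```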