"""MCP server implementation for code checking functionality."""
import logging
import os
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Protocol, TypeVar
import structlog
# Import all code checking modules at the top
from mcp_code_checker.code_checker_mypy import MypyResult, get_mypy_prompt
from mcp_code_checker.code_checker_pylint import PylintMessageType, get_pylint_prompt
from mcp_code_checker.code_checker_pytest.reporting import (
MAX_FAILURES,
MAX_OUTPUT_LINES,
SMALL_TEST_RUN_THRESHOLD,
create_prompt_for_failed_tests,
should_show_details,
)
from mcp_code_checker.code_checker_pytest.runners import check_code_with_pytest
from mcp_code_checker.log_utils import log_function_call
from mcp_code_checker.utils.data_files import find_data_file
from mcp_code_checker.utils.subprocess_runner import execute_command
# Type definitions for FastMCP
T = TypeVar("T")
class ToolDecorator(Protocol):
def __call__(self, func: Callable[..., T]) -> Callable[..., T]: ...
class FastMCPProtocol(Protocol):
def tool(self) -> ToolDecorator: ...
def run(self) -> None: ...
# Initialize loggers
logger = logging.getLogger(__name__)
structured_logger = structlog.get_logger(__name__)
class CodeCheckerServer:
"""MCP server for code checking functionality."""
def __init__(
self,
project_dir: Path,
python_executable: Optional[str] = None,
venv_path: Optional[str] = None,
test_folder: str = "tests",
keep_temp_files: bool = False,
) -> None:
"""
Initialize the server with the project directory and Python configuration.
Args:
project_dir: Path to the project directory to check
python_executable: Optional path to the Python interpreter used when running checks. If None, defaults to sys.executable.
venv_path: Optional path to a virtual environment to activate for running tests. When specified, the Python executable from this venv will be used instead of python_executable.
test_folder: Path to the test folder (relative to project_dir). Defaults to 'tests'.
keep_temp_files: Whether to keep temporary files after test execution. Useful for debugging when tests fail.
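Example (the path is a placeholder):
server = CodeCheckerServer(Path("/path/to/project"), keep_temp_files=True)
server.run()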
"""
self.project_dir = project_dir
self.python_executable = python_executable
self.venv_path = venv_path
self.test_folder = test_folder
self.keep_temp_files = keep_temp_files
# Import FastMCP
from mcp.server.fastmcp import FastMCP
self.mcp: FastMCPProtocol = FastMCP("Code Checker Service")
self._register_tools()
def _format_pylint_result(self, pylint_prompt: Optional[str]) -> str:
"""Format pylint check result."""
if pylint_prompt is None:
return "Pylint check completed. No issues found that require attention."
return f"Pylint found issues that need attention:\n\n{pylint_prompt}"
def _format_pytest_result(self, test_results: dict[str, Any]) -> str:
"""Format pytest check result."""
# Kept for backward compatibility; delegates to
# _format_pytest_result_with_details with show_details=False.
return self._format_pytest_result_with_details(test_results, show_details=False)
def _format_pytest_result_with_details(
self, test_results: dict[str, Any], show_details: bool
) -> str:
"""Enhanced formatting that respects show_details parameter."""
if not test_results["success"]:
return f"Error running pytest: {test_results.get('error', 'Unknown error')}"
summary = test_results.get("summary", {})
if not isinstance(summary, dict):
return "Error: Invalid test summary format"
# Summary counts may be None; coerce them to 0
failed_count = summary.get("failed") or 0
error_count = summary.get("error") or 0
passed_count = summary.get("passed") or 0
collected = summary.get("collected") or 0
# Determine if we have failures that need attention
failures_exist = (failed_count > 0 or error_count > 0) and test_results.get(
"test_results"
)
if failures_exist:
should_show = should_show_details(test_results, show_details)
if should_show:
# Build a detailed failure prompt, bounded by the shared reporting limits
failed_tests_prompt = create_prompt_for_failed_tests(
test_results["test_results"],
max_number_of_tests_reported=MAX_FAILURES,
include_print_output=True,
max_failures=MAX_FAILURES,
max_output_lines=MAX_OUTPUT_LINES,
)
return (
f"Pytest found issues that need attention:\n\n{failed_tests_prompt}"
)
else:
# Check if we should suggest show_details=True for small test runs
hint = (
" Try show_details=True for more information."
if collected <= SMALL_TEST_RUN_THRESHOLD
else ""
)
return f"Pytest completed with failures.{hint}"
else:
# Success case: prefer the runner-provided summary text, otherwise report the pass count
if test_results.get("summary_text"):
return f"Pytest check completed. {test_results['summary_text']}"
else:
return f"Pytest check completed. All {passed_count} tests passed successfully."
def _format_mypy_result(self, mypy_prompt: str | None) -> str:
"""Format mypy check result."""
if mypy_prompt is None:
return "Mypy check completed. No type errors found."
return f"Mypy found type issues that need attention:\n\n{mypy_prompt}"
def _parse_pylint_categories(
self, categories: Optional[List[str]]
) -> Optional[set[PylintMessageType]]:
"""Parse string categories into PylintMessageType enum values."""
if not categories:
return None
pylint_categories = set()
for category in categories:
try:
pylint_categories.add(PylintMessageType(category.lower()))
except ValueError:
logger.warning(f"Unknown pylint category: {category}")
return pylint_categories if pylint_categories else None
def _find_sleep_script(self) -> Path:
"""
Find the sleep script location, supporting both development and installed environments.
Returns:
Path to the sleep_script.py file
Raises:
FileNotFoundError: If the script cannot be found in any expected location
"""
return find_data_file(
package_name="mcp_code_checker.resources",
relative_path="sleep_script.py",
development_base_dir=self.project_dir,
)
def _register_tools(self) -> None:
"""Register all tools with the MCP server."""
@self.mcp.tool()
@log_function_call
def run_pylint_check(
categories: Optional[List[str]] = None,
disable_codes: Optional[List[str]] = None,
target_directories: Optional[List[str]] = None,
) -> str:
"""
Run pylint on the project code and generate smart prompts for LLMs.
Args:
categories: Optional list of pylint message categories to include.
Available categories: 'convention', 'refactor', 'warning', 'error', 'fatal'
Defaults to ['error', 'fatal'] if not specified.
Examples:
- ['error', 'fatal'] - Only critical issues (default)
- ['error', 'fatal', 'warning'] - Include warnings
- ['convention', 'refactor'] - Only style and refactoring suggestions
- ['convention', 'refactor', 'warning', 'error', 'fatal'] - All categories
disable_codes: Optional list of pylint error codes to disable during analysis.
Common codes to disable include:
- C0114: Missing module docstring
- C0116: Missing function docstring
- C0301: Line too long
- W0611: Unused import
- W1514: Using open without explicitly specifying an encoding
target_directories: Optional list of directories to analyze relative to project_dir.
Defaults to ["src"] and conditionally "tests" if it exists.
Examples:
- ["src"] - Analyze only source code
- ["src", "tests"] - Analyze both source and tests (default)
- ["mypackage", "tests"] - Custom package structure
- ["."] - Analyze entire project (may be slow)
Returns:
A string containing either pylint results or a prompt for an LLM to interpret
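Examples (illustrative):
# Only critical issues in the default target directories
run_pylint_check()
# Include warnings, but silence missing-docstring messages
run_pylint_check(
categories=["error", "fatal", "warning"],
disable_codes=["C0114", "C0116"],
)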
"""
try:
logger.info(
f"Running pylint check on project directory: {self.project_dir}"
)
structured_logger.info(
"Starting pylint check",
project_dir=str(self.project_dir),
categories=categories,
disable_codes=disable_codes,
target_directories=target_directories,
)
# Convert categories and run pylint
pylint_categories = self._parse_pylint_categories(categories)
pylint_prompt = get_pylint_prompt(
str(self.project_dir),
categories=pylint_categories,
disable_codes=disable_codes,
python_executable=self.python_executable,
target_directories=target_directories,
)
result = self._format_pylint_result(pylint_prompt)
structured_logger.info(
"Pylint check completed",
issues_found=pylint_prompt is not None,
result_length=len(result),
)
return result
except Exception as e:
logger.error(f"Error running pylint check: {str(e)}")
structured_logger.error(
"Pylint check failed",
error=str(e),
error_type=type(e).__name__,
project_dir=str(self.project_dir),
)
raise
@self.mcp.tool()
@log_function_call
def run_pytest_check(
markers: Optional[List[str]] = None,
verbosity: int = 2,
extra_args: Optional[List[str]] = None,
env_vars: Optional[Dict[str, str]] = None,
show_details: bool = False,
) -> str:
"""
Run pytest on the project code and generate smart prompts for LLMs.
Args:
markers: Optional list of pytest markers to filter tests. Examples: ['slow', 'integration']
verbosity: Integer for pytest verbosity level (0-3), default 2.
Controls pytest's native -v/-vv/-vvv flags for test execution detail.
extra_args: Optional list of additional pytest arguments for flexible test selection.
Examples: ['tests/test_file.py::test_function']
See "Flexible Test Selection" section below for common patterns.
env_vars: Optional dictionary of environment variables for the subprocess.
show_details: Show detailed output including print statements from tests (default: False).
- False: Only show a summary for large test runs, with helpful hints for small runs
- True: Show detailed output for up to 10 failing tests, or all details if ≤3 tests total
- Automatically adds `-s` flag to enable print statement visibility
- Collection errors always shown regardless of setting
- Output limited to 300 lines total with truncation indicator
Smart behavior: provides hints when show_details=True would be beneficial.
Returns:
A string containing either pytest results or a prompt for an LLM to interpret
Flexible Test Selection:
Use extra_args to run specific tests or control pytest behavior:
# Specific tests
extra_args=["tests/test_math.py::test_addition"]
extra_args=["tests/test_auth.py"] # Entire file
extra_args=["-k", "calculation"] # Pattern matching
# Output control
extra_args=["-s"] # Show print statements
extra_args=["--tb=short"] # Short tracebacks
# Execution control
extra_args=["-x"] # Stop on first failure
Examples:
# Standard CI run - minimal output
run_pytest_check()
# Debug specific test with full details
run_pytest_check(
extra_args=["tests/test_math.py::test_calculation"],
show_details=True
)
# Integration test run with summary only
run_pytest_check(markers=["integration"], show_details=False)
# Get print statements with automatic -s flag
run_pytest_check(show_details=True) # Automatically includes -s
"""
try:
logger.info(
f"Running pytest check on project directory: {self.project_dir}"
)
structured_logger.info(
"Starting pytest check",
project_dir=str(self.project_dir),
test_folder=self.test_folder,
markers=markers,
verbosity=verbosity,
extra_args=extra_args,
)
# Automatically add -s flag when show_details=True
final_extra_args = list(extra_args) if extra_args else []
if show_details and "-s" not in final_extra_args:
final_extra_args.append("-s")
# Run pytest
test_results = check_code_with_pytest(
project_dir=str(self.project_dir),
test_folder=self.test_folder,
python_executable=self.python_executable,
markers=markers,
verbosity=verbosity,
extra_args=final_extra_args,
env_vars=env_vars,
venv_path=self.venv_path,
keep_temp_files=self.keep_temp_files,
)
result = self._format_pytest_result_with_details(
test_results, show_details
)
if test_results.get("success"):
summary = test_results.get("summary", {})
structured_logger.info(
"Pytest execution completed",
passed=summary.get("passed", 0) or 0,
failed=summary.get("failed", 0) or 0,
errors=summary.get("error", 0) or 0,
duration=summary.get("duration", 0) or 0,
)
else:
structured_logger.error(
"Pytest execution failed",
error=test_results.get("error", "Unknown error"),
)
return result
except Exception as e:
logger.error(f"Error running pytest check: {str(e)}")
structured_logger.error(
"Pytest check failed",
error=str(e),
error_type=type(e).__name__,
project_dir=str(self.project_dir),
)
raise
@self.mcp.tool()
@log_function_call
def run_mypy_check(
strict: bool = True,
disable_error_codes: list[str] | None = None,
target_directories: list[str] | None = None,
follow_imports: str | None = None,
cache_dir: str | None = None,
) -> str:
"""
Run mypy type checking on the project code.
Args:
strict: Use strict mode settings (default: True).
When True, applies comprehensive type checking with flags like
--strict, --warn-redundant-casts, --warn-unused-ignores, etc.
disable_error_codes: Optional list of mypy error codes to ignore.
Common codes to disable:
- 'import': Import-related errors
- 'arg-type': Argument type mismatches
- 'no-untyped-def': Missing type annotations
- 'attr-defined': Attribute not defined errors
- 'var-annotated': Missing variable annotations
target_directories: Optional list of directories to check relative to project_dir.
Defaults to ["src"] and conditionally "tests" if it exists.
Examples:
- ["src"] - Check only source code
- ["src", "tests"] - Check both source and tests
- ["mypackage"] - Check custom package
- ["."] - Check entire project
follow_imports: How to handle imports during type checking.
Options:
- 'normal' (default): Follow and type check imported modules
- 'silent': Follow imports but suppress errors in imported modules
- 'skip': Don't follow imports, only check specified files
- 'error': Error if imports cannot be followed
cache_dir: Optional custom cache directory for incremental checking.
Mypy uses caching to speed up subsequent runs.
Defaults to .mypy_cache in the project directory.
Returns:
A string containing mypy results or a prompt for an LLM to interpret
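Examples (illustrative):
# Strict check of the default target directories
run_mypy_check()
# Relaxed check of the source tree only, ignoring import errors
run_mypy_check(
strict=False,
target_directories=["src"],
disable_error_codes=["import"],
)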
"""
try:
logger.info(
f"Running mypy check on project directory: {self.project_dir}"
)
structured_logger.info(
"Starting mypy check",
project_dir=str(self.project_dir),
strict=strict,
disable_error_codes=disable_error_codes,
target_directories=target_directories,
)
# Run mypy check
mypy_prompt = get_mypy_prompt(
str(self.project_dir),
strict=strict,
disable_error_codes=disable_error_codes,
python_executable=self.python_executable,
target_directories=target_directories,
follow_imports=follow_imports,
cache_dir=cache_dir,
)
# Format result
result = self._format_mypy_result(mypy_prompt)
structured_logger.info(
"Mypy check completed",
issues_found=mypy_prompt is not None,
result_length=len(result),
)
return result
except Exception as e:
logger.error(f"Error running mypy check: {str(e)}")
structured_logger.error(
"Mypy check failed",
error=str(e),
error_type=type(e).__name__,
project_dir=str(self.project_dir),
)
raise
@self.mcp.tool()
@log_function_call
def run_all_checks(
markers: Optional[List[str]] = None,
verbosity: int = 2,
extra_args: Optional[List[str]] = None,
env_vars: Optional[Dict[str, str]] = None,
categories: Optional[List[str]] = None,
target_directories: Optional[List[str]] = None,
mypy_strict: bool = True,
mypy_disable_codes: list[str] | None = None,
) -> str:
"""
Run all code checks (pylint, pytest, and mypy) and generate combined results.
Args:
markers: Optional list of pytest markers to filter tests. Examples: ['slow', 'integration']
verbosity: Integer for pytest verbosity level (0-3), default 2. Higher values provide more detailed output.
extra_args: Optional list of additional pytest arguments. Examples: ['-xvs', '--no-header']
env_vars: Optional dictionary of environment variables for the subprocess. Example: {'DEBUG': '1', 'PYTHONPATH': '/custom/path'}
categories: Optional list of pylint message categories to include.
Available categories: 'convention', 'refactor', 'warning', 'error', 'fatal'
Defaults to ['error', 'fatal'] if not specified.
target_directories: Optional list of directories to analyze relative to project_dir.
Defaults to ["src"] and conditionally "tests" if it exists.
Examples:
- ["src"] - Analyze only source code
- ["src", "tests"] - Analyze both source and tests (default)
- ["mypackage", "tests"] - Custom package structure
- ["."] - Analyze entire project (may be slow)
mypy_strict: Use strict mode for mypy (default: True)
mypy_disable_codes: Optional list of mypy error codes to disable
Returns:
A string containing results from all checks and/or LLM prompts
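Examples (illustrative):
# Full sweep with default settings
run_all_checks()
# Integration tests only, pylint errors only, non-strict mypy
run_all_checks(
markers=["integration"],
categories=["error"],
mypy_strict=False,
)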
"""
try:
logger.info(
f"Running all code checks on project directory: {self.project_dir}"
)
structured_logger.info(
"Starting all code checks",
project_dir=str(self.project_dir),
test_folder=self.test_folder,
)
# Run pylint
pylint_categories = self._parse_pylint_categories(categories)
pylint_prompt = get_pylint_prompt(
str(self.project_dir),
categories=pylint_categories,
python_executable=self.python_executable,
target_directories=target_directories,
)
# Run pytest
test_results = check_code_with_pytest(
project_dir=str(self.project_dir),
test_folder=self.test_folder,
python_executable=self.python_executable,
markers=markers,
verbosity=verbosity,
extra_args=extra_args,
env_vars=env_vars,
venv_path=self.venv_path,
keep_temp_files=self.keep_temp_files,
)
# Run mypy
mypy_prompt = get_mypy_prompt(
str(self.project_dir),
strict=mypy_strict,
disable_error_codes=mypy_disable_codes,
python_executable=self.python_executable,
target_directories=target_directories,
)
# Format results
pylint_result = self._format_pylint_result(pylint_prompt)
pytest_result = self._format_pytest_result(test_results)
mypy_result = self._format_mypy_result(mypy_prompt)
# Combine results
result = "All code checks completed:\n\n"
result += f"1. Pylint: {pylint_result}\n\n"
result += f"2. Pytest: {pytest_result}\n\n"
result += f"3. Mypy: {mypy_result}"
structured_logger.info(
"All code checks completed",
pylint_issues_found=pylint_prompt is not None,
pytest_success=test_results.get("success", False),
mypy_issues_found=mypy_prompt is not None,
)
return result
except Exception as e:
logger.error(f"Error running all code checks: {str(e)}")
structured_logger.error(
"All code checks failed",
error=str(e),
error_type=type(e).__name__,
project_dir=str(self.project_dir),
)
raise
@self.mcp.tool()
@log_function_call
def second_sleep(sleep_seconds: float = 5.0) -> str:
"""
Sleep for the specified number of seconds by running the bundled sleep script in a subprocess.
Args:
sleep_seconds: Number of seconds to sleep (default: 5.0, max: 300 for safety)
Returns:
A string indicating the sleep operation result
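Example (illustrative):
second_sleep(10.0)  # blocks for roughly 10 seconds via the bundled sleep script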
"""
# Validate input
if not 0 <= sleep_seconds <= 300:
raise ValueError("Sleep seconds must be between 0 and 300")
try:
# Find sleep script (supports both development and installed environments)
sleep_script = self._find_sleep_script()
structured_logger.info(
"Sleep script found successfully",
sleep_script=str(sleep_script),
exists=sleep_script.exists(),
)
except FileNotFoundError as e:
structured_logger.error(
"Sleep script not found - detailed search failure",
error=str(e),
package_name="mcp_code_checker.resources",
relative_path="sleep_script.py",
project_dir=str(self.project_dir),
)
raise FileNotFoundError(f"Sleep script not found: {str(e)}") from e
# Build command
python_exe = self.python_executable or "python"
command = [python_exe, "-u", str(sleep_script), str(sleep_seconds)]
# Execute with timeout buffer
env = os.environ.copy()
env["PYTHONUNBUFFERED"] = "1"
result = execute_command(
command,
cwd=str(self.project_dir),
timeout_seconds=int(sleep_seconds) + 30,
env=env,
)
# Always show the actual stdout to help with debugging
if result.return_code == 0:
stdout_content = result.stdout.strip()
structured_logger.info(
"Sleep operation completed successfully",
sleep_seconds=sleep_seconds,
return_code=result.return_code,
stdout_length=len(result.stdout),
stderr_length=len(result.stderr),
timed_out=result.timed_out,
execution_error=result.execution_error,
)
return f"Sleep operation stdout: {stdout_content}"
else:
structured_logger.error(
"Sleep operation failed",
sleep_seconds=sleep_seconds,
return_code=result.return_code,
stdout_length=len(result.stdout),
stderr_length=len(result.stderr),
timed_out=result.timed_out,
execution_error=result.execution_error,
)
return f"Sleep failed (code {result.return_code}): stdout='{result.stdout}', stderr='{result.stderr}'"
@log_function_call
def run(self) -> None:
"""Run the MCP server."""
logger.info("Starting MCP server")
structured_logger.info("Starting MCP server")
self.mcp.run()
@log_function_call
def create_server(
project_dir: Path,
python_executable: Optional[str] = None,
venv_path: Optional[str] = None,
test_folder: str = "tests",
keep_temp_files: bool = False,
) -> CodeCheckerServer:
"""
Create a new CodeCheckerServer instance.
Args:
project_dir: Path to the project directory to check
python_executable: Optional path to the Python interpreter used when running checks. If None, defaults to sys.executable.
venv_path: Optional path to a virtual environment to activate for running tests. When specified, the Python executable from this venv will be used instead of python_executable.
test_folder: Path to the test folder (relative to project_dir). Defaults to 'tests'.
keep_temp_files: Whether to keep temporary files after test execution. Useful for debugging when tests fail.
Returns:
A new CodeCheckerServer instance
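Example (paths are placeholders):
server = create_server(Path("/path/to/project"), venv_path="/path/to/.venv")
server.run()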
"""
return CodeCheckerServer(
project_dir,
python_executable=python_executable,
venv_path=venv_path,
test_folder=test_folder,
keep_temp_files=keep_temp_files,
)