"""Gradle wrapper interface for executing Gradle commands."""
import asyncio
import logging
import os
import re
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING
from pydantic import BaseModel
if TYPE_CHECKING:
from fastmcp import Context
from gradle_mcp.dashboard.daemon_monitor import DaemonMonitor
# Import log_store at runtime (not just type checking) for actual logging
try:
from gradle_mcp.dashboard import log_store as _log_store
from gradle_mcp.dashboard import emit_daemons_changed as _emit_daemons_changed
except ImportError:
_log_store = None
_emit_daemons_changed = None
# Configure logger for Gradle output
# NOTE(review): logging goes to stderr — presumably to keep stdout free for
# the MCP transport; confirm against the server entry point.
logger = logging.getLogger("gradle_mcp.gradle")
logger.setLevel(logging.DEBUG)
# Add handler to stderr if not already configured
# (guards against attaching duplicate handlers on re-import).
if not logger.handlers:
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(logging.DEBUG)
    # Message-only format: Gradle output lines are passed through verbatim.
    formatter = logging.Formatter("%(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
@dataclass
class GradleProject:
    """Represents a Gradle project."""

    name: str  # Gradle project path, e.g. ":" for the root project or ":app"
    path: str  # Filesystem path associated with the project (workspace root)
    description: str | None = None  # Optional human-readable description
@dataclass
class GradleTask:
    """Represents a Gradle task."""

    name: str  # Task name, e.g. "build"
    project: str  # Owning project path, e.g. ":app"
    description: str | None = None  # Optional task description
    group: str | None = None  # Optional task group name, e.g. "build"
@dataclass
class TaskWithDescription:
    """Represents a task with its description."""

    name: str  # Task name
    description: str  # Task description; "" when Gradle reports none
@dataclass
class GroupedTasks:
    """Represents tasks grouped by their group name.

    When include_descriptions is True, tasks is a list of TaskWithDescription.
    When include_descriptions is False, tasks is a list of task names (strings).
    """

    group: str  # Group header as printed by "gradle tasks", e.g. "Build"
    tasks: list[TaskWithDescription] | list[str]  # See docstring for which variant
class CompilationError(BaseModel):
    """A single compilation error."""

    file: str  # Full path without file:// prefix
    line: int  # Line number as reported by the compiler
    column: int | None  # Column number when the compiler reports one
    message: str  # Error message text
class FailedTask(BaseModel):
    """Information about a failed task."""

    name: str  # e.g., ":composeApp:compileTestKotlinIosArm64"
    reason: str  # e.g., "Compilation finished with errors"
class ErrorInfo(BaseModel):
    """Structured error information."""

    summary: str  # e.g., "Build failed: 2 tasks failed with 12 compilation errors"
    failed_tasks: list[FailedTask]  # List of failed tasks
    compilation_errors: list[CompilationError]  # Deduplicated, first occurrence only
def _classify_gradle_log_level(message: str, is_stderr: bool = False) -> str:
"""Classify the log level of a Gradle output line.
With --console=plain, Gradle outputs clean text that's easier to parse.
This function analyzes the message content to determine the appropriate
log level based on Gradle's standard output patterns.
Args:
message: The log message to classify.
is_stderr: Whether the message came from stderr (used as a hint, not definitive).
Returns:
Log level: 'ERROR', 'WARN', 'INFO', or 'DEBUG'.
"""
msg_stripped = message.strip()
# Empty lines are just INFO
if not msg_stripped:
return "INFO"
# Task status lines: "> Task :app:build FAILED" -> ERROR
if msg_stripped.startswith("> Task"):
if "FAILED" in msg_stripped:
return "ERROR"
return "INFO"
# Gradle failure sections
if msg_stripped.startswith("FAILURE:") or msg_stripped.startswith("* What went wrong:"):
return "ERROR"
# Build result lines
if "BUILD FAILED" in msg_stripped:
return "ERROR"
if "BUILD SUCCESSFUL" in msg_stripped:
return "INFO"
# Compilation/execution errors (e.g., "e: file.kt:10:5 error message")
if msg_stripped.startswith("e:") or msg_stripped.startswith("E:"):
return "ERROR"
# Warnings (e.g., "w: file.kt:10:5 warning message")
if msg_stripped.startswith("w:") or msg_stripped.startswith("W:"):
return "WARN"
# Deprecation warnings typically contain these phrases
msg_lower = msg_stripped.lower()
if "deprecated" in msg_lower or "deprecation" in msg_lower:
return "WARN"
# Explicit error/warning markers from various tools
if ": error:" in msg_lower or ": error " in msg_lower:
return "ERROR"
if ": warning:" in msg_lower or ": warning " in msg_lower:
return "WARN"
# Exception stack traces
if msg_stripped.startswith("Caused by:") or msg_stripped.startswith("at "):
return "ERROR"
if "Exception" in msg_stripped and (":" in msg_stripped or "at " in message):
return "ERROR"
# Everything else is INFO - don't assume stderr means error
return "INFO"
class GradleWrapper:
"""Interface for executing Gradle commands using the Gradle wrapper."""
# Cleaning task patterns that should not be allowed in run_task.
# NOTE(review): _is_cleaning_task lowercases the task name before matching,
# so the mixed-case "^cleanBuild.*"/"^cleanTest.*" patterns can only ever
# match via "^clean.*" — confirm whether they are intentional.
CLEANING_TASK_PATTERNS = [
    r"^clean.*",
    r".*clean$",
    r"^cleanBuild.*",
    r"^cleanTest.*",
]
# Allow-list of safe Gradle arguments that can be passed to run_task
# These are carefully selected to avoid command injection vulnerabilities
SAFE_GRADLE_ARGS = {
    # Logging options
    "--debug",
    "-d",
    "--info",
    "-i",
    "--warn",
    "-w",
    "--quiet",
    "-q",
    "--stacktrace",
    "-s",
    "--full-stacktrace",
    "-S",
    "--scan",
    "--no-scan",
    # Performance options
    "--build-cache",
    "--no-build-cache",
    "--configure-on-demand",
    "--no-configure-on-demand",
    "--max-workers",
    "--parallel",
    "--no-parallel",
    # Execution options
    "--continue",
    "--dry-run",
    "-m",
    "--refresh-dependencies",
    "--rerun-tasks",
    "--profile",
    # Task exclusion (safe as it only limits what runs)
    "-x",
    "--exclude-task",
    # Daemon options
    "--daemon",
    "--no-daemon",
    "--foreground",
    "--stop",
    "--status",
}
# Dangerous arguments that should never be allowed
# These can lead to arbitrary code execution or file system access.
# _validate_gradle_args also rejects "<flag>=value" and "-Xvalue" forms.
DANGEROUS_GRADLE_ARGS = {
    "--init-script",
    "-I",  # Can execute arbitrary Groovy/Kotlin code
    "--project-prop",
    "-P",  # Can inject properties
    "--system-prop",
    "-D",  # Can set system properties
    "--settings-file",
    "-c",  # Can load arbitrary settings
    "--build-file",
    "-b",  # Can load arbitrary build files
    "--gradle-user-home",
    "-g",  # Can access arbitrary directories
    "--project-dir",
    "-p",  # Can access arbitrary directories
    "--include-build",  # Can include arbitrary builds
    "--write-verification-metadata",  # Can write files to arbitrary locations
}
def __init__(self, project_root: str | None = None) -> None:
"""Initialize Gradle wrapper.
Args:
project_root: Root directory of the Gradle project. Defaults to current directory.
"""
self.project_root = Path(project_root or ".")
self.wrapper_script = self._find_wrapper_script()
self._gradle_properties: dict[str, str] | None = None
self._wrapper_properties: dict[str, str] | None = None
def _find_wrapper_script(self) -> Path:
"""Find the Gradle wrapper script.
Returns:
Path to the gradlew script.
Raises:
FileNotFoundError: If Gradle wrapper is not found.
"""
gradle_wrapper = self.project_root / "gradlew"
if not gradle_wrapper.exists():
raise FileNotFoundError(
f"Gradle wrapper not found at {gradle_wrapper}. "
"Please ensure gradlew script exists in the project root."
)
return gradle_wrapper
def _read_gradle_properties(self) -> dict[str, str]:
"""Read gradle.properties from project root.
Parses key=value pairs from the gradle.properties file,
ignoring comments (lines starting with #) and empty lines.
Returns:
Dictionary of property key-value pairs.
Returns empty dict if file doesn't exist.
"""
properties_path = self.project_root / "gradle.properties"
try:
properties: dict[str, str] = {}
with open(properties_path, encoding="utf-8") as f:
for line in f:
line = line.strip()
# Skip empty lines and comments
if not line or line.startswith("#"):
continue
# Parse key=value pairs
if "=" in line:
key, _, value = line.partition("=")
properties[key.strip()] = value.strip()
return properties
except FileNotFoundError:
return {}
def _read_wrapper_properties(self) -> dict[str, str]:
"""Read gradle-wrapper.properties from gradle/wrapper directory.
Parses key=value pairs from the gradle-wrapper.properties file,
ignoring comments (lines starting with #) and empty lines.
Returns:
Dictionary of property key-value pairs.
Returns empty dict if file doesn't exist.
"""
properties_path = self.project_root / "gradle" / "wrapper" / "gradle-wrapper.properties"
try:
properties: dict[str, str] = {}
with open(properties_path, encoding="utf-8") as f:
for line in f:
line = line.strip()
# Skip empty lines and comments
if not line or line.startswith("#"):
continue
# Parse key=value pairs
if "=" in line:
key, _, value = line.partition("=")
properties[key.strip()] = value.strip()
return properties
except FileNotFoundError:
return {}
@property
def gradle_properties(self) -> dict[str, str]:
"""Lazily load and return gradle.properties.
Returns:
Dictionary of property key-value pairs from gradle.properties.
"""
if self._gradle_properties is None:
self._gradle_properties = self._read_gradle_properties()
return self._gradle_properties
@property
def wrapper_properties(self) -> dict[str, str]:
"""Lazily load and return gradle-wrapper.properties.
Returns:
Dictionary of property key-value pairs from gradle-wrapper.properties.
"""
if self._wrapper_properties is None:
self._wrapper_properties = self._read_wrapper_properties()
return self._wrapper_properties
def _build_execution_environment(self) -> dict[str, str]:
"""Build the environment dictionary for subprocess execution.
Creates a proper execution environment by:
- Starting with a copy of the current environment
- Reading org.gradle.jvmargs from gradle.properties and setting GRADLE_OPTS
- Merging any existing GRADLE_OPTS (append, don't replace)
- Setting GRADLE_USER_HOME if not already set
Returns:
Complete environment dictionary for subprocess calls.
"""
env = os.environ.copy()
# Read JVM args from gradle.properties
jvm_args = self.gradle_properties.get("org.gradle.jvmargs")
if jvm_args:
existing_opts = env.get("GRADLE_OPTS", "")
if existing_opts:
# Append to existing GRADLE_OPTS
env["GRADLE_OPTS"] = f"{existing_opts} {jvm_args}"
else:
env["GRADLE_OPTS"] = jvm_args
# Set GRADLE_USER_HOME if not already set
if "GRADLE_USER_HOME" not in env:
gradle_user_home = os.path.expanduser("~/.gradle")
env["GRADLE_USER_HOME"] = gradle_user_home
return env
def _is_cleaning_task(self, task: str) -> bool:
"""Check if a task is a cleaning task.
Args:
task: Task name to check.
Returns:
True if the task is a cleaning task, False otherwise.
"""
task_lower = task.lower()
for pattern in self.CLEANING_TASK_PATTERNS:
if re.match(pattern, task_lower):
return True
return False
def _validate_gradle_args(self, args: list[str], tasks: list[str] | None = None) -> None:
    """Validate that all provided Gradle arguments are safe.

    This method prevents command injection by ensuring that only safe,
    pre-approved arguments can be passed to Gradle. Any dangerous arguments
    that could lead to arbitrary code execution or file system access are blocked.

    Args:
        args: List of arguments to validate.
        tasks: Task names the arguments will run against; used only to
            decide whether the test-filtering flag ``--tests`` is permitted.

    Raises:
        ValueError: If any dangerous or unknown arguments are detected.
    """
    if not args:
        return
    # Manual index loop: value-taking flags consume the following element,
    # so the step size varies per argument.
    i = 0
    while i < len(args):
        arg = args[i]
        # Check if this is a dangerous argument
        if arg in self.DANGEROUS_GRADLE_ARGS:
            raise ValueError(
                f"Argument '{arg}' is not allowed due to security concerns. "
                f"It could enable arbitrary code execution or unauthorized file access."
            )
        # Check for dangerous arguments that might be prefix of a longer string
        # This catches both --arg=value and -Xvalue patterns (e.g. "-Pfoo=bar")
        for dangerous in self.DANGEROUS_GRADLE_ARGS:
            if arg.startswith(dangerous + "=") or (
                len(dangerous) == 2 and arg.startswith(dangerous) and len(arg) > 2
            ):
                raise ValueError(
                    f"Argument '{arg}' is not allowed due to security concerns. "
                    f"It could enable arbitrary code execution or unauthorized file access."
                )
        # Check if this is a safe argument
        if arg in self.SAFE_GRADLE_ARGS:
            # Some arguments take values, skip the next arg if it doesn't start with -
            if arg in {"--max-workers", "-x", "--exclude-task"}:
                i += 1  # Skip next arg (the value)
                if i < len(args) and args[i].startswith("-"):
                    i -= 1  # Actually it was another flag, don't skip
            i += 1
            continue
        # Check for arguments with = syntax (e.g., --max-workers=4)
        base_arg = arg.split("=")[0]
        if base_arg in self.SAFE_GRADLE_ARGS:
            i += 1
            continue
        # Special-case: allow --tests (and --tests=...) only when running test tasks
        if base_arg == "--tests":
            # Allow only if the requested tasks include at least one test task
            if tasks:

                def _contains_test_task(task_name: str) -> bool:
                    # consider ":module:test" or "test" as test tasks
                    # NOTE(review): only a segment named exactly "test"
                    # qualifies — names like "testDebugUnitTest" do not;
                    # confirm this restriction is intentional.
                    parts = task_name.split(":")
                    return "test" in parts

                if any(_contains_test_task(t) for t in tasks):
                    # If --tests is provided without '=' the next arg is its value
                    if arg == "--tests":
                        i += 1
                        if i < len(args) and args[i].startswith("-"):
                            i -= 1
                    i += 1
                    continue
            # Not a test task -> reject
            raise ValueError(
                f"Argument '{arg}' is not in the allow-list of safe Gradle arguments. "
                f"Allowed arguments are: {', '.join(sorted(self.SAFE_GRADLE_ARGS))}"
            )
        # Unknown argument - reject it for safety
        raise ValueError(
            f"Argument '{arg}' is not in the allow-list of safe Gradle arguments. "
            f"Allowed arguments are: {', '.join(sorted(self.SAFE_GRADLE_ARGS))}"
        )
def _extract_error_message(
    self, stdout: str, stderr: str, default_message: str = "Task failed"
) -> str:
    """Extract comprehensive error message from Gradle output.

    This method searches for failed tasks and captures all error details
    by searching backwards from FAILURE: or BUILD FAILED markers. It is a
    heuristic over Gradle's plain-console output layout.

    Args:
        stdout: Standard output from Gradle.
        stderr: Standard error from Gradle.
        default_message: Default message if no error markers found.

    Returns:
        Extracted error message with full context.
    """
    # Combine stdout and stderr since Gradle splits output between them
    # Task failures and error details go to stdout
    # FAILURE: summary goes to stderr
    combined_output = (
        stdout + "\n" + stderr if stdout and stderr else (stdout or stderr or default_message)
    )
    error_lines = combined_output.strip().split("\n")
    # Strategy: Find where actual errors start by searching backwards
    # Gradle output structure for failures:
    # 1. Failed tasks with their errors appear first (in stdout)
    # 2. Then "FAILURE:" section with summaries (in stderr)
    # 3. Finally "BUILD FAILED" summary (in stderr)
    # We want to capture from the first failed task onwards
    first_failure_idx = -1
    failure_marker_idx = -1
    build_failed_idx = -1
    # Find key markers: first failed task, first FAILURE section,
    # and the LAST "BUILD FAILED" line (index keeps being overwritten).
    for i, line in enumerate(error_lines):
        if "FAILED" in line and "> Task" in line:
            # Track first failed task
            if first_failure_idx == -1:
                first_failure_idx = i
        if "FAILURE:" in line or "* What went wrong:" in line:
            if failure_marker_idx == -1:
                failure_marker_idx = i
        if "BUILD FAILED" in line:
            build_failed_idx = i
    # If we found FAILURE: or BUILD FAILED, search backwards for the first task failure
    if failure_marker_idx >= 0 or build_failed_idx >= 0:
        marker_idx = failure_marker_idx if failure_marker_idx >= 0 else build_failed_idx
        # Search backwards from the marker to find ALL failed tasks
        # Keep updating first_failure_idx to get the earliest one
        for i in range(marker_idx - 1, -1, -1):
            line = error_lines[i]
            if "FAILED" in line and "> Task" in line:
                # Update to capture the earliest failed task
                first_failure_idx = i
            # Stop if we hit successful/skipped tasks (but not failed ones)
            elif "> Task" in line and "FAILED" not in line:
                # Hit a non-failed task (UP-TO-DATE, NO-SOURCE, FROM-CACHE, etc.)
                # Stop searching backwards
                break
            elif any(marker in line for marker in ["Configuration cache", "BUILD SUCCESSFUL"]):
                # Hit build start indicators (but NOT "Reusing configuration" which appears at the top)
                break
    # Use the first failure we found
    if first_failure_idx >= 0:
        return "\n".join(error_lines[first_failure_idx:])
    # Fallback: include substantial context before BUILD FAILED or from the end
    elif build_failed_idx >= 0:
        # Heuristic window: up to 100 lines preceding the BUILD FAILED line.
        start_idx = max(0, build_failed_idx - 100)
        return "\n".join(error_lines[start_idx:])
    else:
        # Last resort: include last 50 lines
        return "\n".join(error_lines[-50:]) if len(error_lines) > 50 else combined_output
def _parse_compilation_errors(self, output: str) -> list[CompilationError]:
    """Parse compilation errors from Gradle output.

    Lines beginning with `e:` or `E:` are matched against the pattern
    "path:line[:column] message"; an optional `file://` prefix is dropped
    and duplicates (same file, line, column, and message) are skipped.

    Args:
        output: Combined stdout/stderr from Gradle.

    Returns:
        List of unique CompilationError objects, in encounter order.
    """
    # Pattern: e: [file://]path:line[:column] message
    pattern = re.compile(
        r"^[eE]:\s*(?:file://)?(.+?):(\d+):(\d+)?\s*(.+)$"
    )
    results: list[CompilationError] = []
    seen: set[tuple[str, int, int | None, str]] = set()
    for raw in output.split("\n"):
        candidate = raw.strip()
        # Cheap prefix filter before running the regex.
        if not candidate.startswith(("e:", "E:")):
            continue
        match = pattern.match(candidate)
        if match is None:
            continue
        path, line_no, col, msg = match.groups()
        key = (path, int(line_no), int(col) if col else None, msg.strip())
        if key in seen:
            continue
        seen.add(key)
        results.append(
            CompilationError(
                file=key[0],
                line=key[1],
                column=key[2],
                message=key[3],
            )
        )
    return results
def _parse_failed_tasks(self, output: str) -> list[FailedTask]:
    """Parse failed tasks from Gradle output.

    Scans for lines like `> Task :app:build FAILED` and records each
    task name once, in encounter order.

    Args:
        output: Combined stdout/stderr from Gradle.

    Returns:
        List of FailedTask objects.
    """
    # Pattern: > Task :project:taskName FAILED
    pattern = re.compile(r"^>\s*Task\s+(:\S+)\s+FAILED")
    failures: list[FailedTask] = []
    seen: set[str] = set()
    for raw in output.split("\n"):
        match = pattern.match(raw.strip())
        if match is None:
            continue
        name = match.group(1)
        if name in seen:
            continue
        seen.add(name)
        # Default reason; could be enhanced to parse "What went wrong".
        failures.append(
            FailedTask(
                name=name,
                reason="Compilation finished with errors",
            )
        )
    return failures
def _generate_error_summary(
self,
failed_tasks: list[FailedTask],
compilation_errors: list[CompilationError],
) -> str:
"""Generate a concise error summary.
Args:
failed_tasks: List of failed tasks.
compilation_errors: List of compilation errors.
Returns:
Summary string like "Build failed: 2 tasks failed with 12 compilation errors in 1 file".
"""
task_count = len(failed_tasks)
error_count = len(compilation_errors)
# Count unique files
unique_files = {e.file for e in compilation_errors}
file_count = len(unique_files)
parts = ["Build failed:"]
if task_count > 0:
parts.append(f"{task_count} task{'s' if task_count != 1 else ''} failed")
if error_count > 0:
error_part = f"{error_count} compilation error{'s' if error_count != 1 else ''}"
if file_count > 0:
error_part += f" in {file_count} file{'s' if file_count != 1 else ''}"
if task_count > 0:
parts.append(f"with {error_part}")
else:
parts.append(error_part)
if task_count == 0 and error_count == 0:
return "Build failed"
return " ".join(parts)
def _extract_structured_error(
    self, stdout: str, stderr: str, default_message: str = "Task failed"
) -> ErrorInfo:
    """Extract structured error information from Gradle output.

    Parses the combined output into failed task names and deduplicated
    compilation errors, plus a concise human-readable summary.

    Args:
        stdout: Standard output from Gradle.
        stderr: Standard error from Gradle.
        default_message: Default message if no error markers found.

    Returns:
        ErrorInfo object with structured error data.
    """
    if stdout and stderr:
        combined = stdout + "\n" + stderr
    else:
        combined = stdout or stderr or default_message
    tasks_failed = self._parse_failed_tasks(combined)
    comp_errors = self._parse_compilation_errors(combined)
    if tasks_failed or comp_errors:
        summary = self._generate_error_summary(tasks_failed, comp_errors)
    else:
        # Nothing structured was found: fall back to the raw-output
        # heuristic and reduce it to a generic one-liner.
        raw_error = self._extract_error_message(stdout, stderr, default_message)
        summary = "Build failed" if "BUILD FAILED" in raw_error else default_message
    return ErrorInfo(
        summary=summary,
        failed_tasks=tasks_failed,
        compilation_errors=comp_errors,
    )
def list_projects(self) -> list[GradleProject]:
    """List all Gradle projects in the workspace.

    Runs `gradlew projects -q` and parses the textual project tree.

    Returns:
        List of GradleProject objects (root project first, added once).

    Raises:
        RuntimeError: If the Gradle command fails.
    """
    try:
        proc = subprocess.run(
            [str(self.wrapper_script), "projects", "-q"],
            cwd=str(self.project_root),
            capture_output=True,
            text=True,
            check=True,
            env=self._build_execution_environment(),
        )
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Failed to list projects: {e.stderr}") from e
    projects: list[GradleProject] = []
    root_added = False
    for raw in proc.stdout.split("\n"):
        entry = raw.strip()
        # The root project line appears once; add it exactly once.
        if "Root project" in entry and not root_added:
            projects.append(
                GradleProject(
                    name=":", path=str(self.project_root), description="Root project"
                )
            )
            root_added = True
            continue
        # Subproject lines look like "+--- Project ':app'" or "Project ':app'".
        if "Project '" in entry and "Root project" not in entry:
            found = re.search(r"Project '([^']+)'", entry)
            # Skip a bare ":" in case the root shows up again.
            if found and found.group(1) != ":":
                projects.append(
                    GradleProject(
                        name=found.group(1),
                        path=str(self.project_root),
                    )
                )
    return projects
def list_tasks(
    self,
    project: str = ":",
    include_descriptions: bool = True,
    group: str | None = None,
) -> list[GroupedTasks]:
    """List all available tasks for a specific Gradle project.

    Returns a nested structure grouped by task group. When include_descriptions
    is True, each task includes its description. When False, only task names
    are returned for a more compact response.

    Args:
        project: Project name (e.g., ':app'). Use ':' or empty string for root project.
        include_descriptions: If True, include task descriptions. If False, return
            only task names for a more compact response.
        group: Optional group name to filter tasks. If provided, only tasks from
            this group will be returned. Case-insensitive matching.

    Returns:
        List of GroupedTasks objects, each containing a group name and its tasks.
        Tasks are either TaskWithDescription objects (if include_descriptions=True)
        or plain task name strings (if include_descriptions=False).

    Raises:
        RuntimeError: If the Gradle command fails.
    """
    try:
        # Use tasks --all to get all tasks including inherited ones
        # For root project (: or empty), use just "tasks", for subprojects use "project:tasks"
        is_root = project == ":" or project == "" or project is None
        task_cmd = "tasks" if is_root else f"{project}:tasks"
        # FIX: named `proc` — the original rebound `result` later for the
        # return list, shadowing the subprocess result.
        proc = subprocess.run(
            [str(self.wrapper_script), task_cmd, "--all"],
            cwd=str(self.project_root),
            capture_output=True,
            text=True,
            check=True,
            env=self._build_execution_environment(),
        )
        # Group tasks by group name; dict preserves encounter order.
        groups: dict[str, list[TaskWithDescription] | list[str]] = {}
        in_task_section = False
        current_group: str | None = None
        for line in proc.stdout.split("\n"):
            line_stripped = line.strip()
            # Skip empty lines
            if not line_stripped:
                continue
            # Task group headers end with "tasks" (e.g. "Build tasks").
            if line_stripped.endswith(" tasks") and line_stripped[0].isupper():
                in_task_section = True
                current_group = line_stripped.replace(" tasks", "").strip()
                if current_group not in groups:
                    groups[current_group] = []
                continue
            # Skip separators and rule patterns.
            if line_stripped.startswith("-") or "Pattern:" in line_stripped:
                continue
            # Stop at trailing help text / build summary.
            if "To see all tasks" in line_stripped or line_stripped.startswith("BUILD"):
                break
            # Parse task lines when in a task section
            if in_task_section and current_group is not None:
                # Task lines format: "taskName - description"
                task_match = re.match(r"^(\w+)\s+-\s+(.+)$", line_stripped)
                if task_match:
                    task_name = task_match.group(1)
                    description = task_match.group(2)
                    if include_descriptions:
                        groups[current_group].append(
                            TaskWithDescription(name=task_name, description=description)
                        )
                    else:
                        groups[current_group].append(task_name)
                # Also handle tasks listed without a description.
                elif re.match(r"^(\w+)$", line_stripped):
                    task_name = line_stripped
                    if include_descriptions:
                        groups[current_group].append(
                            TaskWithDescription(name=task_name, description="")
                        )
                    else:
                        groups[current_group].append(task_name)
        # Convert to list of GroupedTasks, optionally filtering by group name
        grouped: list[GroupedTasks] = []
        for group_name, task_list in groups.items():
            # Filter by group if specified (case-insensitive)
            if group is not None and group_name.lower() != group.lower():
                continue
            grouped.append(GroupedTasks(group=group_name, tasks=task_list))
        return grouped
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Failed to list tasks for project {project}: {e.stderr}") from e
async def run_task(
    self,
    tasks: str | list[str],
    args: list[str] | None = None,
    ctx: "Context | None" = None,
    timeout: int | None = None,
    daemon_monitor: "DaemonMonitor | None" = None,
) -> dict:
    """Run one or more Gradle tasks with real-time progress reporting.

    Args:
        tasks: Task(s) to run. Single string or list of task names.
        args: Additional Gradle arguments. Daemon is enabled by default.
        ctx: Optional FastMCP Context for progress reporting.
        timeout: Optional timeout in seconds. If specified, the task will be
            terminated if it exceeds this duration.
        daemon_monitor: Optional DaemonMonitor used to surface timeout
            messages on the dashboard.

    Returns:
        Dictionary with 'success' (bool) and 'error' (None on success; an
        ErrorInfo for build failures/exceptions, or a plain string on timeout).

    Raises:
        ValueError: If any task is a cleaning task, or an argument is not
            in the allow-list of safe Gradle arguments.
    """
    # Normalize to list
    task_list = tasks if isinstance(tasks, list) else [tasks]
    # Check all tasks for cleaning patterns
    for task in task_list:
        if self._is_cleaning_task(task):
            raise ValueError(
                f"Task '{task}' is a cleaning task and cannot be run via run_task. "
                "Please use the clean tool instead."
            )
    # Validate arguments to prevent command injection
    if args:
        self._validate_gradle_args(args, task_list)
    # Build command with all tasks - daemon is enabled by default in Gradle
    # Use --console=plain for clean, parseable output without ANSI codes
    cmd = [str(self.wrapper_script), "--console=plain"] + task_list
    if args:
        cmd.extend(args)
    logger.info(f"Executing: {' '.join(cmd)}")
    # Track this build in the dashboard. Best-effort: when the dashboard is
    # absent, _log_store is None and the attribute access raises — swallowed.
    build_id = None
    try:
        build_id = _log_store.start_build(" ".join(task_list))
    except Exception:
        pass
    process: asyncio.subprocess.Process | None = None
    try:
        process = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=str(self.project_root),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self._build_execution_environment(),
        )
        # Notify dashboard that a task started (may have spawned a new daemon)
        if _emit_daemons_changed:
            try:
                _emit_daemons_changed()
            except Exception:
                pass
        # Stream output in real-time while collecting it
        stdout_lines: list[str] = []
        stderr_lines: list[str] = []

        async def read_stream(
            stream: asyncio.StreamReader, lines: list[str], is_stderr: bool = False
        ) -> None:
            # Drain one stream line-by-line until EOF, mirroring each
            # non-empty line into the dashboard log store.
            while True:
                line = await stream.readline()
                if not line:
                    break
                decoded = line.decode().rstrip()
                lines.append(decoded)
                # Log to shared log_store for dashboard
                if _log_store and decoded.strip():
                    level = _classify_gradle_log_level(decoded, is_stderr)
                    _log_store.add_log(decoded, level)

        # Handle timeout with streaming
        try:
            if timeout:
                await asyncio.wait_for(
                    asyncio.gather(
                        read_stream(process.stdout, stdout_lines, False),
                        read_stream(process.stderr, stderr_lines, True),
                    ),
                    timeout=timeout,
                )
            else:
                await asyncio.gather(
                    read_stream(process.stdout, stdout_lines, False),
                    read_stream(process.stderr, stderr_lines, True),
                )
            # Streams are at EOF here; reap the child to populate returncode.
            await process.wait()
        except asyncio.TimeoutError:
            # Graceful termination first
            logger.warning(f"Task timed out after {timeout} seconds, terminating process...")
            process.terminate()
            try:
                await asyncio.wait_for(process.wait(), timeout=5)
            except asyncio.TimeoutError:
                # Force kill if graceful termination fails
                logger.warning("Process did not terminate gracefully, killing...")
                process.kill()
                await process.wait()
            if daemon_monitor:
                daemon_monitor.add_log("gradle", f"Task timed out after {timeout} seconds", "ERROR")
            return {"success": False, "error": f"Task timed out after {timeout} seconds"}
        stdout = "\n".join(stdout_lines)
        stderr = "\n".join(stderr_lines)
        # Log output for debugging
        for line in stdout_lines:
            if line.strip():
                logger.debug(line)
        for line in stderr_lines:
            if line.strip():
                logger.debug(line)
        # Report progress if context is available (final progress)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
        if process.returncode == 0:
            logger.info(f"Task {task_list} completed successfully")
            # End build tracking
            if build_id and _log_store:
                try:
                    _log_store.end_build(build_id)
                except Exception:
                    pass
            return {"success": True, "error": None}
        else:
            # Extract structured error information
            error_info = self._extract_structured_error(stdout, stderr, "Task failed")
            logger.error(f"Task {task_list} failed: {error_info.summary}")
            # End build tracking
            if build_id and _log_store:
                try:
                    _log_store.end_build(build_id)
                except Exception:
                    pass
            return {"success": False, "error": error_info}
    except Exception as e:
        # Ensure process cleanup on any exception
        if process and process.returncode is None:
            logger.warning(f"Exception occurred, cleaning up process: {e}")
            process.terminate()
            try:
                await asyncio.wait_for(process.wait(), timeout=5)
            except asyncio.TimeoutError:
                process.kill()
                await process.wait()
        logger.error(f"Task {task_list} failed with exception: {e}")
        # End build tracking
        if build_id and _log_store:
            try:
                _log_store.end_build(build_id)
            except Exception:
                pass
        # Return structured error for exceptions
        return {
            "success": False,
            "error": ErrorInfo(
                summary=str(e),
                failed_tasks=[],
                compilation_errors=[],
            ),
        }
async def clean(
    self,
    project: str | None = None,
    ctx: "Context | None" = None,
    timeout: int | None = None,
    daemon_monitor: "DaemonMonitor | None" = None,
) -> dict:
    """Run the clean task for a project.

    Args:
        project: Project path (e.g., ':app'). Use ':' or empty string or None for root project.
        ctx: FastMCP context for progress reporting and logging.
        timeout: Optional timeout in seconds. If specified, the task will be
            terminated if it exceeds this duration.
        daemon_monitor: Optional DaemonMonitor for logging to the dashboard.

    Returns:
        Dictionary with 'success', 'error' keys.
        - success (bool): True if clean completed successfully
        - error: ErrorInfo on build failure/exception, a plain timeout
          message string on timeout, None on success
    """
    # Root project if project is None, empty, or ":"
    is_root = project is None or project == "" or project == ":"
    project_arg = "" if is_root else f"{project}:"
    # Build command - daemon is enabled by default.
    # Use --console=plain for clean, parseable output without ANSI codes.
    cmd = [str(self.wrapper_script), "--console=plain", f"{project_arg}clean"]
    logger.info(f"Executing: {' '.join(cmd)}")
    # Track this build in the dashboard (best-effort: _log_store may be None).
    build_id = None
    try:
        build_id = _log_store.start_build(f"{project_arg}clean")
    except Exception:
        pass

    def _finish_build() -> None:
        """Best-effort: mark the dashboard build as finished."""
        if build_id and _log_store:
            try:
                _log_store.end_build(build_id)
            except Exception:
                pass

    process: asyncio.subprocess.Process | None = None
    try:
        process = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=str(self.project_root),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self._build_execution_environment(),
        )
        # Notify dashboard that a task started (may have spawned a new daemon)
        if _emit_daemons_changed:
            try:
                _emit_daemons_changed()
            except Exception:
                pass
        # Stream output in real-time while collecting it
        stdout_lines: list[str] = []
        stderr_lines: list[str] = []

        async def read_stream(
            stream: asyncio.StreamReader, lines: list[str], is_stderr: bool = False
        ) -> None:
            """Drain one pipe line-by-line, mirroring each line to the dashboard."""
            while True:
                line = await stream.readline()
                if not line:
                    break
                decoded = line.decode().rstrip()
                lines.append(decoded)
                # Log to shared log_store for dashboard
                if _log_store and decoded.strip():
                    level = _classify_gradle_log_level(decoded, is_stderr)
                    _log_store.add_log(decoded, level)

        # Handle timeout with streaming
        try:
            if timeout:
                await asyncio.wait_for(
                    asyncio.gather(
                        read_stream(process.stdout, stdout_lines, False),
                        read_stream(process.stderr, stderr_lines, True),
                    ),
                    timeout=timeout,
                )
            else:
                await asyncio.gather(
                    read_stream(process.stdout, stdout_lines, False),
                    read_stream(process.stderr, stderr_lines, True),
                )
            await process.wait()
        except asyncio.TimeoutError:
            # Graceful termination first
            logger.warning(f"Clean timed out after {timeout} seconds, terminating process...")
            process.terminate()
            try:
                await asyncio.wait_for(process.wait(), timeout=5)
            except asyncio.TimeoutError:
                # Force kill if graceful termination fails
                logger.warning("Process did not terminate gracefully, killing...")
                process.kill()
                await process.wait()
            if daemon_monitor:
                daemon_monitor.add_log("gradle", f"Clean timed out after {timeout} seconds", "ERROR")
            # Fix: previously this path returned without ending the dashboard
            # build, leaving it stuck in the "running" state after a timeout.
            _finish_build()
            return {"success": False, "error": f"Clean timed out after {timeout} seconds"}
        stdout = "\n".join(stdout_lines)
        stderr = "\n".join(stderr_lines)
        # Log output for debugging
        for line in stdout_lines:
            if line.strip():
                logger.debug(line)
        for line in stderr_lines:
            if line.strip():
                # Use appropriate log level based on content, not just stderr
                level = _classify_gradle_log_level(line, is_stderr=True)
                if level == "ERROR":
                    logger.error(line)
                elif level == "WARN":
                    logger.warning(line)
                else:
                    logger.debug(line)
        # Report progress if context is available (final progress)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
        if process.returncode == 0:
            logger.info(f"Clean completed successfully for project {project or 'root'}")
            _finish_build()
            return {"success": True, "error": None}
        # Extract structured error information
        error_info = self._extract_structured_error(stdout, stderr, "Clean failed")
        logger.error(f"Clean failed: {error_info.summary}")
        _finish_build()
        return {"success": False, "error": error_info}
    except Exception as e:
        # Ensure process cleanup on any exception
        if process and process.returncode is None:
            logger.warning(f"Exception occurred, cleaning up process: {e}")
            process.terminate()
            try:
                await asyncio.wait_for(process.wait(), timeout=5)
            except asyncio.TimeoutError:
                process.kill()
                await process.wait()
        logger.error(f"Clean failed with exception: {e}")
        _finish_build()
        # Return structured error for exceptions
        return {
            "success": False,
            "error": ErrorInfo(
                summary=str(e),
                failed_tasks=[],
                compilation_errors=[],
            ),
        }
async def daemon_status(self) -> dict:
    """Query the state of running Gradle daemons via ``gradlew --status``.

    Returns:
        dict with 'success' bool, 'output' str (daemon status info), and optional 'error' str.
    """
    cmd = [str(self.wrapper_script), "--status"]
    logger.info(f"Executing: {' '.join(cmd)}")
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=str(self.project_root),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self._build_execution_environment(),
        )
        raw_out, raw_err = await proc.communicate()
        out_text = raw_out.decode() if raw_out else ""
        err_text = raw_err.decode() if raw_err else ""
        # Non-zero exit: surface whichever stream carried a message.
        if proc.returncode != 0:
            message = err_text or out_text or "Failed to get daemon status"
            logger.error(f"Daemon status failed: {message}")
            return {"success": False, "output": out_text, "error": message}
        logger.info("Daemon status retrieved successfully")
        return {"success": True, "output": out_text, "error": None}
    except Exception as e:
        logger.error(f"Daemon status failed with exception: {e}")
        return {"success": False, "output": "", "error": str(e)}
async def stop_daemon(self) -> dict:
    """Stop all Gradle daemons via ``gradlew --stop``.

    Returns:
        dict with 'success' bool and optional 'error' str.
    """
    cmd = [str(self.wrapper_script), "--stop"]
    logger.info(f"Executing: {' '.join(cmd)}")
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=str(self.project_root),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self._build_execution_environment(),
        )
        raw_out, raw_err = await proc.communicate()
        out_text = raw_out.decode() if raw_out else ""
        err_text = raw_err.decode() if raw_err else ""
        # Non-zero exit: distil a human-readable reason from the output.
        if proc.returncode != 0:
            message = self._extract_error_message(
                out_text, err_text, "Failed to stop daemons"
            )
            logger.error(f"Stop daemon failed: {message}")
            return {"success": False, "error": message}
        logger.info("Gradle daemons stopped successfully")
        return {"success": True, "error": None}
    except Exception as e:
        logger.error(f"Stop daemon failed with exception: {e}")
        return {"success": False, "error": str(e)}
def get_config(self) -> dict:
    """Get current Gradle configuration including memory settings.

    Returns:
        dict with:
            - jvm_args: str | None (from gradle.properties org.gradle.jvmargs)
            - daemon_enabled: bool | None (from org.gradle.daemon)
            - parallel_enabled: bool | None (from org.gradle.parallel)
            - caching_enabled: bool | None (from org.gradle.caching)
            - max_workers: int | None (from org.gradle.workers.max)
            - distribution_url: str | None (from wrapper properties)
            - gradle_version: str | None (extracted from distribution URL)
    """
    def _as_bool(raw: str | None) -> bool | None:
        """Map an absent value to None; otherwise compare to "true" case-insensitively."""
        return None if raw is None else raw.lower() == "true"

    def _as_int(raw: str | None) -> int | None:
        """Parse an integer, returning None for absent or malformed values."""
        if raw is None:
            return None
        try:
            return int(raw)
        except ValueError:
            return None

    def _version_from_url(url: str | None) -> str | None:
        """Pull the version out of a distribution URL.

        Examples:
            - https://services.gradle.org/distributions/gradle-8.5-bin.zip -> 8.5
            - https://services.gradle.org/distributions/gradle-8.5-all.zip -> 8.5
        """
        if url is None:
            return None
        # Match patterns like gradle-8.5-bin.zip or gradle-8.5-all.zip
        found = re.search(r"gradle-([\d.]+(?:-\w+)?)-(?:bin|all)\.zip", url)
        return found.group(1) if found else None

    props = self.gradle_properties
    dist_url = self.wrapper_properties.get("distributionUrl")
    return {
        "jvm_args": props.get("org.gradle.jvmargs"),
        "daemon_enabled": _as_bool(props.get("org.gradle.daemon")),
        "parallel_enabled": _as_bool(props.get("org.gradle.parallel")),
        "caching_enabled": _as_bool(props.get("org.gradle.caching")),
        "max_workers": _as_int(props.get("org.gradle.workers.max")),
        "distribution_url": dist_url,
        "gradle_version": _version_from_url(dist_url),
    }
def _parse_jvm_args(self, command_line: str) -> list[str]:
"""Parse JVM arguments from a command line string.
Extracts arguments that start with -X, -D, or --add-opens from the
process command line.
Args:
command_line: The full command line string from the process.
Returns:
List of JVM argument strings.
"""
jvm_args: list[str] = []
if not command_line:
return jvm_args
# Split by whitespace, handling potential null characters
parts = command_line.replace("\x00", " ").split()
for part in parts:
part = part.strip()
if not part:
continue
# JVM args typically start with -X (heap, gc options), -D (system props),
# or --add-opens (module access)
if part.startswith(("-Xmx", "-Xms", "-Xss", "-XX:", "-D", "--add-opens", "--add-exports")):
jvm_args.append(part)
return jvm_args
def get_daemon_specific_config(self, pid: str) -> dict:
    """Get configuration specific to a running daemon process.

    Extracts the actual JVM arguments from the running daemon process
    and combines them with the project configuration.

    Args:
        pid: Process ID of the running daemon.

    Returns:
        dict containing:
            - runtime_jvm_args: list[str] (actual JVM args from the running process)
            - All fields from get_config()
    """
    import platform

    system = platform.system()
    runtime_args: list[str] = []
    if system == "Darwin":  # macOS: ask ps for the daemon's full command line
        try:
            proc = subprocess.run(
                ["ps", "-p", pid, "-o", "command="],
                capture_output=True,
                text=True,
                timeout=5,
            )
            if proc.returncode == 0:
                runtime_args = self._parse_jvm_args(proc.stdout)
        except (subprocess.TimeoutExpired, subprocess.SubprocessError):
            pass
    elif system == "Linux":  # Linux: read the kernel-exposed cmdline file
        try:
            with open(f"/proc/{pid}/cmdline", "r") as fh:
                runtime_args = self._parse_jvm_args(fh.read())
        except (FileNotFoundError, PermissionError, OSError):
            pass
    elif system == "Windows":
        # Windows: use wmic to get command line.
        # NOTE(review): wmic is deprecated on recent Windows builds — may need
        # a PowerShell/CIM fallback; confirm target Windows versions.
        try:
            proc = subprocess.run(
                ["wmic", "process", "where", f"ProcessId={pid}", "get", "CommandLine"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            if proc.returncode == 0:
                # First line of wmic output is the column header; skip it.
                rows = proc.stdout.strip().split("\n")
                if len(rows) > 1:
                    runtime_args = self._parse_jvm_args(rows[1])
        except (subprocess.TimeoutExpired, subprocess.SubprocessError):
            pass
    return {
        "runtime_jvm_args": runtime_args,
        **self.get_config()
    }