import os
import subprocess
import sys

from fastmcp import FastMCP

# Initialize the FastMCP server instance.
mcp = FastMCP("Development Helper")

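# Each function below is registered as an MCP tool via the @mcp.tool()
# decorator; FastMCP derives the tool's input schema from the function's
# type hints and its description from the docstring.
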
@mcp.tool()
def read_code(path: str) -> str:
    """
    Reads the content of a file. Supported extensions: .py, .ts, .tsx, .js, .json, .md.
    """
    allowed = {".py", ".ts", ".tsx", ".js", ".json", ".md"}
    try:
        # Resolve to an absolute path; note this does not sandbox access on its own.
        abs_path = os.path.abspath(path)
        if os.path.splitext(abs_path)[1].lower() not in allowed:
            return f"Error reading file {path}: unsupported extension"
        with open(abs_path, "r", encoding="utf-8") as f:
            return f.read()
    except Exception as e:
        return f"Error reading file {path}: {e}"

@mcp.tool()
def write_code(path: str, content: str) -> str:
    """
    Writes or overwrites content to a file.
    """
    try:
        abs_path = os.path.abspath(path)
        os.makedirs(os.path.dirname(abs_path), exist_ok=True)
        with open(abs_path, "w", encoding="utf-8") as f:
            f.write(content)
        return f"Successfully wrote to {path}"
    except Exception as e:
        return f"Error writing to file {path}: {e}"

@mcp.tool()
def run_terminal(command: str, timeout: int = 120) -> str:
    """
    Executes a shell command and returns stdout and stderr.
    Example: pytest, npm test, ls -la, etc. Long-running commands are
    killed after `timeout` seconds (default 120).
    """
    try:
        # shell=True allows pipes and compound commands, but it executes
        # arbitrary shell input; in production this should be restricted.
        process = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        output = []
        if process.stdout:
            output.append(f"STDOUT:\n{process.stdout}")
        if process.stderr:
            output.append(f"STDERR:\n{process.stderr}")
        if not output:
            return "Command executed successfully with no output."
        return "\n".join(output)
    except subprocess.TimeoutExpired:
        return f"Error: command timed out after {timeout} seconds."
    except Exception as e:
        return f"Error executing command: {e}"

@mcp.tool()
def list_models_with_limits() -> str:
    """
    Lists available Gemini models sorted by input token limit (context window).
    Provides a proxy for 'capacity' since exact quota is not available via API.
    """
    import google.generativeai as genai
    from dotenv import load_dotenv

    load_dotenv()
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        return "Error: GEMINI_API_KEY not found in .env"
    try:
        genai.configure(api_key=api_key)
        models = genai.list_models()
        # Filter to models that support generateContent and extract the relevant info.
        model_info = []
        for m in models:
            if 'generateContent' in m.supported_generation_methods:
                model_info.append({
                    'name': m.name,
                    'input_limit': getattr(m, 'input_token_limit', 0),
                    'output_limit': getattr(m, 'output_token_limit', 0)
                })
        # Sort by input limit (context window), descending.
        model_info.sort(key=lambda x: x['input_limit'], reverse=True)
        res = "## Available Models (Sorted by Context Capacity)\n\n"
        res += "> [!NOTE]\n> The API does not expose remaining quota (RPM/TPM) directly. These limits represent model capacity.\n\n"
        for m in model_info:
            res += f"- **{m['name']}**\n"
            res += f"  - Context (Input): {m['input_limit']:,} tokens\n"
            res += f"  - Output: {m['output_limit']:,} tokens\n"
        return res
    except Exception as e:
        return f"Error listing models: {e}"

@mcp.tool()
def notify_dev(message: str) -> str:
    """
    Sends a desktop notification. Supports Linux (notify-send).
    """
    # os.name == 'posix' would also match macOS, where notify-send is absent,
    # so check the platform explicitly.
    if sys.platform.startswith("linux"):
        try:
            # Verify that notify-send is available before using it.
            subprocess.run(['notify-send', '--version'], capture_output=True, check=True)
            subprocess.run(['notify-send', 'MCP Agent', message])
            return f"Notification sent: {message}"
        except (subprocess.CalledProcessError, FileNotFoundError):
            return f"Notification failed: notify-send not found. Message was: {message}"
    else:
        # Fallback for Windows/macOS: log to stdout as a mock notification.
        print(f"NOTIFICATION: {message}")
        return f"Notification (fallback): {message}"

if __name__ == "__main__":
    # Start the server; FastMCP uses the stdio transport by default.
    mcp.run()
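
# Example client registration for an MCP host such as Claude Desktop; the
# server key "dev-helper" and the paths below are placeholders, so adjust the
# interpreter and script path for your setup:
#
# {
#   "mcpServers": {
#     "dev-helper": {
#       "command": "python",
#       "args": ["/path/to/this/server.py"]
#     }
#   }
# }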