"""
Main MCP server providing thinking augmentation tools.
This module implements the Model Context Protocol server that exposes
the thinking augmentation system as tools for LLM clients.
"""
import logging
from mcp.server.fastmcp import FastMCP
from .agents import process_proposal
from .models import CritiqueRequest, ThinkingAugmentationResult
# Configure logging at import time: timestamped records routed through the
# root handler at INFO level. NOTE(review): basicConfig on import affects the
# embedding application's logging too — confirm this is intended.
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
# Module-level logger named after this module (standard logging convention).
logger = logging.getLogger(__name__)
# Create the MCP server instance; tools below register themselves onto it
# via the @app.tool() decorator.
app = FastMCP("Elrond - Thinking Augmentation")
@app.tool()
async def consult_the_council(proposal: str) -> ThinkingAugmentationResult:
    """
    Analyze a proposal using hierarchical LLM critique and synthesis.
    This tool runs three specialized critique agents (positive, neutral, negative)
    in parallel to analyze the proposal, then synthesizes their perspectives
    into a comprehensive analysis with recommendations.
    Args:
        proposal: Markdown-formatted proposal outlining the salient points
            of the solution to be analyzed
    Returns:
        Complete analysis including all critiques and synthesis with
        consensus view, recommendations, and next steps
    Raises:
        ValueError: If the proposal is empty or shorter than 10 characters.
        RuntimeError: If processing fails for any other reason (original
            exception is chained as the cause).
    """
    try:
        logger.info("Received thinking augmentation request")
        # Validate input; strip once instead of per-check.
        cleaned = proposal.strip()
        if not cleaned:
            raise ValueError("Proposal cannot be empty")
        if len(cleaned) < 10:
            raise ValueError("Proposal must be at least 10 characters long")
        # Create request object and delegate to the agent pipeline.
        request = CritiqueRequest(proposal=proposal)
        result = await process_proposal(request)
        logger.info("Thinking augmentation completed successfully")
        return result
    except ValueError as e:
        # Lazy %-style args avoid formatting when the level is disabled.
        logger.error("Validation error: %s", e)
        raise
    except Exception as e:
        # logger.exception records the traceback for unexpected failures.
        logger.exception("Error in thinking augmentation: %s", e)
        raise RuntimeError(f"Failed to process proposal: {str(e)}") from e
@app.tool()
async def check_system_status() -> dict:
    """
    Check the status of the thinking augmentation system.
    Returns:
        Dictionary containing system status information including
        API key availability, model configurations, and health status.
        On failure, returns a dict with "system_status": "error" and the
        error message instead of raising.
    """
    # Hoisted out of the try block: the import cannot meaningfully fail and
    # the try body should stay minimal.
    import os

    try:
        # Either environment variable is accepted; GEMINI_API_KEY wins.
        api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
        if api_key:
            api_key_status = "configured"
            # Mask the key for display; very short keys are fully hidden.
            api_key_masked = (
                f"{api_key[:8]}...{api_key[-4:]}" if len(api_key) > 12 else "***"
            )
        else:
            api_key_status = "missing"
            api_key_masked = "not_configured"
        # System is functional (no global state)
        system_initialized = True
        status = {
            "system_status": "healthy"
            if api_key_status == "configured"
            else "unhealthy",
            "api_key_status": api_key_status,
            "api_key_masked": api_key_masked,
            "system_initialized": system_initialized,
            "models": {
                "critique_model": "gemini-2.5-flash",
                "synthesis_model": "gemini-2.5-pro",
            },
            "capabilities": [
                "parallel_critique_analysis",
                "structured_synthesis",
                "markdown_proposal_processing",
            ],
        }
        logger.info("System status check completed")
        return status
    except Exception as e:
        # logger.exception preserves the traceback; lazy %-args avoid eager
        # string formatting.
        logger.exception("Error checking system status: %s", e)
        return {"system_status": "error", "error": str(e), "system_initialized": False}
def main():
    """Main entry point for the MCP server."""
    # Import cannot fail meaningfully, so it lives outside the try block.
    import os

    try:
        logger.info("Starting Elrond MCP Server")
        # Startup sanity check: warn early if no API key is present, but
        # still start the server (tools will fail on use instead).
        if os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY"):
            logger.info("Google AI API key found and configured")
        else:
            logger.warning("API key not configured in environment variables")
            logger.warning("The server will start but tools may fail without API key")
        # Blocks until the server stops.
        app.run()
    except KeyboardInterrupt:
        logger.info("Server shutdown requested")
    except Exception as e:
        logger.error(f"Server error: {e}")
        raise


if __name__ == "__main__":
    main()