"""
Interactive MCP Chat Client
This module provides an interactive chat interface that connects to the
weather MCP server and uses an LLM (Groq) to answer user questions. The agent
can autonomously call MCP tools (e.g., get_alerts) when needed to answer
questions about weather.
Features:
- Conversation memory: remembers previous messages in the session
- Commands: 'exit'/'quit' to end, 'clear' to reset conversation history
- Automatic tool use: the agent decides when to call get_alerts based on user queries
"""
import asyncio
import os
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from mcp_use import MCPAgent, MCPClient
async def run_memory_chat():
    """
    Run the interactive chat loop with MCP agent and conversation memory.

    The client loads configuration from a JSON file, spawns/connects to the
    MCP server, and creates an agent that uses the LLM to process user input
    and optionally invoke MCP tools.

    Raises:
        RuntimeError: if GROQ_API_KEY is not available in the environment
            or the .env file.
    """
    # Load environment variables from .env file (e.g., GROQ_API_KEY).
    # load_dotenv() already populates os.environ, so no re-assignment is
    # needed; the old `os.environ[...] = os.getenv(...)` pattern raised a
    # confusing TypeError when the key was missing.
    load_dotenv()
    if not os.getenv("GROQ_API_KEY"):
        raise RuntimeError(
            "GROQ_API_KEY is not set. Add it to your environment or .env file."
        )
    # Path to MCP config JSON - defines how to connect to the weather server
    # weather_mcp.json uses stdio: client spawns "uv run server/weather.py"
    config_file = "server/weather_mcp.json"
    print("Initializing chat...")
    # Create MCPClient: parses config and spawns/connects to server
    # Sessions are created lazily when the agent first runs
    client = MCPClient.from_config_file(config_file)
    # Create LLM instance: Groq's Llama-3.3 70B model for reasoning
    llm = ChatGroq(model="llama-3.3-70b-versatile")
    # Create MCPAgent: combines LLM with MCP tools into an autonomous agent
    # memory_enabled=True: agent remembers conversation history for context
    # max_steps=15: limits how many tool calls per turn to prevent infinite loops
    agent = MCPAgent(
        llm=llm,
        client=client,
        max_steps=15,
        memory_enabled=True,  # Enable built-in conversation memory
    )
    print("\n===== Interactive MCP Chat =====")
    print("Type 'exit' or 'quit' to end the conversation")
    print("Type 'clear' to clear conversation history")
    print("==================================\n")
    try:
        # Main chat loop: read user input, get agent response, repeat
        while True:
            # Get user input from stdin; treat Ctrl-D (EOF) and Ctrl-C as
            # a graceful exit instead of crashing with a traceback
            try:
                user_input = input("\nYou: ")
            except (EOFError, KeyboardInterrupt):
                print("\nEnding conversation...")
                break
            # Ignore blank lines rather than sending them to the LLM
            if not user_input.strip():
                continue
            # Check for exit command
            if user_input.lower() in ["exit", "quit"]:
                print("Ending conversation...")
                break
            # Check for clear history command
            if user_input.lower() == "clear":
                agent.clear_conversation_history()
                print("Conversation history cleared.")
                continue
            # Get response from agent (may involve tool calls like get_alerts)
            print("\nAssistant: ", end="", flush=True)
            try:
                # Run agent: it processes user input, may call tools, returns final answer
                # memory_enabled=True means previous messages are included in context
                response = await agent.run(user_input)
                print(response)
            except Exception as e:
                # Best-effort: report the error for this turn and keep chatting
                print(f"\nError: {e}")
    finally:
        # Clean up: close all MCP sessions and terminate spawned server processes
        if client and client.sessions:
            await client.close_all_sessions()
def main() -> None:
    """Synchronous entry point: drive the async chat loop to completion."""
    asyncio.run(run_memory_chat())


if __name__ == "__main__":
    main()