# debug_agent.py · 5.63 kB
#!/usr/bin/env python3
"""
Debug version of the Cal.com agent to understand what tools are being called.
This shows you exactly what the agent is thinking and doing.
"""
import asyncio
import os
from datetime import datetime
from dotenv import load_dotenv
from typing import List, Dict, Any
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_core.tools import BaseTool
class DebugAgent:
    """Agent wrapper around a LangGraph ReAct agent that logs every tool call.

    Wires a Cal.com MCP server (spawned over stdio) into a ReAct agent and
    prints a step-by-step trace of each query: which tools the model chose,
    the arguments it passed, the raw tool results, and the final answer.
    """

    def __init__(self) -> None:
        # Populated by initialize(); kept as None so callers can detect an
        # uninitialized agent.
        self.client = None
        self.agent = None
        self.tools = None
        # Flat "User: ..." / "Agent: ..." transcript for the history view.
        self.conversation_history: List[str] = []

    async def initialize(self) -> None:
        """Connect to the MCP server, load its tools, and build the agent."""
        # override=True so a fresh .env wins over stale shell variables.
        load_dotenv(override=True)
        print("π§ Initializing Debug Agent...")
        # Spawn the Cal.com MCP server as a subprocess speaking stdio.
        self.client = MultiServerMCPClient({
            "cal": {
                "command": "uv",
                "args": ["--directory", os.getcwd(), "run", "myserver.py"],
                "transport": "stdio",
            }
        })
        self.tools = await self.client.get_tools()
        print(f"π Loaded {len(self.tools)} tools:")
        for tool in self.tools:
            print(f" β’ {tool.name}")
        # temperature=0 keeps tool selection deterministic while debugging.
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0,
            model_kwargs={
                "frequency_penalty": 0.1,  # Reduce repetitive behavior
            },
        )
        self.agent = create_react_agent(llm, self.tools)
        # FIX: this message was split across two source lines, which is a
        # SyntaxError for a single-quoted string literal; rejoined here.
        print("β Debug agent ready!\n")

    async def debug_query(self, user_input: str) -> str:
        """Run one query through the agent, printing a full trace.

        Prints every message in the resulting conversation (user turns, AI
        tool-call decisions, tool results) plus a summary, and appends both
        sides to ``conversation_history``.

        Returns:
            The agent's final textual response, or an error string if the
            invocation raised (this method never propagates exceptions).
        """
        print(f"π― USER INPUT: {user_input}")
        print("=" * 60)
        self.conversation_history.append(f"User: {user_input}")
        try:
            response = await self.agent.ainvoke({
                "messages": [HumanMessage(content=user_input)]
            })
            # Walk the full message trace to show what the agent did.
            messages = response["messages"]
            print(f"\nπ ANALYSIS: {len(messages)} messages in conversation")
            tool_calls_made: List[tuple] = []
            for i, msg in enumerate(messages):
                print(f"\nMessage {i+1}: {type(msg).__name__}")
                if isinstance(msg, HumanMessage):
                    print(f" π€ User: {msg.content}")
                elif isinstance(msg, AIMessage):
                    # An AIMessage either requests tool calls or carries the
                    # model's text; show whichever applies.
                    if hasattr(msg, 'tool_calls') and msg.tool_calls:
                        print(f" π€ AI decided to call {len(msg.tool_calls)} tool(s):")
                        for tc in msg.tool_calls:
                            tool_name = tc.get('name', 'Unknown')
                            tool_args = tc.get('args', {})
                            tool_calls_made.append((tool_name, tool_args))
                            print(f" π§ {tool_name}({tool_args})")
                    else:
                        print(f" π€ AI response: {msg.content[:100]}...")
                elif isinstance(msg, ToolMessage):
                    print(f" βοΈ Tool result: {str(msg.content)[:100]}...")
            print(f"\nπ SUMMARY:")
            print(f" β’ Tools called: {len(tool_calls_made)}")
            for tool_name, args in tool_calls_made:
                print(f" - {tool_name}: {args}")
            # The last message in the trace is the agent's final answer.
            final_response = messages[-1].content if messages else "No response"
            print(f" β’ Final response: {final_response[:100]}...")
            self.conversation_history.append(f"Agent: {final_response}")
            print("=" * 60)
            return final_response
        except Exception as e:
            # Top-level boundary for the REPL: report the error instead of
            # crashing the interactive session.
            error_msg = f"β Error: {e}"
            print(error_msg)
            return error_msg

    def show_conversation_history(self) -> None:
        """Print the accumulated User/Agent transcript."""
        print("\nπ CONVERSATION HISTORY:")
        print("=" * 40)
        for entry in self.conversation_history:
            print(entry)
        print("=" * 40)
async def main() -> None:
    """Interactive debugging REPL: read queries and trace the agent's tool use.

    Special inputs: 'quit'/'exit' ends the session, 'history' prints the
    transcript, blank input is ignored. Ctrl-C or EOF also exits cleanly.
    """
    print("π CAL.COM AGENT DEBUGGER")
    print("This will show you exactly what tools the agent calls and why.")
    print("=" * 60)
    agent = DebugAgent()
    await agent.initialize()
    # Interactive debugging session
    while True:
        try:
            user_input = input("\nπ€ Your query (or 'quit' to exit, 'history' to see conversation): ").strip()
            if user_input.lower() in ['quit', 'exit']:
                print("π Goodbye!")
                break
            if user_input.lower() == 'history':
                agent.show_conversation_history()
                continue
            if not user_input:
                continue
            # Process with full debugging
            await agent.debug_query(user_input)
        except (KeyboardInterrupt, EOFError):
            # FIX: also catch EOFError so a closed/piped stdin exits cleanly
            # instead of crashing with an unhandled traceback from input().
            print("\nπ Goodbye!")
            break
if __name__ == "__main__":
    # Script entry point: drive the async REPL on a fresh event loop.
    asyncio.run(main())