Tavily Web Search MCP Server

by BQ31X
debug_agent.py • 5.63 kB
#!/usr/bin/env python3
"""
Debug version of the Cal.com agent to understand what tools are being called.
This shows you exactly what the agent is thinking and doing.
"""
import asyncio
import os

from dotenv import load_dotenv
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


class DebugAgent:
    """Agent wrapper that logs all tool calls for debugging."""

    def __init__(self):
        self.client = None
        self.agent = None
        self.tools = None
        self.conversation_history = []

    async def initialize(self):
        """Initialize with debugging enabled."""
        load_dotenv(override=True)

        print("🔧 Initializing Debug Agent...")

        # Initialize the MCP client, spawning the local server over stdio
        self.client = MultiServerMCPClient({
            "cal": {
                "command": "uv",
                "args": ["--directory", os.getcwd(), "run", "myserver.py"],
                "transport": "stdio",
            }
        })

        # Load tools
        self.tools = await self.client.get_tools()
        print(f"📋 Loaded {len(self.tools)} tools:")
        for tool in self.tools:
            print(f"  • {tool.name}")

        # Create the agent with a debug-friendly LLM
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0,
            model_kwargs={
                "frequency_penalty": 0.1,  # Reduce repetitive behavior
            },
        )

        self.agent = create_react_agent(llm, self.tools)
        print("✅ Debug agent ready!\n")

    async def debug_query(self, user_input: str) -> str:
        """Process a query with detailed debugging output."""
        print(f"🎯 USER INPUT: {user_input}")
        print("=" * 60)

        # Store the query
        self.conversation_history.append(f"User: {user_input}")

        try:
            # Run the agent
            response = await self.agent.ainvoke({
                "messages": [HumanMessage(content=user_input)]
            })

            # Analyze what happened
            messages = response["messages"]
            print(f"\n📊 ANALYSIS: {len(messages)} messages in conversation")

            tool_calls_made = []

            for i, msg in enumerate(messages):
                print(f"\nMessage {i+1}: {type(msg).__name__}")

                if isinstance(msg, HumanMessage):
                    print(f"  👤 User: {msg.content}")
                elif isinstance(msg, AIMessage):
                    if hasattr(msg, 'tool_calls') and msg.tool_calls:
                        print(f"  🤖 AI decided to call {len(msg.tool_calls)} tool(s):")
                        for tc in msg.tool_calls:
                            tool_name = tc.get('name', 'Unknown')
                            tool_args = tc.get('args', {})
                            tool_calls_made.append((tool_name, tool_args))
                            print(f"    🔧 {tool_name}({tool_args})")
                    else:
                        print(f"  🤖 AI response: {msg.content[:100]}...")
                elif isinstance(msg, ToolMessage):
                    print(f"  ⚙️ Tool result: {str(msg.content)[:100]}...")

            # Summary
            print("\n📈 SUMMARY:")
            print(f"  • Tools called: {len(tool_calls_made)}")
            for tool_name, args in tool_calls_made:
                print(f"    - {tool_name}: {args}")

            final_response = messages[-1].content if messages else "No response"
            print(f"  • Final response: {final_response[:100]}...")

            # Store the response
            self.conversation_history.append(f"Agent: {final_response}")

            print("=" * 60)
            return final_response

        except Exception as e:
            error_msg = f"❌ Error: {e}"
            print(error_msg)
            return error_msg

    def show_conversation_history(self):
        """Show the full conversation history."""
        print("\n📚 CONVERSATION HISTORY:")
        print("=" * 40)
        for entry in self.conversation_history:
            print(entry)
        print("=" * 40)


async def main():
    """Main debugging session."""
    print("🐛 CAL.COM AGENT DEBUGGER")
    print("This will show you exactly what tools the agent calls and why.")
    print("=" * 60)

    agent = DebugAgent()
    await agent.initialize()

    # Interactive debugging session
    while True:
        try:
            user_input = input("\n🎤 Your query (or 'quit' to exit, 'history' to see conversation): ").strip()

            if user_input.lower() in ['quit', 'exit']:
                print("👋 Goodbye!")
                break

            if user_input.lower() == 'history':
                agent.show_conversation_history()
                continue

            if not user_input:
                continue

            # Process with full debugging
            await agent.debug_query(user_input)

        except KeyboardInterrupt:
            print("\n👋 Goodbye!")
            break


if __name__ == "__main__":
    asyncio.run(main())
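The client config above launches a local myserver.py over stdio, but that file is not shown on this page. For orientation only, here is a rough sketch of what such a server could look like, using the FastMCP helper from the official mcp Python SDK; the tool below is a stubbed placeholder, not the real Cal.com integration.

#!/usr/bin/env python3
# myserver.py (hypothetical sketch, not the actual server from this repo)
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("cal")

@mcp.tool()
def get_event_types() -> list[dict]:
    """Return the available Cal.com event types (stubbed placeholder data)."""
    return [{"id": 1, "title": "30 Minute Meeting", "slug": "30min"}]

if __name__ == "__main__":
    # Serve over stdio so the MultiServerMCPClient config above can spawn it
    mcp.run(transport="stdio")

To try the debugger itself, run it from the project root with an OPENAI_API_KEY available in a local .env file, for example with uv run debug_agent.py (assuming the project is managed with uv, as the stdio config suggests).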

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/BQ31X/MCP-Session-Code'
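If you prefer to query the API from code, the following is a minimal Python equivalent of the curl call above; the response schema is not documented on this page, so it is treated as opaque JSON.

import json
import urllib.request

# Same endpoint as the curl example above
URL = "https://glama.ai/api/mcp/v1/servers/BQ31X/MCP-Session-Code"

with urllib.request.urlopen(URL) as resp:
    server_info = json.load(resp)

# Pretty-print whatever the directory returns for this server
print(json.dumps(server_info, indent=2))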

If you have feedback or need assistance with the MCP directory API, please join our Discord server.