#!/usr/bin/env python3
"""
LangGraph application that interacts with MCP Server.
This creates an intelligent agent that can use your MCP tools (web_search,
roll_dice, get_crypto_price) to answer questions.
"""
import json
import os
import traceback
from dotenv import load_dotenv
from typing import TypedDict, Annotated, Sequence
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, ToolMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from operator import add
load_dotenv()
# Configuration
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
raise ValueError("OPENAI_API_KEY must be set in .env file. Get one at https://platform.openai.com/api-keys")
# State definition
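# The `add` reducer (operator.add) tells LangGraph how to merge state updates:
# each node returns {"messages": [...]} and the lists are concatenated, so the
# conversation history accumulates across steps instead of being overwritten.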
class AgentState(TypedDict):
messages: Annotated[Sequence[BaseMessage], add]
# Import MCP tools directly from server
from server import web_search, roll_dice, get_crypto_price
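# Note: this calls the server's functions in-process, which assumes they are
# importable as plain Python callables (not only exposed over the MCP protocol).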
# Wrap MCP tools as LangChain tools
@tool
def search_web(query: str) -> str:
"""Search the web for information about a given query using Tavily API."""
return web_search(query)
@tool
def roll_dice_tool(notation: str, num_rolls: int = 1) -> str:
"""Roll dice with standard notation (e.g., '2d20k1' for 2 twenty-sided dice, keep highest 1)."""
return roll_dice(notation, num_rolls)
@tool
def get_crypto_price_tool(coin_id: str, vs_currency: str = "usd") -> str:
"""Get the current price and market data for a cryptocurrency (e.g., 'bitcoin', 'ethereum', 'dogecoin')."""
return get_crypto_price(coin_id, vs_currency)
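# The @tool decorator uses each function's docstring as the tool description the
# model sees, so the wrappers above double as the tool documentation.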
# Create list of tools
tools = [search_web, roll_dice_tool, get_crypto_price_tool]
# Initialize LLM with tools
llm = ChatOpenAI(
model="gpt-4o-mini",
temperature=0,
api_key=OPENAI_API_KEY
)
# Bind tools to LLM
llm_with_tools = llm.bind_tools(tools)
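# bind_tools attaches each tool's name, description, and argument schema to every
# request, so the model can emit structured tool calls rather than plain text.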
# Define the routing function and graph nodes
def should_continue(state: AgentState) -> str:
"""Determine if we should continue or end"""
messages = state["messages"]
last_message = messages[-1]
# If no tool calls, we're done
if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
return "end"
# Otherwise, use tools
return "continue"
def call_tools(state: AgentState):
"""Execute tools and return results"""
messages = state["messages"]
last_message = messages[-1]
# Ensure last message is an AI message with tool calls
if not isinstance(last_message, AIMessage):
return {"messages": []}
# Get tool calls from the last message
    tool_calls = getattr(last_message, "tool_calls", None) or []
if not tool_calls:
return {"messages": []}
# Execute each tool call
tool_messages = []
for tool_call in tool_calls:
# Handle both dict and object-style tool calls
if isinstance(tool_call, dict):
tool_name = tool_call.get("name") or tool_call.get("function", {}).get("name")
tool_input = tool_call.get("args") or tool_call.get("function", {}).get("arguments", {})
tool_call_id = tool_call.get("id")
else:
tool_name = getattr(tool_call, "name", None)
tool_input = getattr(tool_call, "args", {})
tool_call_id = getattr(tool_call, "id", None)
if not tool_name:
continue
        # Parse tool_input if it arrives as a JSON string
        if isinstance(tool_input, str):
            try:
                tool_input = json.loads(tool_input)
            except json.JSONDecodeError:
                tool_input = {}
print(f"š§ Calling tool: {tool_name} with input: {tool_input}")
# Find and execute the tool
tool_func = None
for t in tools:
if t.name == tool_name:
tool_func = t
break
if tool_func:
try:
result = tool_func.invoke(tool_input)
            except Exception as e:
                result = f"Error: {e}"
                print(f"Tool error: {traceback.format_exc()}")
else:
result = f"Tool {tool_name} not found"
        # Create the tool message; the tool_call_id must echo the id from the
        # AI message's tool call so the model can pair result with request.
tool_message = ToolMessage(
content=str(result),
tool_call_id=tool_call_id
)
tool_messages.append(tool_message)
return {"messages": tool_messages}
def call_model(state: AgentState):
"""Call the LLM with the current state"""
messages = state["messages"]
# Ensure messages is a list
if not isinstance(messages, list):
messages = list(messages)
# Get response from LLM
response = llm_with_tools.invoke(messages)
return {"messages": [response]}
# Build the graph
workflow = StateGraph(AgentState)
# Add nodes
workflow.add_node("agent", call_model)
workflow.add_node("tools", call_tools)
# Add edges
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
"agent",
should_continue,
{
"continue": "tools",
"end": END
}
)
workflow.add_edge("tools", "agent")
# Compile the graph
app = workflow.compile()
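# Control flow of the compiled graph:
#   agent -> (tool calls?) -> tools -> agent -> ... -> END
# The loop repeats until the model answers without requesting a tool.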
# Main function to run the agent
def run_agent(query: str):
"""Run the agent with a query"""
print(f"\nš¤ Agent Question: {query}\n")
# Initialize state
initial_state = {
"messages": [HumanMessage(content=query)]
}
# Run the graph
result = None
for state in app.stream(initial_state):
result = state
# Print intermediate steps
for node_name, node_output in state.items():
if node_name == "agent" and node_output.get("messages"):
last_msg = node_output["messages"][-1]
if hasattr(last_msg, "tool_calls") and last_msg.tool_calls:
print(f"š Agent wants to use tools: {[tc['name'] for tc in last_msg.tool_calls]}")
# Get final answer
if result:
final_messages = []
for node_output in result.values():
final_messages.extend(node_output.get("messages", []))
# Find the last AI message (the final answer)
for msg in reversed(final_messages):
if isinstance(msg, AIMessage):
if not hasattr(msg, "tool_calls") or not msg.tool_calls:
print(f"\nā
Final Answer:\n{msg.content}\n")
return msg.content
print(f"\nā
Final Answer:\n{final_messages[-1].content}\n")
return final_messages[-1].content
return "No response generated"
# Interactive mode
def interactive_mode():
"""Run in interactive mode"""
print("=" * 60)
print("š LangGraph MCP Agent is ready!")
print("=" * 60)
print("\nYou can ask questions like:")
print(" - 'What's the price of bitcoin?'")
print(" - 'Search for latest AI news'")
print(" - 'Roll 2d20 dice'")
print(" - 'Is Bitcoin having a good day? Check price and news'")
print("\nType 'exit' or 'quit' to stop.\n")
while True:
try:
            query = input("\n💬 Your question: ").strip()
            if query.lower() in ['exit', 'quit', 'q']:
                print("\n👋 Goodbye!\n")
break
if not query:
continue
run_agent(query)
except KeyboardInterrupt:
print("\n\nš Goodbye!\n")
break
        except Exception as e:
            print(f"\n❌ Error: {e}\n")
            traceback.print_exc()
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
# Run with query from command line
query = " ".join(sys.argv[1:])
run_agent(query)
else:
# Run in interactive mode
interactive_mode()