# orchestrator.py
import asyncio
import os
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from langchain_openai.chat_models.azure import AzureChatOpenAI
from dotenv import load_dotenv
load_dotenv()
print("[Orchestrator] Starting up...")
# Initialize LLM for the scheduling/dispatcher tool
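# AzureChatOpenAI also picks up AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY
# from the environment, so those are expected in the .env file loaded above.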
dispatcher_llm = AzureChatOpenAI(
    azure_deployment=os.getenv("AZURE_DEPLOYMENT"),
    openai_api_version=os.getenv("OPENAI_API_VERSION"),
    temperature=0.0,
    verbose=True,
)
async def main():
    print("[Orchestrator] Connecting MCP agent_tools server...")
    # Spawn agent_tools.py as a subprocess and talk to it over stdio.
    client = MultiServerMCPClient({
        "orchestrator": {
            "command": "python",
            "args": ["agent_tools.py"],
            "transport": "stdio",
        },
    })
    # get_tools() returns a list of LangChain-compatible tools, not a dict.
    tools = await client.get_tools()
    print(f"[Orchestrator] Loaded tools: {[tool.name for tool in tools]}")
    # Build a ReAct agent over the orchestrator MCP tools
    agent = create_react_agent(dispatcher_llm, tools)
    # Helper to run one query through the agent, optionally with a role prompt
    async def run_query(user_query: str, custom_prompt: str | None = None):
        print("\n[User] ", user_query)
        if custom_prompt:
            prompt_msg = f"{custom_prompt}\nUser query: {user_query}"
        else:
            prompt_msg = user_query
        print("[Orchestrator] Prompt→\n", prompt_msg)
        resp = await agent.ainvoke(
            {"messages": [{"role": "user", "content": prompt_msg}]}
        )
        reply = resp["messages"][-1].content
        print("[Orchestrator] Agent reply→\n", reply)
        return reply
    # Demonstration examples
    await run_query("Calculate the square root of 1521 and multiply by 3.")
    await run_query(
        "Give me a summary of quantum computing developments.",
        custom_prompt="You are the Research Agent; answer scientifically.",
    )
    await run_query(
        "What's the weather in some small village in Karnataka?",
        custom_prompt="You are the Meteorological Agent; find nearest city if needed.",
    )
if __name__ == "__main__":
    asyncio.run(main())
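The orchestrator spawns a sibling agent_tools.py MCP server that is not shown above. For reference, here is a minimal sketch of what such a server could look like, built with FastMCP from the official mcp Python SDK; the tool names square_root and multiply are illustrative assumptions matching the first demo query, not the project's actual tools.

# agent_tools.py (illustrative sketch, not the original file)
import math

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("agent_tools")

@mcp.tool()
def square_root(x: float) -> float:
    """Return the square root of x."""
    return math.sqrt(x)

@mcp.tool()
def multiply(a: float, b: float) -> float:
    """Return the product of a and b."""
    return a * b

if __name__ == "__main__":
    # The stdio transport matches the orchestrator's MultiServerMCPClient config.
    mcp.run(transport="stdio")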