# cli.py
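"""A minimal MCP CLI: fetch tool definitions from an MCP server, let the LLM
decide which tool to call for each prompt, and forward that call to the
server over JSON-RPC 2.0."""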
import os
import json
import httpx
import openai
from dotenv import load_dotenv
load_dotenv()
# Configure the two endpoints we talk to: the LLM (OpenAI) and the MCP server.
openai.api_key = os.getenv("OPENAI_API_KEY")
MCP_SERVER = os.getenv("MCP_SERVER")
if not MCP_SERVER:
    raise RuntimeError("MCP_SERVER is not set (check your .env file)")
TOOLS_ENDPOINT = f"{MCP_SERVER}/tools"
# 1) Fetch the available tools once and cache them locally
response = httpx.get(TOOLS_ENDPOINT)
response.raise_for_status()
TOOLS = response.json()
if not TOOLS:
    raise RuntimeError("Failed to load tools from server")
print("✅ Loaded tools:", [t["name"] for t in TOOLS])
# 2) Call the LLM, passing along the tool definitions so it can pick one
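# Note: `functions` / `function_call` are the legacy OpenAI function-calling
# parameters; newer SDK versions prefer `tools=[{"type": "function", ...}]`
# and `tool_choice`, but the legacy form still works with chat.completions.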
def call_llm(prompt):
    response = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "user", "content": prompt},
        ],
        functions=TOOLS,
        function_call="auto",
    )
    return response.choices[0].message
# 3) Once the LLM has chosen a tool and its arguments, forward the call to
#    the MCP server as a JSON-RPC 2.0 request and return the parsed response.
def invoke_tool(name: str, args: dict, req_id: int):
    payload = {
        "jsonrpc": "2.0",
        "id": req_id,
        "method": name,
        "params": args,
    }
    resp = httpx.post(f"{MCP_SERVER}/rpc", json=payload)
    # Check for HTTP errors
    if resp.status_code != 200:
        raise RuntimeError(f"Server error ({resp.status_code}): {resp.text}")
    # Safely parse JSON response
    try:
        return resp.json()
    except json.JSONDecodeError:
        raise RuntimeError(f"Invalid JSON response from server: {resp.text}")
def main():
    print("🛠 MCP-CLI ready. Type 'exit' to quit.\n")
    req_id = 1
    while True:
        user_text = input("> ")
        if user_text.lower() in ("exit", "quit"):
            break
        # 1) Ask the LLM what to do
        llm_msg = call_llm(user_text)
        # 2) If the model wants to call a tool...
        if llm_msg.function_call:
            fn = llm_msg.function_call
            print("🔍 LLM wants to call:", fn.name)
            print("🔍 Raw arguments:", fn.arguments)
            # fn.arguments is a JSON string; parse it into a dict
            args = json.loads(fn.arguments)
            print("🔍 Parsed args dict:", args)
            # 3) Forward to your MCP server
            server_resp = invoke_tool(fn.name, args, req_id)
            # Prefer "result", falling back to "error"; check key membership
            # rather than truthiness so falsy results like 0 or "" still show
            result = server_resp["result"] if "result" in server_resp else server_resp.get("error")
            print(f"🔧 {fn.name} → {json.dumps(result, indent=2)}")
            req_id += 1
        # 4) Otherwise, just print the LLM's reply
        else:
            print(f"🤖 {llm_msg.content}")
if __name__ == "__main__":
    main()
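# To try it: put OPENAI_API_KEY and MCP_SERVER in a .env file next to this
# script, then run `python cli.py`.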