app.py
from flask import Flask, request, jsonify
import os
import requests
from mcp_handler import MCPHandler
from tools.weather import WeatherTool
from tools.calculator import CalculatorTool

app = Flask(__name__)

# Initialize MCP handler with available tools
handler = MCPHandler()
handler.register_tool(WeatherTool())
handler.register_tool(CalculatorTool())
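
# MCPHandler is defined in mcp_handler.py (not shown here). Based on how it is
# used below, it is assumed to expose roughly this interface:
#
#     handler.register_tool(tool)         # add a tool instance to the registry
#     handler.get_tool_definitions()      # text/JSON description of all registered
#                                         # tools, injected into the system prompt
#     handler.process_response(response)  # scan LLM output for tool invocations,
#                                         # execute them, and return the final text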

# LLM API configuration
LLM_API_KEY = os.environ.get("LLM_API_KEY")
LLM_API_URL = "https://api.llmprovider.com/v1/chat/completions"  # Example URL


@app.route("/chat", methods=["POST"])
def chat():
    # Guard against missing or non-JSON request bodies
    data = request.get_json(silent=True) or {}
    messages = data.get("messages", [])

    # If this is the first message, inject tool definitions
    if len(messages) == 1:
        system_message = {
            "role": "system",
            "content": f"You are an AI assistant with access to the following tools:\n{handler.get_tool_definitions()}"
        }
        messages.insert(0, system_message)

    # Get response from LLM
    response = get_llm_response(messages)

    # Check if the response contains tool invocations and execute them
    processed_response = handler.process_response(response)

    # Add the processed response to messages for context
    messages.append({"role": "assistant", "content": processed_response})

    return jsonify({
        "response": processed_response,
        "messages": messages
    })


def get_llm_response(messages):
    """Get a response from the LLM API."""
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {LLM_API_KEY}"
    }
    payload = {
        "model": "llm-model-name",  # Placeholder; substitute your provider's model ID
        "messages": messages,
        "max_tokens": 1000
    }
    response = requests.post(LLM_API_URL, headers=headers, json=payload, timeout=30)
    response.raise_for_status()  # Surface HTTP errors instead of failing on .json()
    return response.json()["choices"][0]["message"]["content"]


if __name__ == "__main__":
    # debug=True and host="0.0.0.0" are convenient for local development,
    # but should not be used as-is in production
    app.run(debug=True, host="0.0.0.0", port=5000)
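
# A minimal smoke test once the server is running (assumes LLM_API_KEY is set
# and the example LLM_API_URL above points at a real endpoint):
#
#     export LLM_API_KEY="your-key-here"
#     python app.py
#     curl -X POST http://localhost:5000/chat \
#          -H "Content-Type: application/json" \
#          -d '{"messages": [{"role": "user", "content": "What is 2 + 2?"}]}'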