import asyncio
import json
import logging
from aiogram import Bot, Dispatcher, types
from aiogram.filters import Command
from openai import AsyncOpenAI
from config import TELEGRAM_TOKEN, OPENAI_KEY
from mcp_client import get_mcp_tools, call_mcp_tool
logging.basicConfig(level=logging.INFO)
bot = Bot(token=TELEGRAM_TOKEN)
dp = Dispatcher()
# Async client, so OpenAI calls don't block the aiogram event loop
client = AsyncOpenAI(api_key=OPENAI_KEY)
# Prepare the tool descriptions in the format OpenAI expects
def get_tools_for_openai():
    mcp_tools = get_mcp_tools()
    openai_tools = []
    for tool in mcp_tools:
        # Assumes get_mcp_tools() returns dicts already shaped like
        # OpenAI function specs: {"name", "description", "parameters"}
        openai_tools.append({
            "type": "function",
            "function": tool
        })
    return openai_tools
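
# The resulting list looks roughly like this (illustrative tool name and
# schema -- the actual tools come from the MCP server):
# [{"type": "function",
#   "function": {"name": "query_db",
#                "description": "Run a read-only query against the database",
#                "parameters": {"type": "object",
#                               "properties": {"sql": {"type": "string"}},
#                               "required": ["sql"]}}}]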
@dp.message()
async def handle_message(message: types.Message):
    user_input = message.text
    if not user_input:
        return  # ignore non-text updates (stickers, photos, etc.)

    # 1. Query the LLM, passing along the available tools
    messages = [{"role": "user", "content": user_input}]
    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        tools=get_tools_for_openai(),
        tool_choice="auto"
    )
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls

    # 2. If the LLM decided to call a tool
    if tool_calls:
        messages.append(response_message)
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_args = json.loads(tool_call.function.arguments)
            # Call the local MCP server over HTTP
            tool_result = call_mcp_tool(function_name, function_args)
            messages.append({
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": function_name,
                "content": json.dumps(tool_result, ensure_ascii=False),
            })
        # 3. Final LLM answer, now grounded in the data from the DB
        final_response = await client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages
        )
        await message.answer(final_response.choices[0].message.content)
    else:
        # Plain reply when no tools are needed
        await message.answer(response_message.content)
async def main():
    await dp.start_polling(bot)

if __name__ == "__main__":
    asyncio.run(main())
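
# --- mcp_client.py (minimal sketch) ---
# For reference: one way the mcp_client module imported above could look.
# The base URL and the /tools and /call endpoints are assumptions for
# illustration; the real module may talk to the MCP server differently.
import requests

MCP_BASE_URL = "http://localhost:8000"  # hypothetical local MCP server address

def get_mcp_tools():
    """Fetch tool descriptions from the MCP server (assumed OpenAI-compatible)."""
    resp = requests.get(f"{MCP_BASE_URL}/tools", timeout=10)
    resp.raise_for_status()
    return resp.json()

def call_mcp_tool(name, arguments):
    """Invoke a tool by name with JSON arguments and return the JSON result."""
    resp = requests.post(
        f"{MCP_BASE_URL}/call",
        json={"name": name, "arguments": arguments},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()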