Skip to main content
Glama
snilld-ai
by snilld-ai
server.py — 11.3 kB
"""MCP server exposing OpenAI assistant and file-management tools over stdio.

Registers a set of MCP tools (ask a model a question, CRUD assistants,
upload/list/delete files) backed by an ``LLMConnector`` and serves them
over the MCP stdio transport.
"""

import asyncio
import logging
import sys
from typing import Optional

import click
import mcp
# Explicit submodule import: ``mcp.server.stdio.stdio_server`` is accessed by
# attribute below, which only works if the submodule has been imported.
import mcp.server.stdio
import mcp.types as types
from mcp.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions

from .llm import LLMConnector

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


def serve(openai_api_key: str) -> Server:
    """Build and return an MCP ``Server`` wired to an ``LLMConnector``.

    Args:
        openai_api_key: API key forwarded to the OpenAI-backed connector.

    Returns:
        A configured ``Server`` with list-tools and call-tool handlers.
    """
    server = Server("openai-server")
    connector = LLMConnector(openai_api_key)

    @server.list_tools()
    async def handle_list_tools() -> list[types.Tool]:
        """Advertise the tools this server supports, with JSON input schemas."""
        return [
            types.Tool(
                name="ask-openai",
                description="Ask my assistant models a direct question",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Ask assistant"},
                        "model": {
                            "type": "string",
                            "default": "gpt-4.1",
                            "enum": [
                                "gpt-4.1",
                                "gpt-4.1-mini",
                                "gpt-4o",
                                "gpt-4o-mini",
                                "o3",
                                "o3-mini",
                                "o3-deep-research",
                            ],
                        },
                        "temperature": {
                            "type": "number",
                            "default": 0.1,
                            "minimum": 0,
                            "maximum": 1,
                        },
                        "max_tokens": {
                            "type": "integer",
                            "default": 500,
                            "minimum": 1,
                            "maximum": 40000,
                        },
                    },
                    "required": ["query"],
                },
            ),
            types.Tool(
                name="list-assistants",
                description="List OpenAI assistants",
                inputSchema={"type": "object", "properties": {}},
            ),
            types.Tool(
                name="retrieve-assistant",
                description="Retrieve an OpenAI assistant by ID",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "assistant_id": {
                            "type": "string",
                            "description": "The ID of the assistant to retrieve",
                        }
                    },
                    "required": ["assistant_id"],
                },
            ),
            types.Tool(
                name="create-assistant",
                description="Create a new OpenAI assistant",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "name": {"type": "string", "description": "The name of the assistant"},
                        "instructions": {
                            "type": "string",
                            "description": "The assistant's instructions",
                        },
                        "model": {
                            "type": "string",
                            "default": "gpt-4-turbo",
                            "description": "The model to use",
                        },
                        "temperature": {
                            "type": "number",
                            "default": 0.7,
                            "description": "The sampling temperature",
                        },
                        "file_ids": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "A list of file IDs to attach to the assistant",
                        },
                        "enable_file_search": {
                            "type": "boolean",
                            "default": True,
                            "description": "Enable file search tool",
                        },
                    },
                    "required": ["name", "instructions", "model"],
                },
            ),
            types.Tool(
                name="update-assistant",
                description="Update an existing OpenAI assistant",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "assistant_id": {
                            "type": "string",
                            "description": "The ID of the assistant to update",
                        },
                        "name": {
                            "type": "string",
                            "description": "The new name of the assistant",
                        },
                        "instructions": {
                            "type": "string",
                            "description": "The new instructions for the assistant",
                        },
                        "model": {
                            "type": "string",
                            "description": "The new model for the assistant",
                        },
                        "temperature": {
                            "type": "number",
                            "description": "The new sampling temperature",
                        },
                        "file_ids": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "The new list of file IDs attached to the assistant",
                        },
                        "enable_file_search": {
                            "type": "boolean",
                            "description": "Enable or disable the file search tool",
                        },
                    },
                    "required": ["assistant_id"],
                },
            ),
            types.Tool(
                name="delete-assistant",
                description="Delete an OpenAI assistant",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "assistant_id": {
                            "type": "string",
                            "description": "The ID of the assistant to delete",
                        }
                    },
                    "required": ["assistant_id"],
                },
            ),
            types.Tool(
                name="upload-file",
                description="Upload a file for use by assistants",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "file_path": {
                            "type": "string",
                            "description": "The local path to the file to upload",
                        }
                    },
                    "required": ["file_path"],
                },
            ),
            types.Tool(
                name="list-files",
                description="List files available for assistants",
                inputSchema={"type": "object", "properties": {}},
            ),
            types.Tool(
                name="delete-file",
                description="Delete a file",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "file_id": {
                            "type": "string",
                            "description": "The ID of the file to delete",
                        }
                    },
                    "required": ["file_id"],
                },
            ),
        ]

    @server.call_tool()
    async def handle_tool_call(name: str, arguments: dict | None) -> list[types.TextContent]:
        """Dispatch a tool invocation to the connector and format the result.

        Unknown tool names and connector failures are reported back to the
        client as an ``Error: ...`` text content rather than raised.
        """
        try:
            if arguments is None:
                arguments = {}
            if name == "ask-openai":
                # FIX: defaults now match the advertised schema (was "gpt-4" / 0.7,
                # contradicting the schema's "gpt-4.1" / 0.1).
                response = await connector.ask_openai(
                    query=arguments["query"],
                    model=arguments.get("model", "gpt-4.1"),
                    temperature=arguments.get("temperature", 0.1),
                    max_tokens=arguments.get("max_tokens", 500),
                )
                return [types.TextContent(type="text", text=f"OpenAI Response:\n{response}")]
            elif name == "list-assistants":
                response = await connector.list_assistants()
                assistants_str = "\n".join(
                    f"- ID: {a.id}, Name: {a.name}, Model: {a.model}" for a in response
                )
                if not assistants_str:
                    assistants_str = "No assistants found."
                return [types.TextContent(type="text", text=f"Assistants:\n{assistants_str}")]
            elif name == "retrieve-assistant":
                assistant_id = arguments.get("assistant_id")
                if not assistant_id:
                    raise ValueError("assistant_id is required for retrieve-assistant")
                response = await connector.retrieve_assistant(assistant_id)
                return [types.TextContent(type="text", text=f"Assistant Details:\n{response}")]
            elif name == "create-assistant":
                tools = []
                if arguments.get("enable_file_search", True):
                    tools.append({"type": "file_search"})
                response = await connector.create_assistant(
                    name=arguments["name"],
                    instructions=arguments["instructions"],
                    model=arguments["model"],
                    temperature=arguments.get("temperature", 0.7),
                    file_ids=arguments.get("file_ids"),
                    tools=tools,
                )
                return [types.TextContent(type="text", text=f"Assistant created:\n{response}")]
            elif name == "update-assistant":
                # Work on a copy so the caller-supplied dict is never mutated
                # (the original popped from `arguments` before copying).
                update_kwargs = arguments.copy()
                assistant_id = update_kwargs.pop("assistant_id")
                if "enable_file_search" in update_kwargs:
                    tools = []
                    if update_kwargs.pop("enable_file_search"):
                        tools.append({"type": "file_search"})
                    update_kwargs["tools"] = tools
                response = await connector.update_assistant(assistant_id, **update_kwargs)
                return [types.TextContent(type="text", text=f"Assistant updated:\n{response}")]
            elif name == "delete-assistant":
                assistant_id = arguments["assistant_id"]
                response = await connector.delete_assistant(assistant_id)
                return [
                    types.TextContent(
                        type="text",
                        text=(
                            f"Assistant deleted: {response.id}, "
                            f"status: {'deleted' if response.deleted else 'not deleted'}"
                        ),
                    )
                ]
            elif name == "upload-file":
                response = await connector.upload_file(arguments["file_path"])
                return [
                    types.TextContent(
                        type="text",
                        text=f"File uploaded: {response.filename}, ID: {response.id}",
                    )
                ]
            elif name == "list-files":
                response = await connector.list_files()
                files_str = "\n".join(
                    f"- ID: {f.id}, Name: {f.filename}, Size: {f.bytes} bytes" for f in response
                )
                if not files_str:
                    files_str = "No files found."
                return [types.TextContent(type="text", text=f"Available Files:\n{files_str}")]
            elif name == "delete-file":
                file_id = arguments["file_id"]
                response = await connector.delete_file(file_id)
                return [
                    types.TextContent(
                        type="text",
                        text=(
                            f"File deleted: {response.id}, "
                            f"status: {'deleted' if response.deleted else 'not deleted'}"
                        ),
                    )
                ]
            raise ValueError(f"Unknown tool: {name}")
        except Exception as e:
            # Full traceback to the log; a short message back to the client.
            logger.exception("Tool call failed: %s", e)
            return [types.TextContent(type="text", text=f"Error: {str(e)}")]

    return server


@click.command()
@click.option("--openai-api-key", envvar="OPENAI_API_KEY", required=True)
def main(openai_api_key: str):
    """Run the MCP server over stdio until EOF or interrupt.

    Exits with status 1 on unexpected failure; a Ctrl-C shutdown is logged
    and treated as a clean exit.
    """
    try:

        async def _run():
            async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
                server = serve(openai_api_key)
                await server.run(
                    read_stream,
                    write_stream,
                    InitializationOptions(
                        server_name="openai-server",
                        server_version="0.1.0",
                        capabilities=server.get_capabilities(
                            notification_options=NotificationOptions(),
                            experimental_capabilities={},
                        ),
                    ),
                )

        asyncio.run(_run())
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
    except Exception:
        logger.exception("Server failed")
        sys.exit(1)


if __name__ == "__main__":
    main()

Implementation Reference

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/snilld-ai/openai-assistant-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.