Skip to main content
Glama
darkangelpraha

x.ai Grok MCP Server

index.ts (6.74 kB)
#!/usr/bin/env node import { Server } from "@modelcontextprotocol/sdk/server/index.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { CallToolRequestSchema, ListToolsRequestSchema, Tool, } from "@modelcontextprotocol/sdk/types.js"; const XAI_API_URL = "https://api.x.ai/v1/chat/completions"; interface ChatMessage { role: "system" | "user" | "assistant"; content: string; } interface ChatCompletionRequest { messages: ChatMessage[]; model: string; stream?: boolean; temperature?: number; max_tokens?: number; } interface ChatCompletionResponse { id: string; object: string; created: number; model: string; choices: Array<{ index: number; message: { role: string; content: string; }; finish_reason: string; }>; usage?: { prompt_tokens: number; completion_tokens: number; total_tokens: number; }; } class XAIGrokServer { private server: Server; private apiKey: string; constructor() { this.apiKey = process.env.XAI_API_KEY || ""; if (!this.apiKey) { throw new Error( "XAI_API_KEY environment variable is required. " + "Use 1Password CLI reference: op://AI/API Credentials | Grok Business/API KEY" ); } this.server = new Server( { name: "xai-grok-mcp-server", version: "1.0.0", }, { capabilities: { tools: {}, }, } ); this.setupHandlers(); this.setupErrorHandling(); } private setupHandlers(): void { this.server.setRequestHandler(ListToolsRequestSchema, async () => ({ tools: [ { name: "chat_completion", description: "Send a chat completion request to x.ai Grok API. Supports system messages, user messages, and various models (grok-beta, grok-2-latest, grok-4-latest).", inputSchema: { type: "object", properties: { messages: { type: "array", description: "Array of chat messages. 
Each message should have 'role' (system/user/assistant) and 'content' (string).", items: { type: "object", properties: { role: { type: "string", enum: ["system", "user", "assistant"], description: "The role of the message sender", }, content: { type: "string", description: "The content of the message", }, }, required: ["role", "content"], }, }, model: { type: "string", description: "The Grok model to use. Options: grok-beta, grok-2-latest, grok-4-latest", default: "grok-4-latest", }, temperature: { type: "number", description: "Sampling temperature between 0 and 2. Higher values make output more random.", default: 1.0, minimum: 0, maximum: 2, }, max_tokens: { type: "number", description: "Maximum number of tokens to generate", minimum: 1, }, }, required: ["messages"], }, } as Tool, ], })); this.server.setRequestHandler( CallToolRequestSchema, async (request) => { if (request.params.name === "chat_completion") { return await this.handleChatCompletion(request.params.arguments as any); } throw new Error(`Unknown tool: ${request.params.name}`); } ); } private async handleChatCompletion(args: { messages: ChatMessage[]; model?: string; temperature?: number; max_tokens?: number; }): Promise<{ content: Array<{ type: "text"; text: string; }>; }> { if (!args.messages || !Array.isArray(args.messages) || args.messages.length === 0) { throw new Error("Messages array is required and must not be empty"); } const requestBody: ChatCompletionRequest = { messages: args.messages, model: args.model || "grok-4-latest", stream: false, temperature: args.temperature ?? 
1.0, }; if (args.max_tokens) { requestBody.max_tokens = args.max_tokens; } try { const response = await fetch(XAI_API_URL, { method: "POST", headers: { "Content-Type": "application/json", Authorization: `Bearer ${this.apiKey}`, }, body: JSON.stringify(requestBody), }); if (!response.ok) { const errorText = await response.text(); let errorMessage = `x.ai API error: ${response.status} ${response.statusText}`; try { const errorJson = JSON.parse(errorText); if (errorJson.error) { errorMessage += ` - ${errorJson.error}`; } if (errorJson.message) { errorMessage += ` - ${errorJson.message}`; } } catch { errorMessage += ` - ${errorText}`; } throw new Error(errorMessage); } const data = (await response.json()) as ChatCompletionResponse; if (!data.choices || data.choices.length === 0) { throw new Error("No choices returned from API"); } const content = data.choices[0].message.content; return { content: [ { type: "text", text: JSON.stringify( { content, model: data.model, usage: data.usage, finish_reason: data.choices[0].finish_reason, }, null, 2 ), }, ], }; } catch (error) { if (error instanceof Error) { throw new Error(`Chat completion failed: ${error.message}`); } throw error; } } private setupErrorHandling(): void { this.server.onerror = (error) => { console.error("[MCP Error]", error); }; process.on("SIGINT", async () => { await this.server.close(); process.exit(0); }); } async run(): Promise<void> { const transport = new StdioServerTransport(); await this.server.connect(transport); console.error("x.ai Grok MCP server running on stdio"); } } const server = new XAIGrokServer(); server.run().catch((error) => { console.error("Fatal error:", error); process.exit(1); });

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/darkangelpraha/xai-grok-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.