index.ts (25.5 kB)
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
  Tool,
  CallToolRequest,
  CallToolResult,
  ErrorCode,
  McpError,
} from "@modelcontextprotocol/sdk/types.js";
import OpenAI from "openai";
import { z } from "zod";
import fs from "fs/promises";
import path from "path";
import { config as dotenvConfig } from "dotenv";
import os from "os";

// Load environment variables
dotenvConfig();

const ConfigSchema = z.object({
  openrouterApiKey: z.string().min(1, "OpenRouter API key is required"),
});

type Config = z.infer<typeof ConfigSchema>;

// Supported aspect ratios for image generation
const ASPECT_RATIOS = [
  "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"
] as const;

type AspectRatio = typeof ASPECT_RATIOS[number];

class OpenRouterImageMCP {
  private server: Server;
  private openai: OpenAI | null = null;
  private config: Config | null = null;
  private lastImagePath: string | null = null;
  private configSource: 'environment' | 'config_file' | 'not_configured' = 'not_configured';

  constructor() {
    this.server = new Server(
      {
        name: "openrouter-image-mcp",
        version: "1.0.0",
      },
      {
        capabilities: {
          tools: {},
        },
      }
    );

    this.setupHandlers();
  }

  private setupHandlers() {
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          {
            name: "configure_openrouter_token",
            description: "Configure your OpenRouter API token for image generation",
            inputSchema: {
              type: "object",
              properties: {
                apiKey: {
                  type: "string",
                  description: "Your OpenRouter API key from https://openrouter.ai/settings/keys",
                },
              },
              required: ["apiKey"],
            },
          },
          {
            name: "generate_image",
            description: "Generate a NEW image from text prompt using OpenRouter (Gemini 2.5 Flash Image). Use this ONLY when creating a completely new image, not when modifying an existing one.",
            inputSchema: {
              type: "object",
              properties: {
                prompt: {
                  type: "string",
                  description: "Text prompt describing the NEW image to create from scratch",
                },
                aspectRatio: {
                  type: "string",
                  enum: ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
                  description: "Optional aspect ratio for the generated image. Default is 1:1 (1024×1024). Options: 1:1, 16:9, 9:16, 3:4, 4:3, etc.",
                },
              },
              required: ["prompt"],
            },
          },
          {
            name: "edit_image",
            description: "Edit a SPECIFIC existing image file, optionally using additional reference images. Use this when you have the exact file path of an image to modify.",
            inputSchema: {
              type: "object",
              properties: {
                imagePath: {
                  type: "string",
                  description: "Full file path to the main image file to edit",
                },
                prompt: {
                  type: "string",
                  description: "Text describing the modifications to make to the existing image",
                },
                referenceImages: {
                  type: "array",
                  items: { type: "string" },
                  description: "Optional array of file paths to additional reference images to use during editing (e.g., for style transfer, adding elements, etc.)",
                },
                aspectRatio: {
                  type: "string",
                  enum: ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
                  description: "Optional aspect ratio for the edited image. Default is 1:1 (1024×1024).",
                },
              },
              required: ["imagePath", "prompt"],
            },
          },
          {
            name: "get_configuration_status",
            description: "Check if OpenRouter API token is configured",
            inputSchema: {
              type: "object",
              properties: {},
              additionalProperties: false,
            },
          },
          {
            name: "continue_editing",
            description: "Continue editing the LAST image that was generated or edited in this session, optionally using additional reference images. Use this for iterative improvements, modifications, or changes to the most recent image. This automatically uses the previous image without needing a file path.",
            inputSchema: {
              type: "object",
              properties: {
                prompt: {
                  type: "string",
                  description: "Text describing the modifications/changes/improvements to make to the last image (e.g., 'change the hat color to red', 'remove the background', 'add flowers')",
                },
                referenceImages: {
                  type: "array",
                  items: { type: "string" },
                  description: "Optional array of file paths to additional reference images to use during editing (e.g., for style transfer, adding elements from other images, etc.)",
                },
                aspectRatio: {
                  type: "string",
                  enum: ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
                  description: "Optional aspect ratio for the edited image. Default is 1:1 (1024×1024).",
                },
              },
              required: ["prompt"],
            },
          },
          {
            name: "get_last_image_info",
            description: "Get information about the last generated/edited image in this session (file path, size, etc.). Use this to check what image is currently available for continue_editing.",
            inputSchema: {
              type: "object",
              properties: {},
              additionalProperties: false,
            },
          },
        ] as Tool[],
      };
    });

    this.server.setRequestHandler(CallToolRequestSchema, async (request: CallToolRequest): Promise<CallToolResult> => {
      try {
        switch (request.params.name) {
          case "configure_openrouter_token":
            return await this.configureOpenRouterToken(request);
          case "generate_image":
            return await this.generateImage(request);
          case "edit_image":
            return await this.editImage(request);
          case "get_configuration_status":
            return await this.getConfigurationStatus();
          case "continue_editing":
            return await this.continueEditing(request);
          case "get_last_image_info":
            return await this.getLastImageInfo();
          default:
            throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${request.params.name}`);
        }
      } catch (error) {
        if (error instanceof McpError) {
          throw error;
        }
        throw new McpError(ErrorCode.InternalError, `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`);
      }
    });
  }

  private async configureOpenRouterToken(request: CallToolRequest): Promise<CallToolResult> {
    const { apiKey } = request.params.arguments as { apiKey: string };

    try {
      ConfigSchema.parse({ openrouterApiKey: apiKey });

      this.config = { openrouterApiKey: apiKey };
      this.openai = new OpenAI({
        apiKey,
        baseURL: "https://openrouter.ai/api/v1",
      });
      this.configSource = 'config_file'; // Manual configuration via tool

      await this.saveConfig();

      return {
        content: [
          {
            type: "text",
            text: "✅ OpenRouter API token configured successfully! You can now use OpenRouter image generation features.",
          },
        ],
      };
    } catch (error) {
      if (error instanceof z.ZodError) {
        throw new McpError(ErrorCode.InvalidParams, `Invalid API key: ${error.errors[0]?.message}`);
      }
      throw error;
    }
  }

  private async generateImage(request: CallToolRequest): Promise<CallToolResult> {
    if (!this.ensureConfigured()) {
      throw new McpError(ErrorCode.InvalidRequest, "OpenRouter API token not configured. Use configure_openrouter_token first.");
    }

    const { prompt, aspectRatio } = request.params.arguments as {
      prompt: string;
      aspectRatio?: AspectRatio;
    };

    try {
      // Prepare the request payload
      const requestPayload: any = {
        model: "google/gemini-2.5-flash-image",
        messages: [
          {
            role: "user",
            content: prompt,
          }
        ],
        modalities: ["image", "text"],
      };

      // Add aspect ratio if provided
      if (aspectRatio) {
        requestPayload.image_config = {
          aspect_ratio: aspectRatio,
        };
      }

      const response = await this.openai!.chat.completions.create(requestPayload);

      // Process response to extract image data
      const content: any[] = [];
      const savedFiles: string[] = [];
      let textContent = "";

      // Get appropriate save directory based on OS
      const imagesDir = this.getImagesDirectory();

      // Create directory
      await fs.mkdir(imagesDir, { recursive: true, mode: 0o755 });

      const message = response.choices[0]?.message;
      if (message) {
        // Process text content
        if (message.content && typeof message.content === 'string') {
          textContent = message.content;
        }

        // Process images
        if ((message as any).images && Array.isArray((message as any).images)) {
          for (const imageData of (message as any).images) {
            const imageUrl = imageData.image_url?.url;
            if (imageUrl && imageUrl.startsWith('data:image/')) {
              // Extract base64 data from data URL
              const matches = imageUrl.match(/^data:image\/(\w+);base64,(.+)$/);
              if (matches && matches[2]) {
                const base64Data = matches[2];
                const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
                const randomId = Math.random().toString(36).substring(2, 8);
                const fileName = `generated-${timestamp}-${randomId}.png`;
                const filePath = path.join(imagesDir, fileName);

                const imageBuffer = Buffer.from(base64Data, 'base64');
                await fs.writeFile(filePath, imageBuffer);
                savedFiles.push(filePath);
                this.lastImagePath = filePath;

                // Add image to MCP response
                content.push({
                  type: "image",
                  data: base64Data,
                  mimeType: "image/png",
                });
              }
            }
          }
        }
      }

      // Build response content
      let statusText = `🎨 Image generated with OpenRouter (Gemini 2.5 Flash Image)!\n\nPrompt: "${prompt}"`;
      if (aspectRatio) {
        statusText += `\nAspect Ratio: ${aspectRatio}`;
      }

      if (textContent) {
        statusText += `\n\nDescription: ${textContent}`;
      }

      if (savedFiles.length > 0) {
        statusText += `\n\n📁 Image saved to:\n${savedFiles.map(f => `- ${f}`).join('\n')}`;
        statusText += `\n\n💡 View the image by:`;
        statusText += `\n1. Opening the file at the path above`;
        statusText += `\n2. Clicking on "Called generate_image" in Cursor to expand the MCP call details`;
        statusText += `\n\n🔄 To modify this image, use: continue_editing`;
        statusText += `\n📋 To check current image info, use: get_last_image_info`;
      } else {
        statusText += `\n\nNote: No image was generated. The model may have returned only text.`;
        statusText += `\n\n💡 Tip: Try running the command again - sometimes the first call needs to warm up the model.`;
      }

      // Add text content first
      content.unshift({
        type: "text",
        text: statusText,
      });

      return { content };
    } catch (error) {
      console.error("Error generating image:", error);
      throw new McpError(
        ErrorCode.InternalError,
        `Failed to generate image: ${error instanceof Error ? error.message : String(error)}`
      );
    }
  }

  private async editImage(request: CallToolRequest): Promise<CallToolResult> {
    if (!this.ensureConfigured()) {
      throw new McpError(ErrorCode.InvalidRequest, "OpenRouter API token not configured. Use configure_openrouter_token first.");
    }

    const { imagePath, prompt, referenceImages, aspectRatio } = request.params.arguments as {
      imagePath: string;
      prompt: string;
      referenceImages?: string[];
      aspectRatio?: AspectRatio;
    };

    try {
      // Prepare the main image
      const imageBuffer = await fs.readFile(imagePath);
      const mimeType = this.getMimeType(imagePath);
      const imageBase64 = imageBuffer.toString('base64');

      // Build content array for the message
      const contentParts: any[] = [];

      // Add the main image
      contentParts.push({
        type: "image_url",
        image_url: {
          url: `data:${mimeType};base64,${imageBase64}`,
        }
      });

      // Add reference images if provided
      if (referenceImages && referenceImages.length > 0) {
        for (const refPath of referenceImages) {
          try {
            const refBuffer = await fs.readFile(refPath);
            const refMimeType = this.getMimeType(refPath);
            const refBase64 = refBuffer.toString('base64');
            contentParts.push({
              type: "image_url",
              image_url: {
                url: `data:${refMimeType};base64,${refBase64}`,
              }
            });
          } catch (error) {
            // Continue with other images, don't fail the entire operation
            continue;
          }
        }
      }

      // Add the text prompt
      contentParts.push({
        type: "text",
        text: prompt,
      });

      // Prepare the request payload
      const requestPayload: any = {
        model: "google/gemini-2.5-flash-image",
        messages: [
          {
            role: "user",
            content: contentParts,
          }
        ],
        modalities: ["image", "text"],
      };

      // Add aspect ratio if provided
      if (aspectRatio) {
        requestPayload.image_config = {
          aspect_ratio: aspectRatio,
        };
      }

      const response = await this.openai!.chat.completions.create(requestPayload);

      // Process response
      const content: any[] = [];
      const savedFiles: string[] = [];
      let textContent = "";

      // Get appropriate save directory
      const imagesDir = this.getImagesDirectory();
      await fs.mkdir(imagesDir, { recursive: true, mode: 0o755 });

      const message = response.choices[0]?.message;
      if (message) {
        // Process text content
        if (message.content && typeof message.content === 'string') {
          textContent = message.content;
        }

        // Process images
        if ((message as any).images && Array.isArray((message as any).images)) {
          for (const imageData of (message as any).images) {
            const imageUrl = imageData.image_url?.url;
            if (imageUrl && imageUrl.startsWith('data:image/')) {
              // Extract base64 data from data URL
              const matches = imageUrl.match(/^data:image\/(\w+);base64,(.+)$/);
              if (matches && matches[2]) {
                const base64Data = matches[2];
                const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
                const randomId = Math.random().toString(36).substring(2, 8);
                const fileName = `edited-${timestamp}-${randomId}.png`;
                const filePath = path.join(imagesDir, fileName);

                const imageBuffer = Buffer.from(base64Data, 'base64');
                await fs.writeFile(filePath, imageBuffer);
                savedFiles.push(filePath);
                this.lastImagePath = filePath;

                // Add image to MCP response
                content.push({
                  type: "image",
                  data: base64Data,
                  mimeType: "image/png",
                });
              }
            }
          }
        }
      }

      // Build response
      let statusText = `🎨 Image edited with OpenRouter!\n\nOriginal: ${imagePath}\nEdit prompt: "${prompt}"`;
      if (aspectRatio) {
        statusText += `\nAspect Ratio: ${aspectRatio}`;
      }

      if (referenceImages && referenceImages.length > 0) {
        statusText += `\n\nReference images used:\n${referenceImages.map(f => `- ${f}`).join('\n')}`;
      }

      if (textContent) {
        statusText += `\n\nDescription: ${textContent}`;
      }

      if (savedFiles.length > 0) {
        statusText += `\n\n📁 Edited image saved to:\n${savedFiles.map(f => `- ${f}`).join('\n')}`;
        statusText += `\n\n💡 View the edited image by:`;
        statusText += `\n1. Opening the file at the path above`;
        statusText += `\n2. Clicking on "Called edit_image" in Cursor to expand the MCP call details`;
        statusText += `\n\n🔄 To continue editing, use: continue_editing`;
        statusText += `\n📋 To check current image info, use: get_last_image_info`;
      } else {
        statusText += `\n\nNote: No edited image was generated.`;
        statusText += `\n\n💡 Tip: Try running the command again - sometimes the first call needs to warm up the model.`;
      }

      content.unshift({
        type: "text",
        text: statusText,
      });

      return { content };
    } catch (error) {
      throw new McpError(
        ErrorCode.InternalError,
        `Failed to edit image: ${error instanceof Error ? error.message : String(error)}`
      );
    }
  }

  private async getConfigurationStatus(): Promise<CallToolResult> {
    const isConfigured = this.config !== null && this.openai !== null;

    let statusText: string;
    let sourceInfo = "";

    if (isConfigured) {
      statusText = "✅ OpenRouter API token is configured and ready to use";
      switch (this.configSource) {
        case 'environment':
          sourceInfo = "\n📍 Source: Environment variable (OPENROUTER_API_KEY)\n💡 This is the most secure configuration method.";
          break;
        case 'config_file':
          sourceInfo = "\n📍 Source: Local configuration file (.openrouter-image-config.json)\n💡 Consider using environment variables for better security.";
          break;
      }
    } else {
      statusText = "❌ OpenRouter API token is not configured";
      sourceInfo = `

📝 Configuration options (in priority order):
1. 🥇 MCP client environment variables (Recommended)
2. 🥈 System environment variable: OPENROUTER_API_KEY
3. 🥉 Use configure_openrouter_token tool

💡 For the most secure setup, add this to your MCP configuration:
"env": {
  "OPENROUTER_API_KEY": "your-api-key-here"
}

🔑 Get your API key from: https://openrouter.ai/settings/keys`;
    }

    return {
      content: [
        {
          type: "text",
          text: statusText + sourceInfo,
        },
      ],
    };
  }

  private async continueEditing(request: CallToolRequest): Promise<CallToolResult> {
    if (!this.ensureConfigured()) {
      throw new McpError(ErrorCode.InvalidRequest, "OpenRouter API token not configured. Use configure_openrouter_token first.");
    }

    if (!this.lastImagePath) {
      throw new McpError(ErrorCode.InvalidRequest, "No previous image found. Please generate or edit an image first, then use continue_editing for subsequent edits.");
    }

    const { prompt, referenceImages, aspectRatio } = request.params.arguments as {
      prompt: string;
      referenceImages?: string[];
      aspectRatio?: AspectRatio;
    };

    // Check whether the last image file still exists
    try {
      await fs.access(this.lastImagePath);
    } catch {
      throw new McpError(ErrorCode.InvalidRequest, `Last image file not found at: ${this.lastImagePath}. Please generate a new image first.`);
    }

    // Use editImage logic with lastImagePath
    return await this.editImage({
      method: "tools/call",
      params: {
        name: "edit_image",
        arguments: {
          imagePath: this.lastImagePath,
          prompt: prompt,
          referenceImages: referenceImages,
          aspectRatio: aspectRatio
        }
      }
    } as CallToolRequest);
  }

  private async getLastImageInfo(): Promise<CallToolResult> {
    if (!this.lastImagePath) {
      return {
        content: [
          {
            type: "text",
            text: "📷 No previous image found.\n\nPlease generate or edit an image first, then this command will show information about your last image.",
          },
        ],
      };
    }

    // Check whether the file exists
    try {
      await fs.access(this.lastImagePath);
      const stats = await fs.stat(this.lastImagePath);

      return {
        content: [
          {
            type: "text",
            text: `📷 Last Image Information:\n\nPath: ${this.lastImagePath}\nFile Size: ${Math.round(stats.size / 1024)} KB\nLast Modified: ${stats.mtime.toLocaleString()}\n\n💡 Use continue_editing to make further changes to this image.`,
          },
        ],
      };
    } catch {
      return {
        content: [
          {
            type: "text",
            text: `📷 Last Image Information:\n\nPath: ${this.lastImagePath}\nStatus: ❌ File not found\n\n💡 The image file may have been moved or deleted. Please generate a new image.`,
          },
        ],
      };
    }
  }

  private ensureConfigured(): boolean {
    return this.config !== null && this.openai !== null;
  }

  private getMimeType(filePath: string): string {
    const ext = path.extname(filePath).toLowerCase();
    switch (ext) {
      case '.jpg':
      case '.jpeg':
        return 'image/jpeg';
      case '.png':
        return 'image/png';
      case '.webp':
        return 'image/webp';
      default:
        return 'image/jpeg';
    }
  }

  private getImagesDirectory(): string {
    const platform = os.platform();

    if (platform === 'win32') {
      // Windows: Use Documents folder
      const homeDir = os.homedir();
      return path.join(homeDir, 'Documents', 'openrouter-images');
    } else {
      // macOS/Linux: Use current directory or home directory if in system paths
      const cwd = process.cwd();
      const homeDir = os.homedir();

      // If in system directories, use home directory instead
      if (cwd.startsWith('/usr/') || cwd.startsWith('/opt/') || cwd.startsWith('/var/')) {
        return path.join(homeDir, 'openrouter-images');
      }

      return path.join(cwd, 'generated_imgs');
    }
  }

  private async saveConfig(): Promise<void> {
    if (this.config) {
      const configPath = path.join(process.cwd(), '.openrouter-image-config.json');
      await fs.writeFile(configPath, JSON.stringify(this.config, null, 2));
    }
  }

  private async loadConfig(): Promise<void> {
    // Try to load from environment variable first
    const envApiKey = process.env.OPENROUTER_API_KEY;
    if (envApiKey) {
      try {
        this.config = ConfigSchema.parse({ openrouterApiKey: envApiKey });
        this.openai = new OpenAI({
          apiKey: this.config.openrouterApiKey,
          baseURL: "https://openrouter.ai/api/v1",
        });
        this.configSource = 'environment';
        return;
      } catch (error) {
        // Invalid API key in environment
      }
    }

    // Fallback to config file
    try {
      const configPath = path.join(process.cwd(), '.openrouter-image-config.json');
      const configData = await fs.readFile(configPath, 'utf-8');
      const parsedConfig = JSON.parse(configData);
      this.config = ConfigSchema.parse(parsedConfig);
      this.openai = new OpenAI({
        apiKey: this.config.openrouterApiKey,
        baseURL: "https://openrouter.ai/api/v1",
      });
      this.configSource = 'config_file';
    } catch {
      // Config file doesn't exist or is invalid, that's okay
      this.configSource = 'not_configured';
    }
  }

  public async run(): Promise<void> {
    await this.loadConfig();
    const transport = new StdioServerTransport();
    await this.server.connect(transport);
  }
}

const server = new OpenRouterImageMCP();
server.run().catch(console.error);
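The server resolves its API key at startup (loadConfig) from the OPENROUTER_API_KEY environment variable before falling back to the configure_openrouter_token tool or a local .openrouter-image-config.json file, so the simplest setup is to pass the key through the MCP client's environment, as the get_configuration_status message itself recommends. Below is a minimal sketch of such a client entry; the "openrouter-image" server name and the node dist/index.js command are assumptions about how the package is built and registered, not something defined in this file.

    {
      "mcpServers": {
        "openrouter-image": {
          "command": "node",
          "args": ["dist/index.js"],
          "env": {
            "OPENROUTER_API_KEY": "your-api-key-here"
          }
        }
      }
    }

With a key supplied this way, loadConfig() picks it up from the environment and get_configuration_status reports "Source: Environment variable (OPENROUTER_API_KEY)".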

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/WeiYu021/openrouter-image-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.