
Gemini MCP Server

openrouter_models.json • 13.9 kB
{ "_README": { "description": "Model metadata for OpenRouter-backed providers.", "documentation": "https://github.com/BeehiveInnovations/zen-mcp-server/blob/main/docs/custom_models.md", "usage": "Models listed here are exposed through OpenRouter. Aliases are case-insensitive.", "field_notes": "Matches providers/shared/model_capabilities.py.", "field_descriptions": { "model_name": "The model identifier - OpenRouter format (e.g., 'anthropic/claude-opus-4') or custom model name (e.g., 'llama3.2')", "aliases": "Array of short names users can type instead of the full model name", "context_window": "Total number of tokens the model can process (input + output combined)", "max_output_tokens": "Maximum number of tokens the model can generate in a single response", "supports_extended_thinking": "Whether the model supports extended reasoning tokens (currently none do via OpenRouter or custom APIs)", "supports_json_mode": "Whether the model can guarantee valid JSON output", "supports_function_calling": "Whether the model supports function/tool calling", "supports_images": "Whether the model can process images/visual input", "max_image_size_mb": "Maximum total size in MB for all images combined (capped at 40MB max for custom models)", "supports_temperature": "Whether the model accepts temperature parameter in API calls (set to false for O3/O4 reasoning models)", "temperature_constraint": "Type of temperature constraint: 'fixed' (fixed value), 'range' (continuous range), 'discrete' (specific values), or omit for default range", "use_openai_response_api": "Set to true when the model must use the /responses endpoint (reasoning models like GPT-5 Pro). Leave false/omit for standard chat completions.", "default_reasoning_effort": "Default reasoning effort level for models that support it (e.g., 'low', 'medium', 'high'). Omit if not applicable.", "description": "Human-readable description of the model", "intelligence_score": "1-20 human rating used as the primary signal for auto-mode model ordering", "allow_code_generation": "Whether this model can generate and suggest fully working code - complete with functions, files, and detailed implementation instructions - for your AI tool to use right away. Only set this to 'true' for a model more capable than the AI model / CLI you're currently using." 
} }, "models": [ { "model_name": "anthropic/claude-sonnet-4.5", "aliases": [ "sonnet", "sonnet4.5" ], "context_window": 200000, "max_output_tokens": 64000, "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, "supports_images": true, "max_image_size_mb": 5.0, "description": "Claude Sonnet 4.5 - High-performance model with exceptional reasoning and efficiency", "intelligence_score": 12 }, { "model_name": "anthropic/claude-opus-4.1", "aliases": [ "opus", "claude-opus" ], "context_window": 200000, "max_output_tokens": 64000, "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, "supports_images": true, "max_image_size_mb": 5.0, "description": "Claude Opus 4.1 - Our most capable and intelligent model yet", "intelligence_score": 14 }, { "model_name": "anthropic/claude-sonnet-4.1", "aliases": [ "sonnet4.1" ], "context_window": 200000, "max_output_tokens": 64000, "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, "supports_images": true, "max_image_size_mb": 5.0, "description": "Claude Sonnet 4.1 - Last generation high-performance model with exceptional reasoning and efficiency", "intelligence_score": 10 }, { "model_name": "anthropic/claude-3.5-haiku", "aliases": [ "haiku" ], "context_window": 200000, "max_output_tokens": 64000, "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, "supports_images": true, "max_image_size_mb": 5.0, "description": "Claude 3 Haiku - Fast and efficient with vision", "intelligence_score": 8 }, { "model_name": "google/gemini-2.5-pro", "aliases": [ "pro", "gemini-pro", "gemini", "pro-openrouter" ], "context_window": 1048576, "max_output_tokens": 65536, "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "allow_code_generation": true, "description": "Google's Gemini 2.5 Pro via OpenRouter with vision", "intelligence_score": 18 }, { "model_name": "google/gemini-2.5-flash", "aliases": [ "flash", "gemini-flash" ], "context_window": 1048576, "max_output_tokens": 65536, "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 15.0, "description": "Google's Gemini 2.5 Flash via OpenRouter with vision", "intelligence_score": 10 }, { "model_name": "mistralai/mistral-large-2411", "aliases": [ "mistral-large", "mistral" ], "context_window": 128000, "max_output_tokens": 32000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, "supports_images": false, "max_image_size_mb": 0.0, "description": "Mistral's largest model (text-only)", "intelligence_score": 11 }, { "model_name": "meta-llama/llama-3-70b", "aliases": [ "llama", "llama3", "llama3-70b", "llama-70b", "llama3-openrouter" ], "context_window": 8192, "max_output_tokens": 8192, "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, "supports_images": false, "max_image_size_mb": 0.0, "description": "Meta's Llama 3 70B model (text-only)", "intelligence_score": 9 }, { "model_name": "deepseek/deepseek-r1-0528", "aliases": [ "deepseek-r1", "deepseek", "r1", "deepseek-thinking" ], "context_window": 65536, "max_output_tokens": 32768, "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": false, "supports_images": false, 
"max_image_size_mb": 0.0, "description": "DeepSeek R1 with thinking mode - advanced reasoning capabilities (text-only)", "intelligence_score": 15 }, { "model_name": "perplexity/llama-3-sonar-large-32k-online", "aliases": [ "perplexity", "sonar", "perplexity-online" ], "context_window": 32768, "max_output_tokens": 32768, "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, "supports_images": false, "max_image_size_mb": 0.0, "description": "Perplexity's online model with web search (text-only)", "intelligence_score": 9 }, { "model_name": "openai/o3", "aliases": [ "o3" ], "context_window": 200000, "max_output_tokens": 100000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": false, "temperature_constraint": "fixed", "description": "OpenAI's o3 model - well-rounded and powerful across domains with vision", "intelligence_score": 14 }, { "model_name": "openai/o3-mini", "aliases": [ "o3-mini", "o3mini" ], "context_window": 200000, "max_output_tokens": 100000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": false, "temperature_constraint": "fixed", "description": "OpenAI's o3-mini model - balanced performance and speed with vision", "intelligence_score": 12 }, { "model_name": "openai/o3-mini-high", "aliases": [ "o3-mini-high", "o3mini-high" ], "context_window": 200000, "max_output_tokens": 100000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": false, "temperature_constraint": "fixed", "description": "OpenAI's o3-mini with high reasoning effort - optimized for complex problems with vision", "intelligence_score": 13 }, { "model_name": "openai/o3-pro", "aliases": [ "o3pro" ], "context_window": 200000, "max_output_tokens": 100000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": false, "temperature_constraint": "fixed", "description": "OpenAI's o3-pro model - professional-grade reasoning and analysis with vision", "intelligence_score": 15 }, { "model_name": "openai/o4-mini", "aliases": [ "o4-mini", "o4mini" ], "context_window": 200000, "max_output_tokens": 100000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": false, "temperature_constraint": "fixed", "description": "OpenAI's o4-mini model - optimized for shorter contexts with rapid reasoning and vision", "intelligence_score": 11 }, { "model_name": "openai/gpt-5", "aliases": [ "gpt5" ], "context_window": 400000, "max_output_tokens": 128000, "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": true, "temperature_constraint": "range", "description": "GPT-5 (400K context, 128K output) - Advanced model with reasoning support", "intelligence_score": 16 }, { "model_name": "openai/gpt-5-pro", "aliases": [ "gpt5pro" ], "context_window": 400000, "max_output_tokens": 272000, "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": 
true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": false, "temperature_constraint": "fixed", "use_openai_response_api": true, "default_reasoning_effort": "high", "allow_code_generation": true, "description": "GPT-5 Pro - Advanced reasoning model with highest quality responses (text+image input, text output only)", "intelligence_score": 18 }, { "model_name": "openai/gpt-5-codex", "aliases": [ "codex", "gpt5codex" ], "context_window": 400000, "max_output_tokens": 128000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": false, "supports_images": false, "max_image_size_mb": 0.0, "description": "GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows", "intelligence_score": 17 }, { "model_name": "openai/gpt-5-mini", "aliases": [ "gpt5mini" ], "context_window": 400000, "max_output_tokens": 128000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": false, "supports_images": false, "max_image_size_mb": 0.0, "supports_temperature": true, "temperature_constraint": "fixed", "description": "GPT-5-mini (400K context, 128K output) - Efficient variant with reasoning support", "intelligence_score": 10 }, { "model_name": "openai/gpt-5-nano", "aliases": [ "gpt5nano" ], "context_window": 400000, "max_output_tokens": 128000, "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": false, "supports_images": false, "max_image_size_mb": 0.0, "supports_temperature": true, "temperature_constraint": "fixed", "description": "GPT-5 nano (400K context, 128K output) - Fastest, cheapest version of GPT-5 for summarization and classification tasks", "intelligence_score": 8 }, { "model_name": "x-ai/grok-4", "aliases": [ "grok-4", "grok4", "grok" ], "context_window": 256000, "max_output_tokens": 256000, "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": true, "supports_images": true, "max_image_size_mb": 20.0, "supports_temperature": true, "temperature_constraint": "range", "description": "xAI's Grok 4 via OpenRouter with vision and advanced reasoning", "intelligence_score": 15 } ] }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/BeehiveInnovations/gemini-mcp-server'
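
For scripted access, the same request can be issued from Python. This is a minimal sketch assuming only that the endpoint returns a JSON document; the response schema is not documented here.

import requests

# Endpoint taken from the curl example above.
url = "https://glama.ai/api/mcp/v1/servers/BeehiveInnovations/gemini-mcp-server"

resp = requests.get(url, timeout=30)
resp.raise_for_status()

server_info = resp.json()  # inspect the payload to discover its fields
print(server_info)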

If you have feedback or need assistance with the MCP directory API, please join our Discord server.