
Gemini MCP Tool

constants.ts
// Logging
export const LOG_PREFIX = "[GMCPT]";

// Error messages
export const ERROR_MESSAGES = {
  QUOTA_EXCEEDED: "Quota exceeded for quota metric 'Gemini 2.5 Pro Requests'",
  QUOTA_EXCEEDED_SHORT: "⚠️ Gemini 2.5 Pro daily quota exceeded. Please retry with model: 'gemini-2.5-flash'",
  TOOL_NOT_FOUND: "not found in registry",
  NO_PROMPT_PROVIDED: "Please provide a prompt for analysis. Use @ syntax to include files (e.g., '@largefile.js explain what this does') or ask general questions",
} as const;

// Status messages
export const STATUS_MESSAGES = {
  QUOTA_SWITCHING: "🚫 Gemini 2.5 Pro quota exceeded, switching to Flash model...",
  FLASH_RETRY: "⚡ Retrying with Gemini 2.5 Flash...",
  FLASH_SUCCESS: "✅ Flash model completed successfully",
  SANDBOX_EXECUTING: "🔒 Executing Gemini CLI command in sandbox mode...",
  GEMINI_RESPONSE: "Gemini response:",
  // Timeout prevention messages
  PROCESSING_START: "🔍 Starting analysis (may take 5-15 minutes for large codebases)",
  PROCESSING_CONTINUE: "⏳ Still processing... Gemini is working on your request",
  PROCESSING_COMPLETE: "✅ Analysis completed successfully",
} as const;

// Models
export const MODELS = {
  PRO: "gemini-2.5-pro",
  FLASH: "gemini-2.5-flash",
} as const;

// MCP Protocol Constants
export const PROTOCOL = {
  // Message roles
  ROLES: {
    USER: "user",
    ASSISTANT: "assistant",
  },
  // Content types
  CONTENT_TYPES: {
    TEXT: "text",
  },
  // Status codes
  STATUS: {
    SUCCESS: "success",
    ERROR: "error",
    FAILED: "failed",
    REPORT: "report",
  },
  // Notification methods
  NOTIFICATIONS: {
    PROGRESS: "notifications/progress",
  },
  // Timeout prevention
  KEEPALIVE_INTERVAL: 25000, // 25 seconds
} as const;

// CLI Constants
export const CLI = {
  // Command names
  COMMANDS: {
    GEMINI: "gemini",
    ECHO: "echo",
  },
  // Command flags
  FLAGS: {
    MODEL: "-m",
    SANDBOX: "-s",
    PROMPT: "-p",
    HELP: "-help",
  },
  // Default values
  DEFAULTS: {
    MODEL: "default", // Fallback model used when no specific model is provided
    BOOLEAN_TRUE: "true",
    BOOLEAN_FALSE: "false",
  },
} as const;

// (merged PromptArguments and ToolArguments)
export interface ToolArguments {
  prompt?: string;
  model?: string;
  sandbox?: boolean | string;
  changeMode?: boolean | string;
  chunkIndex?: number | string; // Which chunk to return (1-based)
  chunkCacheKey?: string;       // Optional cache key for continuation
  message?: string;             // For Ping tool -- unused
  // --> new tool
  methodology?: string;         // Brainstorming framework to use
  domain?: string;              // Domain context for specialized brainstorming
  constraints?: string;         // Known limitations or requirements
  existingContext?: string;     // Background information to build upon
  ideaCount?: number;           // Target number of ideas to generate
  includeAnalysis?: boolean;    // Include feasibility and impact analysis
  [key: string]: string | boolean | number | undefined; // Allow additional properties
}
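The constants and the ToolArguments interface above are presumably consumed wherever the server shells out to the Gemini CLI. The following is a minimal, hypothetical sketch of that usage (the runGemini helper, the "./constants.js" import path, and the exact fallback behavior are assumptions, not code from this repository): it builds the CLI argument list from ToolArguments and retries with the Flash model when the Pro quota error string is detected.

// Hypothetical usage sketch -- not part of the repository.
import { spawn } from "node:child_process";
import { CLI, MODELS, ERROR_MESSAGES, STATUS_MESSAGES, LOG_PREFIX } from "./constants.js";
import type { ToolArguments } from "./constants.js";

// Run the gemini CLI with the given arguments and collect stdout.
function runCli(args: string[]): Promise<string> {
  return new Promise((resolve, reject) => {
    const child = spawn(CLI.COMMANDS.GEMINI, args);
    let out = "";
    let err = "";
    child.stdout.on("data", (chunk) => (out += chunk));
    child.stderr.on("data", (chunk) => (err += chunk));
    child.on("close", (code) => (code === 0 ? resolve(out) : reject(new Error(err))));
  });
}

// Build flags from ToolArguments; on a Pro quota error, fall back to Flash
// (mirroring STATUS_MESSAGES.QUOTA_SWITCHING).
export async function runGemini(toolArgs: ToolArguments): Promise<string> {
  const model = toolArgs.model ?? MODELS.PRO;
  const args = [CLI.FLAGS.MODEL, model, CLI.FLAGS.PROMPT, toolArgs.prompt ?? ""];
  if (toolArgs.sandbox === true || toolArgs.sandbox === CLI.DEFAULTS.BOOLEAN_TRUE) {
    args.push(CLI.FLAGS.SANDBOX);
  }
  try {
    return await runCli(args);
  } catch (error) {
    if (error instanceof Error && error.message.includes(ERROR_MESSAGES.QUOTA_EXCEEDED)) {
      console.error(LOG_PREFIX, STATUS_MESSAGES.QUOTA_SWITCHING);
      return runCli([CLI.FLAGS.MODEL, MODELS.FLASH, CLI.FLAGS.PROMPT, toolArgs.prompt ?? ""]);
    }
    throw error;
  }
}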
