Skip to main content
Glama
index.ts (7.33 kB)
#!/usr/bin/env node import { Command } from "commander"; import { Server } from "@modelcontextprotocol/sdk/server/index.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { CallToolRequestSchema, ListToolsRequestSchema, ListPromptsRequestSchema, GetPromptRequestSchema, CallToolRequest, ListToolsRequest, ListPromptsRequest, GetPromptRequest, Tool, Prompt, GetPromptResult, CallToolResult, } from "@modelcontextprotocol/sdk/types.js"; import { Logger } from "./utils/logger.js"; import { PROTOCOL, ToolArguments } from "./constants.js"; import { getToolDefinitions, getPromptDefinitions, executeTool, toolExists, getPromptMessage, registerTools } from "./tools/index.js"; // Global configuration for model settings export interface ServerConfig { primaryModel: string; fallbackModel?: string; } let serverConfig: ServerConfig; const server = new Server( { name: "fullstack-mcp", version: "1.0.0", }, { capabilities: { tools: {}, prompts: {}, notifications: {}, logging: {}, }, }, ); let isProcessing = false; let currentOperationName = ""; let latestOutput = ""; async function sendNotification(method: string, params: any) { try { await server.notification({ method, params }); } catch (error) { Logger.error("notification failed: ", error); } } /** * Send progress notification to client */ async function sendProgressNotification( progressToken: string | number | undefined, progress: number, total?: number, message?: string ) { if (!progressToken) return; try { const params: any = { progressToken, progress }; if (total !== undefined) params.total = total; if (message) params.message = message; await server.notification({ method: PROTOCOL.NOTIFICATIONS.PROGRESS, params }); } catch (error) { Logger.error("Failed to send progress notification:", error); } } function startProgressUpdates( operationName: string, progressToken?: string | number ) { isProcessing = true; currentOperationName = operationName; latestOutput = ""; const progressMessages 
= [ `Processing ${operationName} - OpenCode is analyzing your request...`, `${operationName} - Generating code and insights...`, `${operationName} - Creating structured response...`, `${operationName} - Large task in progress (this is normal)...`, `${operationName} - Still working... quality takes time...`, ]; let messageIndex = 0; let progress = 0; if (progressToken) { sendProgressNotification( progressToken, 0, undefined, `Starting ${operationName}` ); } const progressInterval = setInterval(async () => { if (isProcessing && progressToken) { progress += 1; const baseMessage = progressMessages[messageIndex % progressMessages.length]; const outputPreview = latestOutput.slice(-150).trim(); const message = outputPreview ? `${baseMessage}\nOutput: ...${outputPreview}` : baseMessage; await sendProgressNotification( progressToken, progress, undefined, message ); messageIndex++; } else if (!isProcessing) { clearInterval(progressInterval); } }, PROTOCOL.KEEPALIVE_INTERVAL); return { interval: progressInterval, progressToken }; } function stopProgressUpdates( progressData: { interval: NodeJS.Timeout; progressToken?: string | number }, success: boolean = true ) { const operationName = currentOperationName; isProcessing = false; currentOperationName = ""; clearInterval(progressData.interval); if (progressData.progressToken) { sendProgressNotification( progressData.progressToken, 100, 100, success ? 
`${operationName} completed successfully` : `${operationName} failed` ); } } // tools/list server.setRequestHandler(ListToolsRequestSchema, async (request: ListToolsRequest): Promise<{ tools: Tool[] }> => { return { tools: getToolDefinitions() as unknown as Tool[] }; }); // tools/call server.setRequestHandler(CallToolRequestSchema, async (request: CallToolRequest): Promise<CallToolResult> => { const toolName: string = request.params.name; if (toolExists(toolName)) { const progressToken = (request.params as any)._meta?.progressToken; const progressData = startProgressUpdates(toolName, progressToken); try { const args: ToolArguments = (request.params.arguments as ToolArguments) || {}; Logger.toolInvocation(toolName, request.params.arguments); const result = await executeTool(toolName, args, (newOutput) => { latestOutput = newOutput; }); stopProgressUpdates(progressData, true); return { content: [ { type: "text", text: result, }, ], isError: false, }; } catch (error) { stopProgressUpdates(progressData, false); Logger.error(`Error in tool '${toolName}':`, error); const errorMessage = error instanceof Error ? 
error.message : String(error); return { content: [ { type: "text", text: `Error executing ${toolName}: ${errorMessage}`, }, ], isError: true, }; } } else { throw new Error(`Unknown tool: ${request.params.name}`); } }); // prompts/list server.setRequestHandler(ListPromptsRequestSchema, async (request: ListPromptsRequest): Promise<{ prompts: Prompt[] }> => { return { prompts: getPromptDefinitions() as unknown as Prompt[] }; }); // prompts/get server.setRequestHandler(GetPromptRequestSchema, async (request: GetPromptRequest): Promise<GetPromptResult> => { const promptName = request.params.name; const args = request.params.arguments || {}; const promptMessage = getPromptMessage(promptName, args); if (!promptMessage) { throw new Error(`Unknown prompt: ${promptName}`); } return { messages: [{ role: "user" as const, content: { type: "text" as const, text: promptMessage } }] }; }); // Setup CLI arguments and start the server async function main() { const program = new Command(); program .name("fullstack-mcp") .description("MCP server for full-stack development with OpenCode CLI integration") .version("1.0.0") .requiredOption("-m, --model <model>", "Primary model to use (e.g., cerebras/zai-glm-4.6)") .option("-f, --fallback-model <model>", "Fallback model for quota/error situations") .parse(); const options = program.opts(); serverConfig = { primaryModel: options.model, fallbackModel: options.fallbackModel }; Logger.debug("init fullstack-mcp with model:", serverConfig.primaryModel); if (serverConfig.fallbackModel) { Logger.debug("fallback model:", serverConfig.fallbackModel); } // Initialize tool registry registerTools(); const transport = new StdioServerTransport(); await server.connect(transport); Logger.debug("fullstack-mcp listening on stdio"); } // Export server config for use in tools export function getServerConfig(): ServerConfig { return serverConfig; } main().catch((error) => { Logger.error("Fatal error:", error); process.exit(1); });

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/RhizomaticRobin/cerebras-code-fullstack-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.