Skip to main content
Glama
index.ts (3.27 kB)
#!/usr/bin/env node /** * ThinkingCap - Multi-Agent Research MCP Server * * A generalized multi-agent research tool that supports multiple LLM providers. * Agents are specified via command-line arguments. * * Usage: * npx thinkingcap openrouter:openai/gpt-4o groq cerebras xai * * Environment variables: * - OPENROUTER_API_KEY, OPENAI_API_KEY, GROQ_API_KEY, etc. * - These should be set in your shell profile (.bashrc, .zshrc) */ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { z } from "zod"; import { orchestrate } from "./orchestrator.js"; import { PROVIDERS } from "./providers.js"; // Parse command-line arguments to get agent specs const args = process.argv.slice(2); // Default agents if none specified const DEFAULT_AGENTS = [ "openrouter:moonshotai/kimi-k2-thinking", "groq:moonshotai/kimi-k2-instruct-0905", "cerebras:zai-glm-4.6", "xai:grok-4-fast", ]; const agentSpecs = args.length > 0 ? args : DEFAULT_AGENTS; // Create MCP server const server = new McpServer({ name: "thinkingcap", version: "0.2.0", }); // Register the main research tool server.tool( "research", { query: z.string().describe("The research query or question to investigate"), maxSearchResults: z .number() .min(1) .max(20) .default(5) .describe("Maximum web search results per agent"), }, async ({ query, maxSearchResults }) => { try { const result = await orchestrate(query, { agents: agentSpecs, temperature: 0.7, maxSearchResults: maxSearchResults ?? 5, maxIterations: 10, }); return { content: [ { type: "text" as const, text: result, }, ], }; } catch (error) { const errorMessage = error instanceof Error ? 
error.message : String(error); return { content: [ { type: "text" as const, text: `Research failed: ${errorMessage}`, }, ], isError: true, }; } } ); // Register a tool to list available providers server.tool( "list_providers", {}, async () => { const providerList = Object.entries(PROVIDERS) .map( ([key, config]) => `- ${key}: ${config.name} (env: ${config.envKey}, default model: ${config.defaultModel})` ) .join("\n"); const currentAgents = agentSpecs.join(", "); return { content: [ { type: "text" as const, text: `Available Providers:\n${providerList}\n\nCurrently configured agents: ${currentAgents}\n\nTo change agents, restart the server with different arguments:\nnpx not-enough-kimis provider1:model1 provider2:model2 ...`, }, ], }; } ); // Start the server async function main() { const transport = new StdioServerTransport(); await server.connect(transport); // Log to stderr so it doesn't interfere with MCP protocol console.error(`ThinkingCap MCP Server started`); console.error(`Configured agents: ${agentSpecs.join(", ")}`); } main().catch((error) => { console.error("Server error:", error); process.exit(1); });

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Infatoshi/thinkingcap'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.