orchestrator.ts
/**
 * Multi-Agent Orchestrator
 * Runs multiple LLM agents in parallel and synthesizes their responses
 */

import { callLLM, Message } from "./llm.js";
import { parseAgentSpec } from "./providers.js";
import { searchWeb, formatSearchResults } from "./search.js";

export interface AgentResult {
  agentId: number;
  provider: string;
  model: string;
  question: string;
  response: string;
  status: "success" | "error";
  error?: string;
}

export interface OrchestratorConfig {
  agents: string[]; // e.g., ["openrouter:openai/gpt-4o", "groq", "cerebras"]
  temperature: number;
  maxSearchResults: number;
  maxIterations: number;
}

const SYSTEM_PROMPT = `You are a helpful research assistant. When given a question:
1. If the question requires current information, use the search results provided to answer comprehensively.
2. Synthesize information from multiple sources when available.
3. Be thorough but concise in your response.
4. Cite sources when making factual claims.
5. If you cannot find relevant information, say so clearly.

Provide a complete, well-structured answer.`;

const QUESTION_GENERATION_PROMPT = `You are an orchestrator that needs to create different research questions to thoroughly analyze a topic from multiple angles.

Original user query: {query}

Generate exactly {numAgents} different, specific questions that will help gather comprehensive information about this topic. Each question should approach the topic from a different angle (research, analysis, verification, alternatives, etc.).

Return your response as a JSON array of strings ONLY, like this:
["question 1", "question 2", "question 3"]

Only return the JSON array, nothing else.`;

const SYNTHESIS_PROMPT = `You have received {numResponses} different responses from AI agents that analyzed the same query from different perspectives.

Your job is to synthesize their responses into ONE comprehensive final answer.

Here are all the agent responses:

{agentResponses}

IMPORTANT: Synthesize these into ONE final comprehensive answer that combines the best information from all agents. Do NOT mention that you are synthesizing multiple responses. Simply provide the final synthesized answer directly.`;

/**
 * Generate research questions using an LLM
 */
async function generateQuestions(
  query: string,
  numAgents: number,
  agentSpec: string,
  temperature: number
): Promise<string[]> {
  const { provider, model } = parseAgentSpec(agentSpec);

  const prompt = QUESTION_GENERATION_PROMPT.replace("{query}", query).replace(
    "{numAgents}",
    numAgents.toString()
  );

  try {
    const response = await callLLM(
      provider,
      model,
      [{ role: "user", content: prompt }],
      temperature
    );

    // Parse JSON response
    const jsonMatch = response.content.match(/\[[\s\S]*\]/);
    if (jsonMatch) {
      const questions = JSON.parse(jsonMatch[0]) as string[];
      if (questions.length === numAgents) {
        return questions;
      }
    }
  } catch (error) {
    console.error("Question generation failed:", error);
  }

  // Fallback questions
  return [
    `Research comprehensive information about: ${query}`,
    `Analyze and provide insights about: ${query}`,
    `Find alternative perspectives on: ${query}`,
    `Verify and cross-check facts about: ${query}`,
  ].slice(0, numAgents);
}

/**
 * Run a single agent with search capability
 */
async function runAgent(
  agentId: number,
  agentSpec: string,
  question: string,
  temperature: number,
  maxSearchResults: number
): Promise<AgentResult> {
  const { provider, model } = parseAgentSpec(agentSpec);

  try {
    // Perform web search
    const searchResults = await searchWeb(question, maxSearchResults);
    const formattedResults = formatSearchResults(searchResults);

    // Build messages
    const messages: Message[] = [
      { role: "system", content: SYSTEM_PROMPT },
      {
        role: "user",
        content: `Question: ${question}\n\nSearch Results:\n${formattedResults}\n\nPlease provide a comprehensive answer based on the search results above.`,
      },
    ];

    // Call LLM
    const response = await callLLM(provider, model, messages, temperature);

    return {
      agentId,
      provider,
      model,
      question,
      response: response.content,
      status: "success",
    };
  } catch (error) {
    return {
      agentId,
      provider,
      model,
      question,
      response: "",
      status: "error",
      error: error instanceof Error ? error.message : String(error),
    };
  }
}

/**
 * Synthesize multiple agent responses into one
 */
async function synthesizeResponses(
  results: AgentResult[],
  agentSpec: string,
  temperature: number
): Promise<string> {
  const successfulResults = results.filter((r) => r.status === "success");

  if (successfulResults.length === 0) {
    return "All agents failed to provide results. Please try again.";
  }

  if (successfulResults.length === 1) {
    return successfulResults[0].response;
  }

  const { provider, model } = parseAgentSpec(agentSpec);

  // Build agent responses text
  const agentResponsesText = successfulResults
    .map(
      (r, i) =>
        `=== AGENT ${i + 1} (${r.provider}/${r.model}) ===\nQuestion: ${r.question}\n\n${r.response}`
    )
    .join("\n\n");

  const prompt = SYNTHESIS_PROMPT.replace(
    "{numResponses}",
    successfulResults.length.toString()
  ).replace("{agentResponses}", agentResponsesText);

  try {
    const response = await callLLM(
      provider,
      model,
      [{ role: "user", content: prompt }],
      temperature
    );
    return response.content;
  } catch (error) {
    // Fallback: concatenate responses
    return successfulResults
      .map((r, i) => `=== Agent ${i + 1} Response ===\n${r.response}`)
      .join("\n\n");
  }
}

/**
 * Main orchestration function
 */
export async function orchestrate(
  query: string,
  config: OrchestratorConfig
): Promise<string> {
  const { agents, temperature, maxSearchResults } = config;

  if (agents.length === 0) {
    throw new Error("At least one agent must be specified");
  }

  // Single agent mode - just run directly
  if (agents.length === 1) {
    const result = await runAgent(
      0,
      agents[0],
      query,
      temperature,
      maxSearchResults
    );
    if (result.status === "error") {
      throw new Error(result.error);
    }
    return result.response;
  }

  // Multi-agent mode
  // Generate questions using the first agent
  const questions = await generateQuestions(
    query,
    agents.length,
    agents[0],
    temperature
  );

  // Run all agents in parallel; fall back to the raw query if fewer
  // questions than agents were generated (the fallback list has only four)
  const agentPromises = agents.map((agentSpec, i) =>
    runAgent(i, agentSpec, questions[i] ?? query, temperature, maxSearchResults)
  );
  const results = await Promise.all(agentPromises);

  // Synthesize responses using the first agent
  const finalResponse = await synthesizeResponses(
    results,
    agents[0],
    temperature
  );

  return finalResponse;
}
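
The sibling modules (llm.js, providers.js, search.js) are not shown on this page. Judging by the example agent specs in the OrchestratorConfig comment ("openrouter:openai/gpt-4o", "groq"), parseAgentSpec plausibly splits a spec string on its first colon and treats a bare provider name as "use that provider's default model". The sketch below is an assumption for orientation, not the actual providers.ts from this repo.

// providers.ts — hypothetical sketch of parseAgentSpec, inferred from the
// example specs above; the real implementation in this repo may differ.
export interface ParsedAgent {
  provider: string;
  model: string;
}

export function parseAgentSpec(spec: string): ParsedAgent {
  // "openrouter:openai/gpt-4o" -> { provider: "openrouter", model: "openai/gpt-4o" }
  // "groq" -> { provider: "groq", model: "" } (assumed: empty model = provider default)
  const sep = spec.indexOf(":");
  if (sep === -1) {
    return { provider: spec, model: "" };
  }
  return { provider: spec.slice(0, sep), model: spec.slice(sep + 1) };
}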
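
For orientation, a minimal usage sketch of the exported orchestrate() entry point, assuming provider API keys are configured wherever callLLM expects them; the query and config values here are illustrative, not from the repo.

// usage.ts — hypothetical example, not part of the repo
import { orchestrate, OrchestratorConfig } from "./orchestrator.js";

const config: OrchestratorConfig = {
  agents: ["openrouter:openai/gpt-4o", "groq", "cerebras"], // specs as in the comment above
  temperature: 0.7, // illustrative value
  maxSearchResults: 5, // illustrative value
  maxIterations: 1, // declared on the interface but not read by orchestrate()
};

// With three agents, orchestrate() generates three research questions,
// runs the agents in parallel, and synthesizes their answers into one response.
orchestrate("What changed in the latest TypeScript release?", config)
  .then((answer) => console.log(answer))
  .catch((err) => console.error("Orchestration failed:", err));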
