Skip to main content
Glama
index.ts (6.13 kB)
#!/usr/bin/env node import { Server } from "@modelcontextprotocol/sdk/server/index.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { CallToolRequestSchema, ListToolsRequestSchema, Tool, } from "@modelcontextprotocol/sdk/types.js"; import { z } from "zod"; import { MemoryStore } from "./memory-store.js"; import { VectorStore } from "./vector-store.js"; import path from "path"; import os from "os"; // Schema definitions for our tools const SetContextSchema = z.object({ key: z.string().describe("Unique identifier for the memory"), content: z.string().describe("The content to store"), metadata: z .record(z.any()) .optional() .describe("Optional metadata to associate with the memory"), }); const GetContextSchema = z.object({ query: z.string().describe("The search query to find relevant context"), limit: z .number() .min(1) .max(20) .default(5) .describe("Maximum number of results to return"), threshold: z .number() .min(0) .max(1) .default(0.7) .describe("Minimum similarity threshold (0-1)"), }); // Initialize server const server = new Server( { name: "rag-context-mcp", version: "1.0.0", }, { capabilities: { tools: {}, }, } ); // Get data directory from environment or use default const dataDir = process.env.RAG_CONTEXT_DATA_DIR || path.join(os.homedir(), ".rag-context-mcp"); // Initialize stores let memoryStore: MemoryStore; let vectorStore: VectorStore; async function initializeStores() { memoryStore = new MemoryStore(dataDir); await memoryStore.initialize(); vectorStore = new VectorStore(dataDir); await vectorStore.initialize(); } // Tool definitions const tools: Tool[] = [ { name: "setContext", description: "Store a piece of information in the memory with automatic vectorization and indexing", inputSchema: { type: "object", properties: { key: { type: "string", description: "Unique identifier for the memory", }, content: { type: "string", description: "The content to store", }, metadata: { type: "object", description: 
"Optional metadata to associate with the memory", additionalProperties: true, }, }, required: ["key", "content"], }, }, { name: "getContext", description: "Retrieve relevant context using semantic search and indexed retrieval", inputSchema: { type: "object", properties: { query: { type: "string", description: "The search query to find relevant context", }, limit: { type: "number", description: "Maximum number of results to return", minimum: 1, maximum: 20, default: 5, }, threshold: { type: "number", description: "Minimum similarity threshold (0-1)", minimum: 0, maximum: 1, default: 0.7, }, }, required: ["query"], }, }, ]; // Handle tool listing server.setRequestHandler(ListToolsRequestSchema, async () => { return { tools, }; }); // Handle tool execution server.setRequestHandler(CallToolRequestSchema, async (request) => { const { name, arguments: args } = request.params; try { if (name === "setContext") { const validatedArgs = SetContextSchema.parse(args); // Store in memory database const memory = await memoryStore.set( validatedArgs.key, validatedArgs.content, validatedArgs.metadata ); // Add to vector store for semantic search await vectorStore.add({ id: memory.id, content: validatedArgs.content, metadata: { ...validatedArgs.metadata, key: validatedArgs.key, timestamp: memory.timestamp, }, }); return { content: [ { type: "text", text: `Successfully stored context with key: ${validatedArgs.key}`, }, ], }; } if (name === "getContext") { const validatedArgs = GetContextSchema.parse(args); // Perform semantic search const searchResults = await vectorStore.search( validatedArgs.query, validatedArgs.limit, validatedArgs.threshold ); // Enrich results with full memory data const enrichedResults = await Promise.all( searchResults.map(async (result) => { const memory = await memoryStore.getById(result.id); return { ...result, memory, }; }) ); // Format results const formattedResults = enrichedResults .filter((r) => r.memory !== null) .map((result) => ({ key: 
result.memory!.key, content: result.memory!.content, similarity: result.score, metadata: result.memory!.metadata, timestamp: result.memory!.timestamp, })); return { content: [ { type: "text", text: JSON.stringify(formattedResults, null, 2), }, ], }; } throw new Error(`Unknown tool: ${name}`); } catch (error) { const errorMessage = error instanceof Error ? error.message : "Unknown error occurred"; return { content: [ { type: "text", text: `Error: ${errorMessage}`, }, ], isError: true, }; } }); // Main function async function main() { try { // Initialize stores await initializeStores(); // Create transport const transport = new StdioServerTransport(); // Connect server to transport await server.connect(transport); console.error("RAG Context MCP Server started successfully"); } catch (error) { console.error("Failed to start server:", error); process.exit(1); } } // Start the server main().catch((error) => { console.error("Fatal error:", error); process.exit(1); });

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/notbnull/mcp-rag-context'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.