Skip to main content
Glama

Role-Specific Context MCP Server

by Chris-June
inMemoryProvider.ts (6.45 kB)
import { OpenAIEmbeddings } from '@langchain/openai'; import { config } from '../../config'; import { ImportanceLevel, MemoryProvider, MemorySearchParams, MemoryStorageParams, MemoryType, VectorMemory } from '../types'; /** * In-memory implementation of the MemoryProvider interface * Useful for development and testing without requiring external services */ export class InMemoryProvider implements MemoryProvider { private memories: Map<string, VectorMemory> = new Map(); private embeddings: OpenAIEmbeddings; constructor() { // Initialize OpenAI embeddings this.embeddings = new OpenAIEmbeddings({ openAIApiKey: config.openai.apiKey, modelName: config.openai.embeddingModel, }); // Set up periodic cleanup of expired memories setInterval(() => this.clearExpiredMemories(), 60 * 60 * 1000); // Run every hour } /** * Store a memory with vector embedding */ public async storeMemory(params: MemoryStorageParams): Promise<VectorMemory> { const id = `mem_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`; const timestamp = Date.now(); const ttl = params.ttl || config.memory.types[params.type].ttl; const importance = params.importance || config.memory.types[params.type].importance as ImportanceLevel; // Generate embedding const embedding = await this.embeddings.embedQuery(params.content); // Create the memory object const memory: VectorMemory = { id, roleId: params.roleId, content: params.content, timestamp, metadata: params.metadata || {}, ttl, type: params.type, importance, userId: params.userId, sessionId: params.sessionId, embedding, vectorId: id }; // Store in memory this.memories.set(id, memory); // Enforce memory limits per role this.enforceMemoryLimits(params.roleId, params.type); return memory; } /** * Get a memory by ID */ public async getMemory(id: string): Promise<VectorMemory | null> { const memory = this.memories.get(id); return memory || null; } /** * Delete a memory by ID */ public async deleteMemory(id: string): Promise<boolean> { return 
this.memories.delete(id); } /** * Search memories using vector similarity */ public async searchMemories(params: MemorySearchParams): Promise<VectorMemory[]> { // Generate embedding for the query const queryEmbedding = await this.embeddings.embedQuery(params.query); const limit = params.limit || config.memory.vectorSearch.maxResults; const minRelevanceScore = params.minRelevanceScore || config.memory.vectorSearch.minRelevanceScore; // Get all memories for this role const roleMemories = Array.from(this.memories.values()) .filter(memory => { // Filter by role ID if (memory.roleId !== params.roleId) return false; // Filter by type if provided if (params.type && memory.type !== params.type) return false; // Filter by user ID if provided if (params.userId && memory.userId !== params.userId) return false; // Filter by session ID if provided if (params.sessionId && memory.sessionId !== params.sessionId) return false; return true; }); if (roleMemories.length === 0) { return []; } // Calculate cosine similarity for each memory const scoredMemories = roleMemories.map(memory => { if (!memory.embedding) return { memory, score: 0 }; // Calculate cosine similarity const similarity = this.cosineSimilarity(queryEmbedding, memory.embedding); return { memory, score: similarity }; }); // Filter by minimum relevance score and sort by similarity return scoredMemories .filter(item => item.score >= minRelevanceScore) .sort((a, b) => b.score - a.score) .slice(0, limit) .map(item => item.memory); } /** * Get all memories for a role */ public async getMemoriesByRole(roleId: string, type?: MemoryType): Promise<VectorMemory[]> { return Array.from(this.memories.values()) .filter(memory => { if (memory.roleId !== roleId) return false; if (type && memory.type !== type) return false; return true; }) .sort((a, b) => b.timestamp - a.timestamp); // Most recent first } /** * Clear all memories for a role */ public async clearRoleMemories(roleId: string, type?: MemoryType): Promise<number> { let count 
= 0; for (const [id, memory] of this.memories.entries()) { if (memory.roleId === roleId && (!type || memory.type === type)) { this.memories.delete(id); count++; } } return count; } /** * Clear expired memories */ public async clearExpiredMemories(): Promise<number> { const now = Date.now(); let count = 0; for (const [id, memory] of this.memories.entries()) { if (memory.ttl && memory.timestamp + memory.ttl < now) { this.memories.delete(id); count++; } } return count; } /** * Enforce memory limits per role and type */ private enforceMemoryLimits(roleId: string, type: MemoryType): void { const roleTypeMemories = Array.from(this.memories.values()) .filter(memory => memory.roleId === roleId && memory.type === type) .sort((a, b) => b.timestamp - a.timestamp); // Most recent first if (roleTypeMemories.length <= config.memory.maxMemoriesPerRole) { return; } // Remove oldest memories to stay within limits const memoriesToRemove = roleTypeMemories.slice(config.memory.maxMemoriesPerRole); for (const memory of memoriesToRemove) { if (memory.id) { this.memories.delete(memory.id); } } } /** * Calculate cosine similarity between two vectors */ private cosineSimilarity(vecA: number[], vecB: number[]): number { if (vecA.length !== vecB.length) { throw new Error('Vectors must have the same length'); } let dotProduct = 0; let normA = 0; let normB = 0; for (let i = 0; i < vecA.length; i++) { dotProduct += vecA[i] * vecB[i]; normA += vecA[i] * vecA[i]; normB += vecB[i] * vecB[i]; } if (normA === 0 || normB === 0) { return 0; } return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB)); } }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Chris-June/MCP-Server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.