Skip to main content
Glama

Role-Specific Context MCP Server

by Chris-June
supabaseProvider.ts6.67 kB
import { createClient, SupabaseClient } from '@supabase/supabase-js'; import { OpenAIEmbeddings } from '@langchain/openai'; import { config } from '../../config'; import { BaseMemory, ImportanceLevel, MemoryProvider, MemorySearchParams, MemoryStorageParams, MemoryType, VectorMemory } from '../types'; export class SupabaseMemoryProvider implements MemoryProvider { private supabase: SupabaseClient; private embeddings: OpenAIEmbeddings; private collectionName: string; constructor() { // Initialize Supabase client this.supabase = createClient( config.supabase.url, config.supabase.key ); // Initialize OpenAI embeddings this.embeddings = new OpenAIEmbeddings({ openAIApiKey: config.openai.apiKey, modelName: config.openai.embeddingModel, }); this.collectionName = config.supabase.vectorCollection; } /** * Store a memory with vector embedding */ public async storeMemory(params: MemoryStorageParams): Promise<VectorMemory> { const id = `mem_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`; const timestamp = Date.now(); const ttl = params.ttl || config.memory.types[params.type].ttl; const importance = params.importance || config.memory.types[params.type].importance as ImportanceLevel; // Create the memory object const memory: VectorMemory = { id, roleId: params.roleId, content: params.content, timestamp, metadata: params.metadata || {}, ttl, type: params.type, importance, userId: params.userId, sessionId: params.sessionId }; // Generate embedding const embedding = await this.embeddings.embedQuery(params.content); memory.embedding = embedding; // Store in Supabase const { data, error } = await this.supabase .from(this.collectionName) .insert({ id, role_id: params.roleId, content: params.content, timestamp, metadata: params.metadata || {}, ttl, type: params.type, importance, user_id: params.userId, session_id: params.sessionId, embedding }) .select(); if (error) { console.error('Error storing memory in Supabase:', error); throw new Error(`Failed to store memory: 
${error.message}`); } // Set the vector ID memory.vectorId = data[0].id; return memory; } /** * Get a memory by ID */ public async getMemory(id: string): Promise<VectorMemory | null> { const { data, error } = await this.supabase .from(this.collectionName) .select('*') .eq('id', id) .single(); if (error || !data) { return null; } return this.mapDbRecordToMemory(data); } /** * Delete a memory by ID */ public async deleteMemory(id: string): Promise<boolean> { const { error } = await this.supabase .from(this.collectionName) .delete() .eq('id', id); return !error; } /** * Search memories using vector similarity */ public async searchMemories(params: MemorySearchParams): Promise<VectorMemory[]> { // Generate embedding for the query const embedding = await this.embeddings.embedQuery(params.query); const limit = params.limit || config.memory.vectorSearch.maxResults; const minRelevanceScore = params.minRelevanceScore || config.memory.vectorSearch.minRelevanceScore; // Build the query let query = this.supabase .rpc('match_memories', { query_embedding: embedding, match_threshold: minRelevanceScore, match_count: limit }) .eq('role_id', params.roleId); // Add filters if provided if (params.type) { query = query.eq('type', params.type); } if (params.userId) { query = query.eq('user_id', params.userId); } if (params.sessionId) { query = query.eq('session_id', params.sessionId); } // Execute the query const { data, error } = await query; if (error) { console.error('Error searching memories in Supabase:', error); return []; } // Map the results to memory objects return (data || []).map(this.mapDbRecordToMemory); } /** * Get all memories for a role */ public async getMemoriesByRole(roleId: string, type?: MemoryType): Promise<VectorMemory[]> { let query = this.supabase .from(this.collectionName) .select('*') .eq('role_id', roleId); if (type) { query = query.eq('type', type); } const { data, error } = await query; if (error) { console.error('Error getting memories by role from 
Supabase:', error); return []; } return (data || []).map(this.mapDbRecordToMemory); } /** * Clear all memories for a specific role */ public async clearRoleMemories(roleId: string, type?: MemoryType): Promise<number> { let query = this.supabase .from(this.collectionName) .delete() .eq('role_id', roleId); if (type) { query = query.eq('type', type); } const { data, error } = await query; if (error) { console.error('Error clearing role memories from Supabase:', error); return 0; } // Safely check if data exists and has length return Array.isArray(data) ? (data as any[]).length : 0; } /** * Clear expired memories */ public async clearExpiredMemories(): Promise<number> { const now = Date.now(); // Get the TTL value first const ttlResponse = await this.supabase .from(this.collectionName) .select('ttl') .limit(1) .single(); // Default TTL if not found const ttl = ttlResponse.data?.ttl || 7 * 24 * 60 * 60 * 1000; // 7 days const { data, error } = await this.supabase .from(this.collectionName) .delete() .lt('timestamp', now - ttl); if (error) { console.error('Error clearing expired memories from Supabase:', error); return 0; } // Safely check if data exists and has length return Array.isArray(data) ? (data as any[]).length : 0; } /** * Map a database record to a memory object */ private mapDbRecordToMemory(record: any): VectorMemory { return { id: record.id, roleId: record.role_id, content: record.content, timestamp: record.timestamp, metadata: record.metadata || {}, ttl: record.ttl, type: record.type as MemoryType, importance: record.importance as ImportanceLevel, userId: record.user_id, sessionId: record.session_id, embedding: record.embedding, vectorId: record.id }; } }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Chris-June/MCP-Server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.