Skip to main content
Glama
vectorize.ts (3.12 kB)
import { v4 as uuidv4 } from "uuid" const MINIMUM_SIMILARITY_SCORE = 0.3 async function generateEmbeddings(text: string, env: Env) { const embeddings = await env.AI.run("@cf/baai/bge-m3", { text }) as AiTextEmbeddingsOutput const values = embeddings.data[0] if (!values) throw new Error("Failed to generate vector embedding") return values } export async function storeMemory(text: string, userId: string, env: Env) { const memoryId = uuidv4() try { const values = await generateEmbeddings(text, env) await env.VECTORIZE.upsert([ { id: memoryId, values, namespace: userId, metadata: { content: text }, }, ]) // Mark as synced in D1 try { await env.DB.prepare( "INSERT OR IGNORE INTO vector_sync (memory_id, synced_at) VALUES (?, ?)" ).bind(memoryId, new Date().toISOString()).run() } catch (dbError) { console.error(`Failed to mark vector as synced: ${dbError}`) } } catch (error) { console.error(`Failed to store vector for memory ${memoryId}:`, error) // Don't throw - let the memory still be stored in D1 } return memoryId } export async function searchMemories( query: string, userId: string, env: Env ) { try { const queryVector = await generateEmbeddings(query, env) const results = await env.VECTORIZE.query(queryVector, { namespace: userId, topK: 10, returnMetadata: "all", }) if (!results.matches || results.matches.length === 0) return [] const filtered = results.matches .filter(match => match.score > MINIMUM_SIMILARITY_SCORE) .sort((a, b) => (b.score || 0) - (a.score || 0)) if (filtered.length === 0) return [] // Fetch timestamps from D1 const ids = filtered.map(m => m.id) const placeholders = ids.map(() => '?').join(',') const dbResults = await env.DB.prepare( `SELECT id, created_at FROM memories WHERE id IN (${placeholders}) AND deleted_at IS NULL` ).bind(...ids).all() const timestampMap = new Map<string, string>() if (dbResults.results) { for (const row of dbResults.results) { const r = row as { id: string; created_at: string } timestampMap.set(r.id, r.created_at) } } 
return filtered.map(match => ({ content: match.metadata?.content || `Missing memory content (ID: ${match.id})`, score: match.score || 0, id: match.id, created_at: timestampMap.get(match.id) || null, })) } catch (error) { console.error(`Vector search failed for namespace ${userId}:`, error) return [] } } export async function updateMemoryVector( memoryId: string, newContent: string, userId: string, env: Env ) { const newValues = await generateEmbeddings(newContent, env) await env.VECTORIZE.upsert([ { id: memoryId, values: newValues, namespace: userId, metadata: { content: newContent }, }, ]) } export async function deleteVectorById(memoryId: string, userId: string, env: Env) { const result = await env.VECTORIZE.deleteByIds([memoryId]) }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/redaphid/mcp-memory'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.