prompt-cache.ts
/**
 * Prompt/Memory Cache - Placeholder for your planned feature
 *
 * This is where prompt caching and memory functionality will live
 */

import { BaseCacheManager, CacheConfig } from './cache-manager.js';

export interface PromptCacheEntry {
  prompt: string;
  response: string;
  modelUsed: string;
  timestamp: string;
  contextHash: string;
}

export interface MemoryEntry {
  content: string;
  context: any;
  importance: number;
  associations: string[];
  timestamp: string;
}

export class PromptCache extends BaseCacheManager<PromptCacheEntry> {
  private static instance: PromptCache;

  constructor(config?: Partial<CacheConfig>) {
    super({
      maxEntries: 200,         // Larger cache for prompts
      ttlMs: 60 * 60 * 1000,   // 1 hour TTL
      enableStats: true,
      ...config
    });
  }

  static getInstance(config?: Partial<CacheConfig>): PromptCache {
    if (!this.instance) {
      this.instance = new PromptCache(config);
    }
    return this.instance;
  }

  // TODO: Implement your prompt caching logic here
  async cachePromptResponse(
    promptHash: string,
    prompt: string,
    response: string,
    modelUsed: string
  ): Promise<void> {
    const entry: PromptCacheEntry = {
      prompt,
      response,
      modelUsed,
      timestamp: new Date().toISOString(),
      contextHash: promptHash
    };
    await this.set(promptHash, entry);
  }
}

export class MemoryCache extends BaseCacheManager<MemoryEntry> {
  private static instance: MemoryCache;

  constructor(config?: Partial<CacheConfig>) {
    super({
      maxEntries: 1000,     // Large cache for memory
      ttlMs: undefined,     // No expiration for memory
      enableStats: true,
      ...config
    });
  }

  static getInstance(config?: Partial<CacheConfig>): MemoryCache {
    if (!this.instance) {
      this.instance = new MemoryCache(config);
    }
    return this.instance;
  }

  // TODO: Implement your memory functionality here
  async storeMemory(
    key: string,
    content: string,
    context: any,
    importance: number = 0.5
  ): Promise<void> {
    const entry: MemoryEntry = {
      content,
      context,
      importance,
      associations: [],   // TODO: Implement association logic
      timestamp: new Date().toISOString()
    };
    await this.set(key, entry);
  }
}
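The file imports BaseCacheManager and CacheConfig from ./cache-manager.js, which this page does not show. The sketch below is a minimal, hypothetical stand-in for that module, inferred purely from how it is used above: a generic in-memory store with async get/set, eviction once maxEntries is exceeded, and optional TTL expiry. Every name and behavior here is an assumption, not the server's actual implementation.

// cache-manager.ts - hypothetical sketch of the imported module.
// All names and semantics are assumptions based on usage in prompt-cache.ts.

export interface CacheConfig {
  maxEntries: number;   // evict the oldest entry beyond this count (assumed)
  ttlMs?: number;       // time-to-live in ms; undefined = never expire (assumed)
  enableStats: boolean; // track hit/miss counters (assumed)
}

interface TimestampedValue<T> {
  value: T;
  storedAt: number; // epoch ms, used for TTL checks
}

export class BaseCacheManager<T> {
  private store = new Map<string, TimestampedValue<T>>();
  protected hits = 0;
  protected misses = 0;

  constructor(protected config: CacheConfig) {}

  async set(key: string, value: T): Promise<void> {
    // Delete-then-set moves the key to the end of the Map's insertion
    // order, giving a simple least-recently-written eviction policy.
    this.store.delete(key);
    this.store.set(key, { value, storedAt: Date.now() });
    if (this.store.size > this.config.maxEntries) {
      const oldest = this.store.keys().next().value;
      if (oldest !== undefined) this.store.delete(oldest);
    }
  }

  async get(key: string): Promise<T | undefined> {
    const entry = this.store.get(key);
    if (!entry) {
      if (this.config.enableStats) this.misses++;
      return undefined;
    }
    // Expire entries older than ttlMs when a TTL is configured.
    if (this.config.ttlMs !== undefined &&
        Date.now() - entry.storedAt > this.config.ttlMs) {
      this.store.delete(key);
      if (this.config.enableStats) this.misses++;
      return undefined;
    }
    if (this.config.enableStats) this.hits++;
    return entry.value;
  }
}

Under the same assumptions, the two singletons could be exercised as follows. The sha256 prompt hash is illustrative only, since the file leaves hashing to the caller:

import { createHash } from 'node:crypto';
import { PromptCache, MemoryCache } from './prompt-cache.js';

// Hypothetical helper: derive a stable cache key from the prompt text.
const hashPrompt = (s: string) =>
  createHash('sha256').update(s).digest('hex');

async function main() {
  const prompts = PromptCache.getInstance();
  await prompts.cachePromptResponse(
    hashPrompt('Hello'), 'Hello', 'Hi there!', 'llama-3-8b'
  );

  const memory = MemoryCache.getInstance();
  await memory.storeMemory(
    'user:greeting-style',
    'Prefers informal greetings',
    { source: 'chat' },
    0.8
  );
}

main().catch(console.error);

Keying the prompt cache on a hash of the prompt (plus any relevant context) keeps lookups O(1) and avoids duplicating long prompt strings as map keys; storing the full prompt inside the entry preserves it for later inspection.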
