
DollhouseMCP

by DollhouseMCP
MemoryManager.triggers.performance.test.ts • 6.49 kB
/**
 * Performance tests for Memory trigger extraction
 * Tests performance with large numbers of triggers
 */

import { describe, it, expect, beforeAll, afterAll } from '@jest/globals';
import { MemoryManager } from '../../src/elements/memories/MemoryManager.js';
import { Memory } from '../../src/elements/memories/Memory.js';
import { PortfolioManager } from '../../src/portfolio/PortfolioManager.js';
import * as path from 'path';
import * as fs from 'fs/promises';
import * as os from 'os';

describe('Memory Trigger Performance', () => {
  let memoryManager: MemoryManager;
  let testDir: string;
  let memoriesDir: string;
  let originalPortfolioDir: string | undefined;

  beforeAll(async () => {
    // Create test directory
    testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'memory-perf-test-'));
    memoriesDir = path.join(testDir, 'memories');
    await fs.mkdir(memoriesDir, { recursive: true });

    // Save original portfolio dir and point the portfolio at the test directory
    originalPortfolioDir = process.env.DOLLHOUSE_PORTFOLIO_DIR;
    process.env.DOLLHOUSE_PORTFOLIO_DIR = testDir;

    // Reset PortfolioManager singleton so it picks up the new directory
    (PortfolioManager as any).instance = null;

    memoryManager = new MemoryManager();
  });

  afterAll(async () => {
    // Restore original portfolio dir
    if (originalPortfolioDir) {
      process.env.DOLLHOUSE_PORTFOLIO_DIR = originalPortfolioDir;
    } else {
      delete process.env.DOLLHOUSE_PORTFOLIO_DIR;
    }

    // Reset PortfolioManager singleton
    (PortfolioManager as any).instance = null;

    // Clean up test directory
    await fs.rm(testDir, { recursive: true, force: true });
  });

  describe('Performance with Large Trigger Sets', () => {
    it('should handle 200 triggers efficiently', async () => {
      // Generate a large number of triggers (limited to avoid size validation issues)
      const largeTriggerSet = Array.from({ length: 200 }, (_, i) => `t${i}`);

      const memoryWithLargeTriggerSet = `metadata:
  name: "performance-test-memory"
  description: "Memory with 200 triggers for performance testing"
  triggers: ${JSON.stringify(largeTriggerSet)}
  version: "1.0.0"
entries:
  - id: "entry-1"
    timestamp: "2025-09-26T16:00:00Z"
    content: "Performance test entry"`;

      await fs.writeFile(path.join(memoriesDir, 'perf-test.yaml'), memoryWithLargeTriggerSet);

      const startTime = Date.now();
      const memory = await memoryManager.load('perf-test.yaml');
      const loadTime = Date.now() - startTime;

      // Should load successfully
      expect(memory).toBeDefined();
      expect(memory.metadata.triggers).toBeDefined();
      expect(memory.metadata.triggers?.length).toBe(20); // Limited to 20 max

      // Should complete in reasonable time (< 500ms)
      expect(loadTime).toBeLessThan(500);

      console.log(`Loaded 200 triggers in ${loadTime}ms`);
    });

    it('should efficiently filter invalid triggers from large sets', async () => {
      // Mix valid and invalid triggers (valid at even indices, invalid at odd)
      const mixedTriggers = Array.from({ length: 100 }, (_, i) => {
        if (i % 2 === 0) {
          return `valid-trigger-${i}`;
        } else {
          return `invalid!trigger@${i}`;
        }
      });

      const memoryWithMixedTriggers = `metadata:
  name: "filter-performance-test"
  description: "Memory with mixed valid/invalid triggers"
  triggers: ${JSON.stringify(mixedTriggers)}
  version: "1.0.0"
entries: []`;

      await fs.writeFile(path.join(memoriesDir, 'filter-perf.yaml'), memoryWithMixedTriggers);

      const startTime = Date.now();
      const memory = await memoryManager.load('filter-perf.yaml');
      const filterTime = Date.now() - startTime;

      // Should filter to only valid triggers, but the list is limited to the
      // first 20 entries of the array. Since valid and invalid triggers are
      // interleaved, exactly 10 of the first 20 are valid.
      expect(memory.metadata.triggers).toBeDefined();
      expect(memory.metadata.triggers?.length).toBe(10);

      // All surviving triggers should be valid
      memory.metadata.triggers?.forEach(trigger => {
        expect(trigger).toMatch(/^[a-zA-Z0-9\-_]+$/);
      });

      // Should complete quickly even with filtering (< 300ms)
      expect(filterTime).toBeLessThan(300);

      console.log(`Filtered 100 mixed triggers down to ${memory.metadata.triggers?.length} valid in ${filterTime}ms`);
    });

    it('should handle memory save/load cycle with many triggers efficiently', async () => {
      // Create a memory with many triggers programmatically
      const memory = new Memory({
        name: 'save-load-perf-test',
        description: 'Performance test for save/load cycle',
        triggers: Array.from({ length: 100 }, (_, i) => `t${i}`)
      });

      // Add some entries
      await memory.addEntry('Test entry 1', ['test']);
      await memory.addEntry('Test entry 2', ['test']);

      const saveStartTime = Date.now();
      await memoryManager.save(memory, 'save-load-perf.yaml');
      const saveTime = Date.now() - saveStartTime;

      const loadStartTime = Date.now();
      const loadedMemory = await memoryManager.load('save-load-perf.yaml');
      const loadTime = Date.now() - loadStartTime;

      // Verify triggers were preserved
      expect(loadedMemory.metadata.triggers?.length).toBe(100);

      // Both operations should be fast
      expect(saveTime).toBeLessThan(200);
      expect(loadTime).toBeLessThan(200);

      console.log(`Save: ${saveTime}ms, Load: ${loadTime}ms for 100 triggers`);
    });
  });

  describe('Memory Usage', () => {
    it('should not have excessive memory usage with large trigger sets', () => {
      // Create multiple memories with triggers
      const memories: Memory[] = [];

      for (let i = 0; i < 100; i++) {
        const memory = new Memory({
          name: `memory-${i}`,
          description: `Test memory ${i}`,
          triggers: Array.from({ length: 50 }, (_, j) => `trigger-${i}-${j}`)
        });
        memories.push(memory);
      }

      // Should have created 100 memories with 50 triggers each
      expect(memories).toHaveLength(100);

      // Each memory should have its triggers
      memories.forEach(memory => {
        expect(memory.metadata.triggers?.length).toBe(50);
      });

      // Memory footprint should be reasonable:
      // 100 memories * 50 triggers * ~20 bytes per trigger = ~100KB.
      // This is just a sanity check - actual measurement would need heap profiling.
      expect(memories.length * 50).toBeLessThan(10000); // Total triggers < 10k
    });
  });
});
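Taken together, the assertions above pin down the trigger sanitization these tests assume on the raw-YAML load path: the trigger list appears to be truncated to 20 entries first and only then filtered against the safe-character pattern (truncate-then-filter is the only order consistent with the mixed-trigger test seeing 10 survivors rather than 20), while the programmatic save/load round trip preserves all 100 triggers. Below is a minimal sketch of that assumed load-path behavior; sanitizeTriggers, TRIGGER_PATTERN, and MAX_TRIGGERS are illustrative names, not the library's actual API.

// Sketch only: the truncate-then-filter order the tests above imply for the
// YAML load path. Names are hypothetical, not the DollhouseMCP API.
const TRIGGER_PATTERN = /^[a-zA-Z0-9\-_]+$/; // same pattern the tests assert against
const MAX_TRIGGERS = 20;                     // cap implied by toBe(20)

function sanitizeTriggers(raw: string[]): string[] {
  // Cap to the first MAX_TRIGGERS entries, then drop invalid names.
  // For the interleaved fixture (valid at even indices), the first 20
  // entries contain exactly 10 valid triggers, matching toBe(10).
  return raw.slice(0, MAX_TRIGGERS).filter(t => TRIGGER_PATTERN.test(t));
}

// e.g. sanitizeTriggers(['ok', 'bad!name', 'also-ok']) -> ['ok', 'also-ok']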
