
Prompt Auto-Optimizer MCP

by sloth-wq
gc-optimization.test.ts (18.3 kB)
/**
 * Integration Tests for GC Optimization System
 *
 * Tests the complete integration of GC optimization with GEPA components,
 * including performance validation and memory efficiency verification.
 */

import { describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest';
import { GarbageCollectionOptimizer } from '../../core/gc-optimizer';
import { GEPAGCIntegration, GEPAGCManager, GEPAGCUtils } from '../../core/gc-integration';
import { GCBenchmarkSuite, runQuickGCBenchmark } from '../../core/gc-benchmarks';
import { PerformanceTracker } from '../../services/performance-tracker';
import { MemoryLeakDetector, MemoryLeakIntegration } from '../../core/memory-leak-detector';

describe('GC Optimization Integration', () => {
  let performanceTracker: PerformanceTracker;
  let gcIntegration: GEPAGCIntegration;
  let memoryLeakDetector: MemoryLeakDetector;

  beforeAll(async () => {
    performanceTracker = new PerformanceTracker();
    memoryLeakDetector = MemoryLeakIntegration.initialize();

    // Initialize global GC integration
    gcIntegration = GEPAGCManager.initialize(performanceTracker, {
      autoDetectWorkload: true,
      adaptiveStrategy: true,
      memoryThresholds: {
        lowMemory: 128,
        highMemory: 256,
        criticalMemory: 512,
      },
    });

    await gcIntegration.initialize();
  });

  afterAll(() => {
    gcIntegration.shutdown();
    GEPAGCManager.shutdown();
    MemoryLeakIntegration.shutdown();
  });

  describe('GEPA Component Integration', () => {
    test('should integrate with evolution engine', async () => {
      // Simulate evolution engine operations
      const candidate = GEPAGCUtils.getObject<any>('evolution-engine', 'candidates');

      expect(candidate).toBeDefined();
      expect(candidate).toMatchObject({
        id: '',
        fitness: 0,
        objectives: expect.any(Array),
        parameters: expect.any(Object),
        metadata: expect.any(Object),
        generation: 0,
      });

      // Modify candidate
      candidate.id = 'test-candidate-001';
      candidate.fitness = 0.85;
      candidate.objectives = [0.9, 0.7, 0.8];
      candidate.generation = 5;

      // Return to pool
      GEPAGCUtils.returnObject('evolution-engine', 'candidates', candidate);

      // Get again - should be reset
      const reusedCandidate = GEPAGCUtils.getObject<any>('evolution-engine', 'candidates');
      expect(reusedCandidate.id).toBe('');
      expect(reusedCandidate.fitness).toBe(0);
      expect(reusedCandidate.objectives).toEqual([]);
    });

    test('should integrate with pareto frontier', async () => {
      const solution = GEPAGCUtils.getObject<any>('pareto-frontier', 'solutions');

      expect(solution).toBeDefined();
      expect(solution).toMatchObject({
        id: '',
        objectives: expect.any(Array),
        dominationRank: 0,
        crowdingDistance: 0,
        feasible: true,
      });

      // Test solution operations
      solution.id = 'pareto-solution-001';
      solution.objectives = [0.1, 0.2, 0.3];
      solution.dominationRank = 1;
      solution.crowdingDistance = 0.5;

      GEPAGCUtils.returnObject('pareto-frontier', 'solutions', solution);
    });

    test('should integrate with cache system', async () => {
      const cacheEntry = GEPAGCUtils.getObject<any>('cache-system', 'entries');

      expect(cacheEntry).toBeDefined();
      expect(cacheEntry).toMatchObject({
        key: '',
        value: null,
        timestamp: 0,
        ttl: 0,
        hits: 0,
        size: 0,
      });

      // Test cache operations
      cacheEntry.key = 'test-cache-key';
      cacheEntry.value = { data: 'test-data' };
      cacheEntry.timestamp = Date.now();
      cacheEntry.ttl = 60000;
      cacheEntry.size = 1024;

      GEPAGCUtils.returnObject('cache-system', 'entries', cacheEntry);
    });

    test('should integrate with LLM adapter', async () => {
      const response = GEPAGCUtils.getObject<any>('llm-adapter', 'responses');

      expect(response).toBeDefined();
      expect(response).toMatchObject({
        id: '',
        content: '',
        metadata: expect.any(Object),
        timestamp: 0,
        tokens: { input: 0, output: 0 },
      });

      // Test LLM response operations
      response.id = 'llm-response-001';
      response.content = 'This is a test response from the LLM';
      response.timestamp = Date.now();
      response.tokens = { input: 100, output: 50 };

      GEPAGCUtils.returnObject('llm-adapter', 'responses', response);
    });
  });

  describe('Buffer Management Integration', () => {
    test('should provide optimized buffers', () => {
      const buffer1 = GEPAGCUtils.getBuffer('test-component', 1024);
      expect(buffer1).toBeInstanceOf(Buffer);
      expect(buffer1.length).toBe(1024);

      // Return buffer
      GEPAGCUtils.returnBuffer('test-component', buffer1);

      // Get another buffer of same size - should reuse
      const buffer2 = GEPAGCUtils.getBuffer('test-component', 1024);
      expect(buffer2).toBeInstanceOf(Buffer);
      expect(buffer2.length).toBe(1024);
    });

    test('should handle different buffer sizes', () => {
      const sizes = [512, 2048, 4096, 16384];
      const buffers = sizes.map(size => GEPAGCUtils.getBuffer('test-component', size));

      buffers.forEach((buffer, index) => {
        expect(buffer.length).toBe(sizes[index]);
      });

      // Return all buffers
      buffers.forEach(buffer => {
        GEPAGCUtils.returnBuffer('test-component', buffer);
      });
    });
  });

  describe('Workload Detection and Adaptation', () => {
    test('should detect workload characteristics', async () => {
      // Simulate high-throughput workload
      for (let i = 0; i < 100; i++) {
        await GEPAGCUtils.withOptimization(
          'evolution-engine',
          'candidate-creation',
          async () => {
            const candidate = GEPAGCUtils.getObject('evolution-engine', 'candidates');
            // Simulate processing
            await new Promise(resolve => setTimeout(resolve, 1));
            GEPAGCUtils.returnObject('evolution-engine', 'candidates', candidate);
            return candidate;
          }
        );
      }

      // Allow time for workload detection
      await new Promise(resolve => setTimeout(resolve, 1000));

      const stats = gcIntegration.getIntegrationStatistics();
      expect(stats.workloadDetection).toBeDefined();
    });

    test('should adapt strategy based on workload', async () => {
      const strategyChangePromise = new Promise((resolve) => {
        gcIntegration.once('adaptiveStrategyChange', resolve);
      });

      // Simulate memory-intensive workload
      const buffers = [];
      for (let i = 0; i < 50; i++) {
        const buffer = GEPAGCUtils.getBuffer('test-component', 1024 * 1024); // 1MB each
        buffers.push(buffer);
      }

      // Wait for adaptive strategy change (with timeout)
      const timeout = new Promise(resolve => setTimeout(resolve, 5000));
      await Promise.race([strategyChangePromise, timeout]);

      // Cleanup
      buffers.forEach(buffer => {
        GEPAGCUtils.returnBuffer('test-component', buffer);
      });
    });
  });

  describe('Memory Optimization', () => {
    test('should optimize component memory usage', async () => {
      // Create memory pressure by allocating many objects
      const objects = [];
      for (let i = 0; i < 1000; i++) {
        const candidate = GEPAGCUtils.getObject('evolution-engine', 'candidates');
        candidate.id = `test-${i}`;
        candidate.fitness = Math.random();
        objects.push(candidate);
      }

      const beforeMemory = process.memoryUsage().heapUsed;

      // Optimize evolution engine memory
      const result = await gcIntegration.optimizeComponentMemory('evolution-engine');

      expect(result).toMatchObject({
        beforeMemory: expect.any(Number),
        afterMemory: expect.any(Number),
        optimizationsApplied: expect.arrayContaining([
          expect.stringMatching(/cleanup|autotune|specific/)
        ]),
      });

      // Return objects to pool
      objects.forEach(obj => {
        GEPAGCUtils.returnObject('evolution-engine', 'candidates', obj);
      });
    });

    test('should handle memory pressure scenarios', async () => {
      const componentOptimizedPromise = new Promise((resolve) => {
        gcIntegration.once('componentOptimized', resolve);
      });

      // Create extreme memory pressure
      const largeBuffers = [];
      try {
        for (let i = 0; i < 100; i++) {
          const buffer = GEPAGCUtils.getBuffer('memory-test', 10 * 1024 * 1024); // 10MB each
          largeBuffers.push(buffer);
        }

        // Wait for memory optimization (with timeout)
        const timeout = new Promise(resolve => setTimeout(resolve, 3000));
        await Promise.race([componentOptimizedPromise, timeout]);
      } finally {
        // Cleanup
        largeBuffers.forEach(buffer => {
          GEPAGCUtils.returnBuffer('memory-test', buffer);
        });
      }
    });
  });

  describe('Performance Monitoring', () => {
    test('should track pool performance metrics', () => {
      const stats = gcIntegration.getIntegrationStatistics();

      expect(stats).toMatchObject({
        gcOptimization: expect.any(Object),
        componentPools: expect.arrayContaining([
          expect.objectContaining({
            component: expect.any(String),
            pools: expect.any(Array),
          })
        ]),
        workloadDetection: expect.objectContaining({
          currentWorkload: expect.any(Object),
          detectionHistory: expect.any(Array),
          adaptiveChanges: expect.any(Number),
        }),
        memoryEfficiency: expect.objectContaining({
          totalMemorySaved: expect.any(Number),
          optimizationHits: expect.any(Number),
          poolEfficiency: expect.any(Number),
        }),
      });
    });

    test('should generate performance reports', () => {
      // Generate some activity first
      for (let i = 0; i < 50; i++) {
        const candidate = GEPAGCUtils.getObject('evolution-engine', 'candidates');
        candidate.id = `perf-test-${i}`;
        GEPAGCUtils.returnObject('evolution-engine', 'candidates', candidate);
      }

      const stats = gcIntegration.getIntegrationStatistics();

      // Check that we have meaningful statistics
      expect(stats.componentPools.length).toBeGreaterThan(0);

      const evolutionPools = stats.componentPools.find(cp => cp.component === 'evolution-engine');
      expect(evolutionPools).toBeDefined();
      expect(evolutionPools!.pools.length).toBeGreaterThan(0);
    });
  });

  describe('Error Handling and Resilience', () => {
    test('should handle missing component gracefully', () => {
      expect(() => {
        GEPAGCUtils.getObject('non-existent-component', 'test-object');
      }).toThrow('Component non-existent-component not registered');
    });

    test('should handle missing object type gracefully', () => {
      expect(() => {
        GEPAGCUtils.getObject('evolution-engine', 'non-existent-type');
      }).toThrow('Object type non-existent-type not found');
    });

    test('should continue operating with optimization errors', async () => {
      // This should not throw even if internal optimization fails
      const result = await gcIntegration.optimizeComponentMemory('non-existent-component');

      expect(result).toMatchObject({
        beforeMemory: expect.any(Number),
        afterMemory: expect.any(Number),
        optimizationsApplied: expect.any(Array),
      });
    });
  });
});

describe('GC Benchmark Integration', () => {
  let benchmarkSuite: GCBenchmarkSuite;

  beforeEach(() => {
    benchmarkSuite = new GCBenchmarkSuite();
  });

  describe('Quick Benchmark', () => {
    test('should run quick performance benchmark', async () => {
      const result = await runQuickGCBenchmark();

      expect(result).toMatchObject({
        performanceGain: expect.any(Number),
        memoryReduction: expect.any(Number),
        recommendations: expect.arrayContaining([
          expect.any(String)
        ]),
      });

      expect(result.performanceGain).toBeGreaterThanOrEqual(0);
      expect(result.memoryReduction).toBeGreaterThanOrEqual(0);
      expect(result.recommendations.length).toBeGreaterThan(0);
    }, 30000); // 30 second timeout for benchmark
  });

  describe('Workload Simulations', () => {
    test('should run high-throughput simulation', async () => {
      const config = {
        name: 'integration-test',
        description: 'Integration test benchmark',
        iterations: 100,
        warmupIterations: 10,
        workloadType: 'high-throughput' as const,
        memoryPressure: 'medium' as const,
        dataSize: 1024,
        concurrency: 1,
      };

      const result = await benchmarkSuite.runBenchmark('high-throughput-small', true);

      expect(result).toMatchObject({
        config: expect.objectContaining({
          workloadType: 'high-throughput',
        }),
        metrics: expect.objectContaining({
          totalDuration: expect.any(Number),
          averageDuration: expect.any(Number),
          throughput: expect.any(Number),
          memoryUsage: expect.objectContaining({
            peak: expect.any(Number),
            average: expect.any(Number),
          }),
        }),
        gcMetrics: expect.objectContaining({
          totalCollections: expect.any(Number),
          totalGCTime: expect.any(Number),
        }),
      });
    }, 20000); // 20 second timeout
  });
});

describe('End-to-End GC Optimization', () => {
  test('should demonstrate complete optimization workflow', async () => {
    const performanceTracker = new PerformanceTracker();

    // Initialize GC integration
    const integration = new GEPAGCIntegration(performanceTracker, {
      autoDetectWorkload: true,
      adaptiveStrategy: true,
    });

    await integration.initialize();

    try {
      // Simulate a complete GEPA workflow
      const workflowSteps = [
        // Evolution engine phase
        async () => {
          const candidates = [];
          for (let i = 0; i < 100; i++) {
            const candidate = integration.getOptimizedObject('evolution-engine', 'candidates');
            candidate.id = `workflow-candidate-${i}`;
            candidate.fitness = Math.random();
            candidate.objectives = [Math.random(), Math.random(), Math.random()];
            candidates.push(candidate);
          }

          // Return candidates
          candidates.forEach(candidate => {
            integration.returnOptimizedObject('evolution-engine', 'candidates', candidate);
          });

          return candidates.length;
        },

        // Pareto frontier analysis
        async () => {
          const solutions = [];
          for (let i = 0; i < 50; i++) {
            const solution = integration.getOptimizedObject('pareto-frontier', 'solutions');
            solution.id = `workflow-solution-${i}`;
            solution.objectives = [Math.random(), Math.random()];
            solution.dominationRank = Math.floor(Math.random() * 5);
            solutions.push(solution);
          }

          // Return solutions
          solutions.forEach(solution => {
            integration.returnOptimizedObject('pareto-frontier', 'solutions', solution);
          });

          return solutions.length;
        },

        // Cache system operations
        async () => {
          const entries = [];
          for (let i = 0; i < 200; i++) {
            const entry = integration.getOptimizedObject('cache-system', 'entries');
            entry.key = `workflow-cache-${i}`;
            entry.value = { data: `cached-data-${i}` };
            entry.timestamp = Date.now();
            entries.push(entry);
          }

          // Return entries
          entries.forEach(entry => {
            integration.returnOptimizedObject('cache-system', 'entries', entry);
          });

          return entries.length;
        },

        // LLM adapter operations
        async () => {
          const responses = [];
          for (let i = 0; i < 20; i++) {
            const response = integration.getOptimizedObject('llm-adapter', 'responses');
            response.id = `workflow-response-${i}`;
            response.content = `Generated response ${i}`;
            response.timestamp = Date.now();
            response.tokens = { input: 100 + i, output: 50 + i };
            responses.push(response);
          }

          // Return responses
          responses.forEach(response => {
            integration.returnOptimizedObject('llm-adapter', 'responses', response);
          });

          return responses.length;
        },
      ];

      // Execute workflow steps
      const results = [];
      for (const step of workflowSteps) {
        const result = await step();
        results.push(result);
      }

      // Force memory optimization
      await integration.optimizeComponentMemory('evolution-engine');
      await integration.optimizeComponentMemory('pareto-frontier');
      await integration.optimizeComponentMemory('cache-system');
      await integration.optimizeComponentMemory('llm-adapter');

      // Get final statistics
      const finalStats = integration.getIntegrationStatistics();

      expect(results).toEqual([100, 50, 200, 20]);
      expect(finalStats.componentPools.length).toBe(4);

      // Verify all component pools have activity
      const componentNames = finalStats.componentPools.map(cp => cp.component);
      expect(componentNames).toEqual(
        expect.arrayContaining([
          'evolution-engine',
          'pareto-frontier',
          'cache-system',
          'llm-adapter'
        ])
      );
    } finally {
      integration.shutdown();
    }
  }, 30000); // 30 second timeout for full workflow
});
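Every test above exercises the same borrow-and-return lifecycle: take a pooled object from GEPAGCUtils, mutate it, and hand it back so the pool can reset and reuse it. The sketch below is not part of the repository; it assumes only the GEPAGCUtils calls shown in the tests, and the helper name and try/finally wrapper are illustrative.

import { GEPAGCUtils } from '../../core/gc-integration';

// Hypothetical helper: one pooled candidate evaluation wrapped in withOptimization,
// mirroring the 'should detect workload characteristics' test above.
async function evaluateCandidateOnce(): Promise<void> {
  await GEPAGCUtils.withOptimization('evolution-engine', 'candidate-creation', async () => {
    const candidate = GEPAGCUtils.getObject<any>('evolution-engine', 'candidates');
    try {
      candidate.id = 'example-candidate';
      candidate.fitness = Math.random();
    } finally {
      // Returning the object lets the pool reset it, as the first test verifies.
      GEPAGCUtils.returnObject('evolution-engine', 'candidates', candidate);
    }
  });
}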

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'
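The same lookup from TypeScript, as a minimal sketch: it assumes a runtime with a global fetch (Node 18+ or a browser) and makes no assumption about the response schema, which is not documented on this page.

// Hypothetical client-side equivalent of the curl command above.
const url = 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp';

async function fetchServerInfo(): Promise<unknown> {
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Request failed: ${response.status} ${response.statusText}`);
  }
  // Treat the body as unknown until it is validated against the API documentation.
  return response.json();
}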

If you have feedback or need assistance with the MCP directory API, please join our Discord server.