Skip to main content
Glama

Prompt Auto-Optimizer MCP

by sloth-wq
memory-optimization-suite.test.ts (48.9 kB)
/** * Comprehensive Memory Optimization Test Suite for GEPA * * Phase 4.3.5 - Complete validation of all memory optimization features: * - Memory stress testing with long-running evolution cycles * - Leak detection accuracy and auto-fix mechanisms * - GC optimization performance validation * - Integration testing across all components * - System resilience under memory pressure */ import { describe, test, expect, beforeEach, afterEach } from 'vitest'; import { performance } from 'perf_hooks'; import { MemoryLeakDetector, MemoryLeakIntegration } from '../../core/memory-leak-detector'; import { GarbageCollectionOptimizer, ObjectPool } from '../../core/gc-optimizer'; import { PerformanceTracker } from '../../services/performance-tracker'; import { CacheManager } from '../../core/cache/cache-manager'; import { ParetoFrontier } from '../../core/pareto-frontier'; import { LLMAdapter } from '../../services/llm-adapter'; import { PromptCandidate } from '../../types/gepa'; describe('GEPA Memory Optimization Comprehensive Test Suite', () => { let memoryLeakDetector: MemoryLeakDetector; let gcOptimizer: GarbageCollectionOptimizer; let performanceTracker: PerformanceTracker; let cacheManager: CacheManager; let paretoFrontier: ParetoFrontier; let llmAdapter: LLMAdapter; // Test configuration const STRESS_TEST_DURATION = 5000; // 5 seconds for stress tests const MEMORY_PRESSURE_THRESHOLD = 100 * 1024 * 1024; // 100MB const MAX_EVOLUTION_CYCLES = 50; const CONCURRENT_OPERATIONS = 10; beforeEach(async () => { // Initialize performance tracking performanceTracker = new PerformanceTracker(); // Initialize memory leak detection with testing thresholds memoryLeakDetector = MemoryLeakIntegration.initialize({ heapGrowthRate: 1, // Very sensitive for testing maxHeapSize: 50, // 50MB limit for testing maxObjectCount: 500, memoryIncreaseThreshold: 20, // 20% increase threshold monitoringWindow: 3000, // 3 second window snapshotInterval: 1000, // 1 second snapshots }); // Initialize GC 
optimizer gcOptimizer = new GarbageCollectionOptimizer(performanceTracker, memoryLeakDetector); // Initialize GEPA components with memory tracking cacheManager = new CacheManager({ l1MaxSize: 2 * 1024 * 1024, // 2MB l1MaxEntries: 200, l2Enabled: true, l2MaxSize: 4 * 1024 * 1024, // 4MB l2MaxEntries: 500, enableMemoryTracking: true, }); paretoFrontier = new ParetoFrontier({ objectives: [ { name: 'score', weight: 1, direction: 'maximize', extractor: (candidate: PromptCandidate) => candidate.averageScore, }, { name: 'efficiency', weight: 0.5, direction: 'minimize', extractor: (candidate: PromptCandidate) => candidate.rolloutCount || 1, }, { name: 'diversity', weight: 0.3, direction: 'maximize', extractor: (candidate: PromptCandidate) => candidate.content.length, }, ], maxSize: 100, enableMemoryTracking: true, }); llmAdapter = new LLMAdapter({ maxConcurrentProcesses: 3, processTimeout: 2000, // 2 seconds for testing executable: 'echo', // Use echo for safe testing enableMemoryTracking: true, }); // Set optimal strategy for testing gcOptimizer.setOptimizationStrategy('memory-intensive'); }); afterEach(async () => { // Cleanup in proper order if (llmAdapter) await llmAdapter.shutdown(); if (paretoFrontier) paretoFrontier.clear(); if (cacheManager) await cacheManager.shutdown(); if (gcOptimizer) gcOptimizer.shutdown(); if (memoryLeakDetector) memoryLeakDetector.shutdown(); MemoryLeakIntegration.shutdown(); }); describe('Memory Stress Testing', () => { test('should handle long-running evolution cycles without memory leaks', async () => { const evolutionCycles: Array<{ generation: number; candidates: PromptCandidate[]; memorySnapshot: NodeJS.MemoryUsage; }> = []; const memoryGrowthAlerts: any[] = []; memoryLeakDetector.on('memoryLeakDetected', (detection) => { if (detection.leakType === 'heap_growth') { memoryGrowthAlerts.push(detection); } }); // Simulate long-running evolution for (let generation = 1; generation <= MAX_EVOLUTION_CYCLES; generation++) { const startMemory = 
process.memoryUsage(); const candidates: PromptCandidate[] = []; // Generate candidates for this generation for (let i = 0; i < 20; i++) { const candidate: PromptCandidate = { id: `gen-${generation}-candidate-${i}`, generation, content: `Prompt for generation ${generation}, candidate ${i}. ${ 'This is additional content to simulate real prompts. '.repeat(Math.floor(Math.random() * 10) + 5) }`, averageScore: Math.random() * 0.3 + 0.7, // Score between 0.7-1.0 rolloutCount: Math.floor(Math.random() * 50) + 10, timestamp: new Date(), metadata: { mutationHistory: Array.from({ length: generation }, (_, j) => `mutation-${j}`), parentIds: generation > 1 ? [`gen-${generation-1}-candidate-${Math.floor(Math.random() * 20)}`] : [], evaluationMetrics: { coherence: Math.random(), relevance: Math.random(), creativity: Math.random(), }, }, }; candidates.push(candidate); await paretoFrontier.addCandidate(candidate); // Cache evaluation results await cacheManager.set( `eval-${candidate.id}`, { score: candidate.averageScore, metrics: candidate.metadata?.evaluationMetrics, computedAt: Date.now(), } ); // Simulate LLM processing if (i % 5 === 0) { await llmAdapter.generateResponse(`Evaluate: ${candidate.content.substring(0, 100)}`); } } // Force garbage collection and memory analysis const gcMetrics = await gcOptimizer.forceGarbageCollection(`evolution-gen-${generation}`); await memoryLeakDetector.detectMemoryLeaks(); const endMemory = process.memoryUsage(); evolutionCycles.push({ generation, candidates, memorySnapshot: endMemory, }); // Verify memory growth is controlled const memoryGrowth = endMemory.heapUsed - startMemory.heapUsed; expect(memoryGrowth).toBeLessThan(50 * 1024 * 1024); // Less than 50MB growth per generation // Wait between cycles to allow cleanup await new Promise(resolve => setTimeout(resolve, 50)); } // Analyze memory trends across generations const memoryGrowthTrend = evolutionCycles.map((cycle, index) => ({ generation: cycle.generation, heapUsed: 
cycle.memorySnapshot.heapUsed, growthFromPrevious: index > 0 ? cycle.memorySnapshot.heapUsed - evolutionCycles[index - 1].memorySnapshot.heapUsed : 0, })); // Memory should not grow linearly with generations const averageGrowth = memoryGrowthTrend .slice(1) .reduce((sum, cycle) => sum + cycle.growthFromPrevious, 0) / (evolutionCycles.length - 1); expect(averageGrowth).toBeLessThan(10 * 1024 * 1024); // Less than 10MB average growth expect(memoryGrowthAlerts.length).toBeLessThan(5); // Minimal growth alerts // Verify final frontier size is controlled expect(paretoFrontier.size()).toBeLessThanOrEqual(100); }, 30000); // 30 second timeout test('should handle high-concurrency memory pressure', async () => { const concurrentPromises: Promise<any>[] = []; const memorySnapshots: NodeJS.MemoryUsage[] = []; const errors: Error[] = []; // Launch concurrent operations for (let thread = 0; thread < CONCURRENT_OPERATIONS; thread++) { const promise = (async () => { try { for (let operation = 0; operation < 100; operation++) { const operationId = `thread-${thread}-op-${operation}`; // Concurrent cache operations await cacheManager.set( `cache-${operationId}`, { data: 'x'.repeat(1000), // 1KB payload metadata: { thread, operation, timestamp: Date.now() }, } ); // Concurrent frontier operations const candidate: PromptCandidate = { id: `candidate-${operationId}`, generation: Math.floor(operation / 20) + 1, content: `Concurrent candidate ${operationId}. ${'Content padding. 
'.repeat(20)}`, averageScore: Math.random(), rolloutCount: operation + 1, timestamp: new Date(), }; await paretoFrontier.addCandidate(candidate); // Concurrent LLM operations (simulated) if (operation % 10 === 0) { await llmAdapter.generateResponse(`Process ${operationId}`); } // Memory monitoring if (operation % 25 === 0) { memorySnapshots.push(process.memoryUsage()); await memoryLeakDetector.detectMemoryLeaks(); } } } catch (error) { errors.push(error as Error); } })(); concurrentPromises.push(promise); } // Wait for all concurrent operations await Promise.allSettled(concurrentPromises); // Verify system stability expect(errors.length).toBe(0); expect(memorySnapshots.length).toBeGreaterThan(0); // Analyze memory stability under concurrency const maxMemory = Math.max(...memorySnapshots.map(s => s.heapUsed)); const minMemory = Math.min(...memorySnapshots.map(s => s.heapUsed)); const memoryVariation = (maxMemory - minMemory) / minMemory; expect(memoryVariation).toBeLessThan(2.0); // Less than 200% variation }, 20000); test('should maintain performance under large population stress', async () => { const LARGE_POPULATION_SIZE = 1000; const startTime = performance.now(); const performanceMetrics: Array<{ operation: string; duration: number; memoryDelta: number }> = []; for (let batch = 0; batch < 10; batch++) { const batchStartTime = performance.now(); const batchStartMemory = process.memoryUsage().heapUsed; // Create large batch of candidates const batchPromises = Array.from({ length: LARGE_POPULATION_SIZE / 10 }, async (_, i) => { const candidateId = `large-pop-${batch}-${i}`; const candidate: PromptCandidate = { id: candidateId, generation: batch + 1, content: `Large population candidate ${candidateId}. ${ 'Extended content for realistic memory usage. 
'.repeat(Math.floor(Math.random() * 20) + 10) }`, averageScore: Math.random(), rolloutCount: Math.floor(Math.random() * 100) + 1, timestamp: new Date(), metadata: { populationSize: LARGE_POPULATION_SIZE, batchIndex: batch, candidateIndex: i, }, }; await paretoFrontier.addCandidate(candidate); await cacheManager.set(`eval-${candidateId}`, { score: candidate.averageScore }); }); await Promise.all(batchPromises); // Force optimization await gcOptimizer.forceGarbageCollection(`large-population-batch-${batch}`); const batchEndTime = performance.now(); const batchEndMemory = process.memoryUsage().heapUsed; performanceMetrics.push({ operation: `batch-${batch}`, duration: batchEndTime - batchStartTime, memoryDelta: batchEndMemory - batchStartMemory, }); // Verify performance doesn't degrade expect(batchEndTime - batchStartTime).toBeLessThan(5000); // Less than 5 seconds per batch } const totalDuration = performance.now() - startTime; expect(totalDuration).toBeLessThan(30000); // Total under 30 seconds // Verify memory usage is stable across batches const avgMemoryDelta = performanceMetrics.reduce((sum, m) => sum + m.memoryDelta, 0) / performanceMetrics.length; expect(avgMemoryDelta).toBeLessThan(50 * 1024 * 1024); // Less than 50MB average delta }, 40000); test('should validate memory growth patterns during extended operation', async () => { const memoryHistory: Array<{ timestamp: number; usage: NodeJS.MemoryUsage; operation: string }> = []; const startTime = Date.now(); // Extended operation simulation while (Date.now() - startTime < STRESS_TEST_DURATION) { const operation = `extended-op-${Date.now()}`; const beforeMemory = process.memoryUsage(); // Perform mixed operations await cacheManager.set(operation, { data: 'x'.repeat(2000) }); const candidate: PromptCandidate = { id: operation, generation: Math.floor((Date.now() - startTime) / 1000) + 1, content: `Extended operation candidate ${operation}`, averageScore: Math.random(), rolloutCount: 1, timestamp: new Date(), }; 
await paretoFrontier.addCandidate(candidate); const afterMemory = process.memoryUsage(); memoryHistory.push({ timestamp: Date.now(), usage: afterMemory, operation, }); // Periodic cleanup and monitoring if (memoryHistory.length % 20 === 0) { await memoryLeakDetector.detectMemoryLeaks(); await gcOptimizer.forceGarbageCollection('extended-operation-cleanup'); } await new Promise(resolve => setTimeout(resolve, 25)); } // Analyze memory growth patterns const memoryTrend = memoryHistory.map((entry, index) => ({ timestamp: entry.timestamp, heapUsed: entry.usage.heapUsed, growthRate: index > 0 ? (entry.usage.heapUsed - memoryHistory[index - 1].usage.heapUsed) / (entry.timestamp - memoryHistory[index - 1].timestamp) * 1000 // MB/second : 0, })); // Memory growth should be controlled const avgGrowthRate = memoryTrend .slice(1) .reduce((sum, entry) => sum + entry.growthRate, 0) / (memoryTrend.length - 1); expect(avgGrowthRate).toBeLessThan(1024 * 1024); // Less than 1MB/second average growth expect(memoryHistory.length).toBeGreaterThan(50); // Sufficient operation history }); }); describe('Leak Detection Accuracy Validation', () => { test('should accurately detect object accumulation leaks', async () => { const detectedLeaks: any[] = []; let autoFixCount = 0; memoryLeakDetector.on('memoryLeakDetected', (detection) => { detectedLeaks.push(detection); }); memoryLeakDetector.on('autoFixApplied', () => { autoFixCount++; }); // Simulate known leak patterns const leakySources = ['cache-overflow', 'frontier-accumulation', 'process-buildup']; for (const source of leakySources) { // Create intentional leak for (let i = 0; i < 150; i++) { await cacheManager.set(`${source}-${i}`, { data: 'x'.repeat(5000), // 5KB per entry source, leaky: true, }); if (source === 'frontier-accumulation') { const candidate: PromptCandidate = { id: `leak-candidate-${source}-${i}`, generation: 1, content: `Leak simulation candidate ${i}. ${'Padding. 
'.repeat(100)}`, averageScore: Math.random(), rolloutCount: i + 1, timestamp: new Date(), }; await paretoFrontier.addCandidate(candidate); } if (source === 'process-buildup' && i % 10 === 0) { await llmAdapter.generateResponse(`Leak test ${i}`); } } // Allow detection to run await memoryLeakDetector.detectMemoryLeaks(); await new Promise(resolve => setTimeout(resolve, 100)); } // Validate detection accuracy expect(detectedLeaks.length).toBeGreaterThan(0); // Should detect cache-related leaks const cacheLeaks = detectedLeaks.filter(d => d.component === 'cache-manager' || d.leakType === 'cache_overflow' ); expect(cacheLeaks.length).toBeGreaterThan(0); // Should detect frontier-related leaks const frontierLeaks = detectedLeaks.filter(d => d.component === 'pareto-frontier'); expect(frontierLeaks.length).toBeGreaterThan(0); // Auto-fixes should be applied expect(autoFixCount).toBeGreaterThan(0); }); test('should validate leak detection monitoring and alert systems', async () => { const alertLog: Array<{ type: string; severity: string; timestamp: number; data: any }> = []; // Set up comprehensive monitoring memoryLeakDetector.on('memoryLeakDetected', (detection) => { alertLog.push({ type: 'leak_detected', severity: detection.severity, timestamp: Date.now(), data: detection, }); }); memoryLeakDetector.on('thresholdViolation', (violation) => { alertLog.push({ type: 'threshold_violation', severity: 'warning', timestamp: Date.now(), data: violation, }); }); memoryLeakDetector.on('autoFixApplied', (fix) => { alertLog.push({ type: 'auto_fix_applied', severity: 'info', timestamp: Date.now(), data: fix, }); }); // Create escalating memory pressure const pressureLevels = [ { operations: 50, size: 1000 }, { operations: 100, size: 2000 }, { operations: 200, size: 5000 }, ]; for (const level of pressureLevels) { for (let i = 0; i < level.operations; i++) { await cacheManager.set(`pressure-${level.size}-${i}`, { data: 'x'.repeat(level.size), pressureLevel: level, }); // Trigger 
detection at intervals if (i % 25 === 0) { await memoryLeakDetector.detectMemoryLeaks(); } } await new Promise(resolve => setTimeout(resolve, 200)); } // Validate alert system responsiveness expect(alertLog.length).toBeGreaterThan(0); // Should have escalating severity levels const criticalAlerts = alertLog.filter(alert => alert.severity === 'critical'); const highAlerts = alertLog.filter(alert => alert.severity === 'high'); const warningAlerts = alertLog.filter(alert => alert.severity === 'medium' || alert.severity === 'warning'); expect(highAlerts.length + criticalAlerts.length).toBeGreaterThan(0); // Alert timestamps should be in order const timestamps = alertLog.map(alert => alert.timestamp); const sortedTimestamps = [...timestamps].sort(); expect(timestamps).toEqual(sortedTimestamps); }); test('should stress test cleanup process accuracy', async () => { const cleanupMetrics: Array<{ type: string; cleaned: number; memoryFreed: number; duration: number; timestamp: number; }> = []; // Fill system with trackable objects const trackedObjects = []; for (let i = 0; i < 500; i++) { const obj = { id: `tracked-${i}`, data: 'x'.repeat(1000), timestamp: Date.now(), }; trackedObjects.push(obj); await cacheManager.set(obj.id, obj); memoryLeakDetector.trackObjectAllocation('stress-test', obj, 1000); } // Test different cleanup strategies const cleanupStrategies = [ 'force-cleanup', 'lazy-cleanup', 'component-cleanup', 'aggressive-cleanup', ]; for (const strategy of cleanupStrategies) { const startTime = performance.now(); const beforeMemory = process.memoryUsage().heapUsed; let cleaned = 0; switch (strategy) { case 'force-cleanup': const forceResult = await memoryLeakDetector.forceCleanup(); cleaned = forceResult.cleaned; break; case 'lazy-cleanup': // Trigger lazy cleanup through GC optimizer await gcOptimizer.forceGarbageCollection('lazy-cleanup-test'); cleaned = 10; // Estimate break; case 'component-cleanup': await cacheManager['performCleanup'](); await 
paretoFrontier.performMemoryCleanup(); cleaned = 20; // Estimate break; case 'aggressive-cleanup': await memoryLeakDetector.simulateMemoryPressure({ enabled: true, targetMemoryMB: 10, duration: 500, escalationSteps: 3, }); cleaned = 30; // Estimate break; } const endTime = performance.now(); const afterMemory = process.memoryUsage().heapUsed; cleanupMetrics.push({ type: strategy, cleaned, memoryFreed: beforeMemory - afterMemory, duration: endTime - startTime, timestamp: Date.now(), }); await new Promise(resolve => setTimeout(resolve, 100)); } // Validate cleanup effectiveness expect(cleanupMetrics.length).toBe(cleanupStrategies.length); const totalCleaned = cleanupMetrics.reduce((sum, metric) => sum + metric.cleaned, 0); const totalMemoryFreed = cleanupMetrics.reduce((sum, metric) => sum + metric.memoryFreed, 0); expect(totalCleaned).toBeGreaterThan(0); // Memory freed can be positive or negative due to GC timing expect(typeof totalMemoryFreed).toBe('number'); // Cleanup should be reasonably fast const avgCleanupTime = cleanupMetrics.reduce((sum, metric) => sum + metric.duration, 0) / cleanupMetrics.length; expect(avgCleanupTime).toBeLessThan(2000); // Less than 2 seconds average }); }); describe('GC Optimization Performance Validation', () => { test('should validate object pool efficiency under load', async () => { const poolNames = ['candidates', 'trajectory-data', 'analysis-results']; const poolMetrics: Record<string, any[]> = {}; // Initialize metrics tracking poolNames.forEach(name => { poolMetrics[name] = []; gcOptimizer.on('poolHit', (event) => { if (event.pool === name) { poolMetrics[name].push({ type: 'hit', stats: event.stats, timestamp: Date.now() }); } }); gcOptimizer.on('poolMiss', (event) => { if (event.pool === name) { poolMetrics[name].push({ type: 'miss', stats: event.stats, timestamp: Date.now() }); } }); }); // Stress test object pools for (let round = 0; round < 10; round++) { const roundPromises = poolNames.map(async (poolName) => { const pool 
= gcOptimizer.getObjectPool(poolName); if (!pool) return; // Get and return objects rapidly const objects = []; for (let i = 0; i < 50; i++) { objects.push(pool.get()); } // Return half immediately for (let i = 0; i < 25; i++) { pool.return(objects[i]); } // Get more objects (should hit pool) for (let i = 0; i < 25; i++) { objects.push(pool.get()); } // Return remaining objects.forEach(obj => pool.return(obj)); }); await Promise.all(roundPromises); await new Promise(resolve => setTimeout(resolve, 50)); } // Validate pool performance poolNames.forEach(poolName => { const metrics = poolMetrics[poolName]; if (metrics.length === 0) return; const hits = metrics.filter(m => m.type === 'hit').length; const misses = metrics.filter(m => m.type === 'miss').length; const hitRate = hits / (hits + misses); expect(hitRate).toBeGreaterThan(0.5); // At least 50% hit rate }); }); test('should validate GC strategy switching performance', async () => { const strategies = ['balanced', 'high-throughput', 'low-latency', 'memory-intensive']; const strategyMetrics: Array<{ strategy: string; gcCount: number; avgDuration: number; avgEfficiency: number; memoryReclaimed: number; }> = []; for (const strategy of strategies) { gcOptimizer.setOptimizationStrategy(strategy); const beforeStats = gcOptimizer.getOptimizationStatistics(); const beforeGCCount = beforeStats.gcMetrics.totalCollections; // Generate load for this strategy for (let i = 0; i < 100; i++) { await cacheManager.set(`strategy-test-${strategy}-${i}`, { data: 'x'.repeat(2000), strategy, }); if (i % 20 === 0) { await gcOptimizer.forceGarbageCollection(`strategy-${strategy}-${i}`); } } await new Promise(resolve => setTimeout(resolve, 200)); const afterStats = gcOptimizer.getOptimizationStatistics(); const gcCount = afterStats.gcMetrics.totalCollections - beforeGCCount; strategyMetrics.push({ strategy, gcCount, avgDuration: afterStats.gcMetrics.averageDuration, avgEfficiency: afterStats.gcMetrics.averageEfficiency, memoryReclaimed: 
afterStats.gcMetrics.memoryReclaimed, }); } // Validate strategy effectiveness expect(strategyMetrics.length).toBe(strategies.length); // Low-latency should have shorter durations const lowLatency = strategyMetrics.find(m => m.strategy === 'low-latency'); const highThroughput = strategyMetrics.find(m => m.strategy === 'high-throughput'); if (lowLatency && highThroughput) { // Low latency might have shorter durations in real scenarios expect(lowLatency.avgDuration).toBeGreaterThanOrEqual(0); expect(highThroughput.avgDuration).toBeGreaterThanOrEqual(0); } }); test('should measure GC optimization performance gains', async () => { const baselineMetrics = []; const optimizedMetrics = []; // Baseline performance (minimal optimization) gcOptimizer.setOptimizationStrategy('balanced'); for (let i = 0; i < 50; i++) { const startTime = performance.now(); await cacheManager.set(`baseline-${i}`, { data: 'x'.repeat(3000) }); if (i % 10 === 0) { await gcOptimizer.forceGarbageCollection('baseline'); } const endTime = performance.now(); baselineMetrics.push(endTime - startTime); } await new Promise(resolve => setTimeout(resolve, 500)); // Optimized performance gcOptimizer.setOptimizationStrategy('memory-intensive'); for (let i = 0; i < 50; i++) { const startTime = performance.now(); await cacheManager.set(`optimized-${i}`, { data: 'x'.repeat(3000) }); if (i % 10 === 0) { await gcOptimizer.forceGarbageCollection('optimized'); } const endTime = performance.now(); optimizedMetrics.push(endTime - startTime); } // Analyze performance difference const baselineAvg = baselineMetrics.reduce((sum, time) => sum + time, 0) / baselineMetrics.length; const optimizedAvg = optimizedMetrics.reduce((sum, time) => sum + time, 0) / optimizedMetrics.length; // Performance should be stable (not necessarily faster due to testing environment) expect(optimizedAvg).toBeGreaterThan(0); expect(baselineAvg).toBeGreaterThan(0); // Both should complete in reasonable time expect(baselineAvg).toBeLessThan(100); // 
Less than 100ms average expect(optimizedAvg).toBeLessThan(100); // Less than 100ms average }); test('should validate buffer reuse efficiency', async () => { const bufferSizes = [1024, 4096, 16384, 65536]; const reuseMetrics: Record<number, { gets: number; reuses: number; news: number }> = {}; // Initialize tracking bufferSizes.forEach(size => { reuseMetrics[size] = { gets: 0, reuses: 0, news: 0 }; }); // Test buffer reuse patterns for (let round = 0; round < 20; round++) { const buffers: Buffer[] = []; // Get buffers of different sizes bufferSizes.forEach(size => { const buffer = gcOptimizer.getBuffer(size); buffers.push(buffer); reuseMetrics[size].gets++; // Use buffer to simulate real usage buffer.fill(0x42); }); // Return half the buffers for (let i = 0; i < buffers.length / 2; i++) { gcOptimizer.returnBuffer(buffers[i]); } // Get new buffers (should reuse some) bufferSizes.forEach(size => { const buffer = gcOptimizer.getBuffer(size); reuseMetrics[size].gets++; // This could be a reused buffer if (buffer.readUInt8(0) === 0x00) { // Reset buffer reuseMetrics[size].reuses++; } else { reuseMetrics[size].news++; } }); } // Validate reuse efficiency bufferSizes.forEach(size => { const metrics = reuseMetrics[size]; expect(metrics.gets).toBeGreaterThan(0); // Some level of reuse should occur const reuseRate = metrics.reuses / metrics.gets; expect(reuseRate).toBeGreaterThanOrEqual(0); // Allow for testing variability }); }); }); describe('Integration Testing', () => { test('should validate end-to-end memory optimization workflow', async () => { const workflowSteps: Array<{ step: string; duration: number; memoryBefore: number; memoryAfter: number; components: any; }> = []; const evolutionWorkflow = async () => { // Step 1: Initialize population const step1Start = performance.now(); const step1MemoryBefore = process.memoryUsage().heapUsed; const initialCandidates = []; for (let i = 0; i < 50; i++) { const candidate: PromptCandidate = { id: `workflow-init-${i}`, 
generation: 1, content: `Initial candidate ${i}: ${ 'This is content for the initial population. '.repeat(10) }`, averageScore: Math.random() * 0.5 + 0.3, // 0.3-0.8 rolloutCount: 5, timestamp: new Date(), }; initialCandidates.push(candidate); await paretoFrontier.addCandidate(candidate); await cacheManager.set(`init-eval-${i}`, { score: candidate.averageScore }); } const step1MemoryAfter = process.memoryUsage().heapUsed; workflowSteps.push({ step: 'initialize_population', duration: performance.now() - step1Start, memoryBefore: step1MemoryBefore, memoryAfter: step1MemoryAfter, components: { candidates: initialCandidates.length }, }); // Step 2: Evolution cycles for (let gen = 2; gen <= 5; gen++) { const stepStart = performance.now(); const stepMemoryBefore = process.memoryUsage().heapUsed; // Selection and mutation const parents = paretoFrontier.getAllCandidates().slice(0, 20); const offspring = []; for (let i = 0; i < 30; i++) { const parent = parents[Math.floor(Math.random() * parents.length)]; const child: PromptCandidate = { id: `workflow-gen${gen}-${i}`, generation: gen, content: `${parent.content} -> Mutation ${i}`, averageScore: Math.min(1.0, parent.averageScore + (Math.random() - 0.5) * 0.2), rolloutCount: 1, timestamp: new Date(), metadata: { parentId: parent.id }, }; offspring.push(child); await paretoFrontier.addCandidate(child); // Evaluate and cache const evaluation = await llmAdapter.generateResponse( `Evaluate: ${child.content.substring(0, 50)}` ); await cacheManager.set(`gen${gen}-eval-${i}`, { score: child.averageScore, evaluation, generation: gen, }); } // Memory optimization await gcOptimizer.forceGarbageCollection(`generation-${gen}`); await memoryLeakDetector.detectMemoryLeaks(); const stepMemoryAfter = process.memoryUsage().heapUsed; workflowSteps.push({ step: `evolution_generation_${gen}`, duration: performance.now() - stepStart, memoryBefore: stepMemoryBefore, memoryAfter: stepMemoryAfter, components: { offspring: offspring.length, 
frontierSize: paretoFrontier.size(), cacheSize: (await cacheManager.getStatistics()).l1.entries, }, }); } // Step 3: Final optimization const step3Start = performance.now(); const step3MemoryBefore = process.memoryUsage().heapUsed; await paretoFrontier.performMemoryCleanup(); await cacheManager['performCleanup'](); await llmAdapter.performMemoryCleanup(); await gcOptimizer.forceGarbageCollection('final-optimization'); const step3MemoryAfter = process.memoryUsage().heapUsed; workflowSteps.push({ step: 'final_optimization', duration: performance.now() - step3Start, memoryBefore: step3MemoryBefore, memoryAfter: step3MemoryAfter, components: { finalFrontierSize: paretoFrontier.size(), finalCacheSize: (await cacheManager.getStatistics()).l1.entries, }, }); }; await evolutionWorkflow(); // Validate workflow completion expect(workflowSteps.length).toBeGreaterThan(5); // At least init + 4 generations + final // Memory should not grow unbounded const memoryGrowth = workflowSteps.map((step, index) => ({ step: step.step, growth: step.memoryAfter - step.memoryBefore, cumulative: index > 0 ? 
// Tail of the multi-generation workflow test (its opening lines are above this
// chunk): per-step memory growth is computed either cumulatively (relative to
// the first recorded step) or per-step, then total growth and average
// generation time are bounded.
          step.memoryAfter - workflowSteps[0].memoryBefore
          : step.memoryAfter - step.memoryBefore,
      }));

      const maxGrowth = Math.max(...memoryGrowth.map(g => g.cumulative));
      expect(maxGrowth).toBeLessThan(100 * 1024 * 1024); // Less than 100MB total growth

      // Performance should be stable across generations
      const generationSteps = workflowSteps.filter(s => s.step.includes('evolution_generation'));
      const avgGenerationTime =
        generationSteps.reduce((sum, s) => sum + s.duration, 0) / generationSteps.length;
      expect(avgGenerationTime).toBeLessThan(5000); // Less than 5 seconds per generation
    });

    test('should validate cross-component interaction under memory pressure', async () => {
      // One entry per attempted component action; memoryImpact is the heap
      // delta observed around the action (0 when the action threw).
      const interactionLog: Array<{
        component: string;
        action: string;
        timestamp: number;
        memoryImpact: number;
        success: boolean;
      }> = [];

      // Create memory pressure
      await memoryLeakDetector.simulateMemoryPressure({
        enabled: true,
        targetMemoryMB: 50,
        duration: 2000,
        escalationSteps: 4,
      });

      // Each component exposes a single `actions` closure that performs one
      // representative operation and records success/failure plus heap impact.
      const components = [
        {
          name: 'cache',
          actions: async () => {
            const startMemory = process.memoryUsage().heapUsed;
            try {
              // ~10KB payload to exercise the cache under pressure.
              await cacheManager.set('pressure-test', { data: 'x'.repeat(10000) });
              const stats = await cacheManager.getStatistics();
              const endMemory = process.memoryUsage().heapUsed;
              interactionLog.push({
                component: 'cache',
                action: 'set_large_entry',
                timestamp: Date.now(),
                memoryImpact: endMemory - startMemory,
                success: true,
              });
            } catch (error) {
              // Failure is expected to be possible under pressure; log it
              // rather than failing the test here.
              interactionLog.push({
                component: 'cache',
                action: 'set_large_entry',
                timestamp: Date.now(),
                memoryImpact: 0,
                success: false,
              });
            }
          },
        },
        {
          name: 'frontier',
          actions: async () => {
            const startMemory = process.memoryUsage().heapUsed;
            try {
              // Oversized candidate content (~5KB) stresses frontier storage.
              const candidate: PromptCandidate = {
                id: `pressure-candidate-${Date.now()}`,
                generation: 1,
                content: 'Pressure test candidate. ' + 'x'.repeat(5000),
                averageScore: Math.random(),
                rolloutCount: 1,
                timestamp: new Date(),
              };
              await paretoFrontier.addCandidate(candidate);
              const endMemory = process.memoryUsage().heapUsed;
              interactionLog.push({
                component: 'frontier',
                action: 'add_large_candidate',
                timestamp: Date.now(),
                memoryImpact: endMemory - startMemory,
                success: true,
              });
            } catch (error) {
              interactionLog.push({
                component: 'frontier',
                action: 'add_large_candidate',
                timestamp: Date.now(),
                memoryImpact: 0,
                success: false,
              });
            }
          },
        },
        {
          name: 'gc_optimizer',
          actions: async () => {
            const startMemory = process.memoryUsage().heapUsed;
            try {
              await gcOptimizer.forceGarbageCollection('pressure-test');
              const endMemory = process.memoryUsage().heapUsed;
              interactionLog.push({
                component: 'gc_optimizer',
                action: 'force_gc',
                timestamp: Date.now(),
                // NOTE: can be negative if the forced GC freed memory.
                memoryImpact: endMemory - startMemory,
                success: true,
              });
            } catch (error) {
              interactionLog.push({
                component: 'gc_optimizer',
                action: 'force_gc',
                timestamp: Date.now(),
                memoryImpact: 0,
                success: false,
              });
            }
          },
        },
      ];

      // Execute component actions concurrently under pressure:
      // 5 rounds x 3 components fired without awaiting, 100ms apart per round.
      const actionPromises = [];
      for (let round = 0; round < 5; round++) {
        for (const component of components) {
          actionPromises.push(component.actions());
        }
        await new Promise(resolve => setTimeout(resolve, 100));
      }
      // allSettled: individual failures are tolerated, they are tracked in
      // interactionLog instead.
      await Promise.allSettled(actionPromises);

      // Validate cross-component resilience
      expect(interactionLog.length).toBeGreaterThan(0);
      const successRate = interactionLog.filter(log => log.success).length / interactionLog.length;
      expect(successRate).toBeGreaterThan(0.7); // At least 70% success rate under pressure

      // Components should still function together
      const componentSuccessRates = components.map(comp => {
        const compLogs = interactionLog.filter(log => log.component === comp.name);
        const compSuccessRate = compLogs.filter(log => log.success).length / compLogs.length;
        return { component: comp.name, successRate: compSuccessRate };
      });

      componentSuccessRates.forEach(comp => {
        expect(comp.successRate).toBeGreaterThan(0.5); // Each component at least 50% success
      });
    });

    test('should validate system resilience and recovery under memory pressure', async () => {
      // Aggregated metrics across all pressure/recovery cycles.
      // (sic: "resilence" — original spelling kept.)
      const resilenceMetrics = {
        pressureCycles: 0,
        recoveryTimes: [] as number[],
        systemErrors: [] as Error[],
        memoryRecovery: [] as number[], // bytes reclaimed; may be negative
      };

      // Multiple pressure/recovery cycles
      for (let cycle = 0; cycle < 3; cycle++) {
        resilenceMetrics.pressureCycles++;
        const cycleStart = Date.now();
        const pressureStartMemory = process.memoryUsage().heapUsed;

        try {
          // Apply memory pressure
          await memoryLeakDetector.simulateMemoryPressure({
            enabled: true,
            targetMemoryMB: 30 + (cycle * 10), // Increasing pressure
            duration: 1000,
            escalationSteps: 3,
          });

          // Continue operations under pressure: 20 cache writes plus 20
          // frontier candidates per cycle.
          for (let i = 0; i < 20; i++) {
            await cacheManager.set(`resilience-${cycle}-${i}`, {
              data: 'x'.repeat(2000),
              cycle,
            });

            const candidate: PromptCandidate = {
              id: `resilience-candidate-${cycle}-${i}`,
              generation: cycle + 1,
              content: `Resilience test candidate ${cycle}-${i}`,
              averageScore: Math.random(),
              rolloutCount: 1,
              timestamp: new Date(),
            };
            await paretoFrontier.addCandidate(candidate);
          }

          // Force recovery
          await gcOptimizer.forceGarbageCollection(`recovery-cycle-${cycle}`);
          await memoryLeakDetector.forceCleanup();

          const recoveryEndMemory = process.memoryUsage().heapUsed;
          const recoveryTime = Date.now() - cycleStart;
          // Positive when recovery brought heap below the cycle's start level.
          const memoryRecovery = pressureStartMemory - recoveryEndMemory;

          resilenceMetrics.recoveryTimes.push(recoveryTime);
          resilenceMetrics.memoryRecovery.push(memoryRecovery);
        } catch (error) {
          // Cycle-level failures are collected and asserted on below instead
          // of aborting the test.
          resilenceMetrics.systemErrors.push(error as Error);
        }

        await new Promise(resolve => setTimeout(resolve, 500)); // Recovery pause
      }

      // Validate system resilience
      expect(resilenceMetrics.pressureCycles).toBe(3);
      expect(resilenceMetrics.systemErrors.length).toBeLessThan(2); // Minimal errors
      expect(resilenceMetrics.recoveryTimes.length).toBeGreaterThan(0);

      // Recovery should be reasonably fast
      const avgRecoveryTime =
        resilenceMetrics.recoveryTimes.reduce((sum, time) => sum + time, 0) /
        resilenceMetrics.recoveryTimes.length;
      expect(avgRecoveryTime).toBeLessThan(10000); // Less than 10 seconds average recovery

      // System should remain functional after all cycles
      const finalStats = {
        cache: await cacheManager.getStatistics(),
        frontier: paretoFrontier.size(),
        gc: gcOptimizer.getOptimizationStatistics(),
        detector: memoryLeakDetector.getStatistics(),
      };

      expect(finalStats.cache.l1.entries).toBeGreaterThan(0);
      expect(finalStats.frontier).toBeGreaterThan(0);
      expect(finalStats.gc.gcMetrics.totalCollections).toBeGreaterThan(0);
    });

    test('should validate performance regression detection', async () => {
      // Timing/memory samples collected before memory pressure is applied.
      const performanceBaseline: Array<{
        operation: string;
        duration: number;
        memoryUsage: number;
        timestamp: number;
      }> = [];

      // Samples collected after pressure, compared against the baseline.
      const performanceRegression: Array<{
        operation: string;
        duration: number;
        memoryUsage: number;
        timestamp: number;
      }> = [];

      // Establish baseline performance
      const baselineOperations = [
        'cache_set',
        'cache_get',
        'frontier_add',
        'frontier_query',
        'gc_force',
        'leak_detect',
      ];

      // 10 timed iterations per operation for the baseline.
      for (const operation of baselineOperations) {
        for (let i = 0; i < 10; i++) {
          const startTime = performance.now();
          const startMemory = process.memoryUsage().heapUsed;

          switch (operation) {
            case 'cache_set':
              await cacheManager.set(`baseline-${operation}-${i}`, { data: 'test' });
              break;
            case 'cache_get':
              // Reads back a random key written by the earlier 'cache_set'
              // iterations (operations run in array order).
              await cacheManager.get(`baseline-cache_set-${Math.floor(Math.random() * 10)}`);
              break;
            case 'frontier_add':
              await paretoFrontier.addCandidate({
                id: `baseline-${operation}-${i}`,
                generation: 1,
                content: 'Baseline test',
                averageScore: Math.random(),
                rolloutCount: 1,
                timestamp: new Date(),
              });
              break;
            case 'frontier_query':
              paretoFrontier.getAllCandidates();
              break;
            case 'gc_force':
              await gcOptimizer.forceGarbageCollection('baseline');
              break;
            case 'leak_detect':
              await memoryLeakDetector.detectMemoryLeaks();
              break;
          }

          const endTime = performance.now();
          const endMemory = process.memoryUsage().heapUsed;

          performanceBaseline.push({
            operation,
            duration: endTime - startTime,
            memoryUsage: endMemory - startMemory,
            timestamp: Date.now(),
          });

          // Small gap so iterations don't interfere with each other's timing.
          await new Promise(resolve => setTimeout(resolve, 10));
        }
      }

      // Add memory pressure and test for regression
      await memoryLeakDetector.simulateMemoryPressure({
        enabled: true,
        targetMemoryMB: 20,
        duration: 1000,
        escalationSteps: 2,
      });

      // Test performance under pressure: same operations/iteration counts as
      // the baseline loop, but with 'regression-' keys/ids.
      for (const operation of baselineOperations) {
        for (let i = 0; i < 10; i++) {
          const startTime = performance.now();
          const startMemory = process.memoryUsage().heapUsed;

          switch (operation) {
            case 'cache_set':
              await cacheManager.set(`regression-${operation}-${i}`, { data: 'test' });
              break;
            case 'cache_get':
              await cacheManager.get(`regression-cache_set-${Math.floor(Math.random() * 10)}`);
              break;
            case 'frontier_add':
              await paretoFrontier.addCandidate({
                id: `regression-${operation}-${i}`,
                generation: 1,
                content: 'Regression test',
                averageScore: Math.random(),
                rolloutCount: 1,
                timestamp: new Date(),
              });
              break;
            case 'frontier_query':
              paretoFrontier.getAllCandidates();
              break;
            case 'gc_force':
              await gcOptimizer.forceGarbageCollection('regression');
              break;
            case 'leak_detect':
              await memoryLeakDetector.detectMemoryLeaks();
              break;
          }

          const endTime = performance.now();
          const endMemory = process.memoryUsage().heapUsed;

          performanceRegression.push({
            operation,
            duration: endTime - startTime,
            memoryUsage: endMemory - startMemory,
            timestamp: Date.now(),
          });

          await new Promise(resolve => setTimeout(resolve, 10));
        }
      }

      // Analyze for performance regression: per-operation average duration
      // under pressure must stay within 5x of the baseline average.
      baselineOperations.forEach(operation => {
        const baselineMetrics = performanceBaseline.filter(m => m.operation === operation);
        const regressionMetrics = performanceRegression.filter(m => m.operation === operation);

        if (baselineMetrics.length > 0 && regressionMetrics.length > 0) {
          const baselineAvgDuration =
            baselineMetrics.reduce((sum, m) => sum + m.duration, 0) / baselineMetrics.length;
          const regressionAvgDuration =
            regressionMetrics.reduce((sum, m) => sum + m.duration, 0) / regressionMetrics.length;
          const performanceRatio = regressionAvgDuration / baselineAvgDuration;

          // Allow for some performance degradation under pressure, but not excessive
          expect(performanceRatio).toBeLessThan(5.0); // Less than 5x slower under pressure
        }
      });
    });
  });
});

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.