
Prompt Auto-Optimizer MCP

by sloth-wq
gc-integration.ts (24.6 kB)
/**
 * GEPA GC Optimization Integration Layer
 *
 * Provides seamless integration of garbage collection optimization with all GEPA components
 * including evolution engine, cache system, Pareto frontier, and LLM adapter.
 */

import { EventEmitter } from 'events';
import { GarbageCollectionOptimizer, ObjectPool } from './gc-optimizer';
import { PerformanceTracker } from '../services/performance-tracker';
import { MemoryLeakDetector, MemoryLeakIntegration } from './memory-leak-detector';

/**
 * Integration configuration for GEPA components
 */
export interface GEPAIntegrationConfig {
  /** Enable automatic workload detection */
  autoDetectWorkload: boolean;
  /** Enable adaptive strategy switching */
  adaptiveStrategy: boolean;
  /** Memory optimization thresholds */
  memoryThresholds: {
    lowMemory: number; // MB
    highMemory: number; // MB
    criticalMemory: number; // MB
  };
  /** Component-specific pool configurations */
  componentPools: {
    evolutionEngine: {
      candidatePoolSize: number;
      populationPoolSize: number;
      generationPoolSize: number;
    };
    paretoFrontier: {
      solutionPoolSize: number;
      frontierPoolSize: number;
    };
    cacheSystem: {
      entryPoolSize: number;
      keyPoolSize: number;
    };
    llmAdapter: {
      processPoolSize: number;
      responsePoolSize: number;
    };
  };
}

/**
 * Workload characteristics detected from GEPA operations
 */
export interface WorkloadCharacteristics {
  type: 'high-throughput' | 'low-latency' | 'memory-intensive' | 'batch-processing' | 'mixed';
  confidence: number;
  metrics: {
    operationsPerSecond: number;
    averageLatency: number;
    memoryGrowthRate: number;
    allocationPattern: 'bursty' | 'steady' | 'periodic';
  };
  recommendedStrategy: string;
}

/**
 * Integration hooks for GEPA components
 */
export interface ComponentIntegrationHooks {
  /** Pre-operation hook */
  preOperation: (component: string, operation: string, data?: any) => Promise<void>;
  /** Post-operation hook */
  postOperation: (component: string, operation: string, result?: any) => Promise<void>;
  /** Memory allocation hook */
  onAllocation: (component: string, size: number, type: string) => void;
  /** Memory deallocation hook */
  onDeallocation: (component: string, size: number, type: string) => void;
}

/**
 * Main GEPA GC Integration Manager
 */
export class GEPAGCIntegration extends EventEmitter {
  private gcOptimizer: GarbageCollectionOptimizer;
  private performanceTracker: PerformanceTracker;
  private memoryLeakDetector: MemoryLeakDetector;
  private config: GEPAIntegrationConfig;

  // Component-specific pools
  private componentPools = new Map<string, Map<string, ObjectPool<any>>>();

  // Workload detection state
  private workloadDetection = {
    enabled: true,
    samples: [] as Array<{ timestamp: number; metrics: any }>,
    lastDetection: 0,
    detectionInterval: 30000, // 30 seconds
  };

  // Integration hooks
  private hooks!: ComponentIntegrationHooks;

  // Monitoring intervals
  private workloadMonitoringInterval?: ReturnType<typeof setInterval>;
  private adaptiveStrategyInterval?: ReturnType<typeof setInterval>;

  constructor(
    performanceTracker: PerformanceTracker,
    config?: Partial<GEPAIntegrationConfig>
  ) {
    super();

    this.performanceTracker = performanceTracker;
    this.memoryLeakDetector = MemoryLeakIntegration.initialize();
    this.gcOptimizer = new GarbageCollectionOptimizer(
      this.performanceTracker,
      this.memoryLeakDetector
    );

    this.config = {
      autoDetectWorkload: true,
      adaptiveStrategy: true,
      memoryThresholds: {
        lowMemory: 256,
        highMemory: 512,
        criticalMemory: 1024,
      },
      componentPools: {
        evolutionEngine: {
          candidatePoolSize: 1000,
          populationPoolSize: 100,
          generationPoolSize: 50,
        },
        paretoFrontier: {
          solutionPoolSize: 500,
          frontierPoolSize: 50,
        },
        cacheSystem: {
          entryPoolSize: 2000,
          keyPoolSize: 1000,
        },
        llmAdapter: {
          processPoolSize: 20,
          responsePoolSize: 100,
        },
      },
      ...config,
    };

    this.initializeComponentPools();
    this.setupIntegrationHooks();
    this.startWorkloadMonitoring();
    this.setupAdaptiveStrategy();
  }

  /**
   * Initialize the GC integration system
   */
  async initialize(): Promise<void> {
    // Register all GEPA components for memory tracking
    this.registerGEPAComponents();

    // Set initial optimization strategy
    if (this.config.autoDetectWorkload) {
      await this.detectAndApplyWorkloadStrategy();
    } else {
      this.gcOptimizer.setOptimizationStrategy('balanced');
    }

    this.emit('initialized', { config: this.config });
  }

  /**
   * Get optimized object for component operation
   */
  getOptimizedObject<T>(component: string, objectType: string): T {
    const componentPools = this.componentPools.get(component);
    if (!componentPools) {
      throw new Error(`Component ${component} not registered`);
    }

    const pool = componentPools.get(objectType);
    if (!pool) {
      throw new Error(`Object type ${objectType} not found for component ${component}`);
    }

    const obj = pool.get();

    // Track allocation
    this.hooks.onAllocation(component, 0, objectType);

    return obj;
  }

  /**
   * Return optimized object to pool
   */
  returnOptimizedObject<T>(component: string, objectType: string, object: T): void {
    const componentPools = this.componentPools.get(component);
    if (!componentPools) return;

    const pool = componentPools.get(objectType);
    if (!pool) return;

    pool.return(object);

    // Track deallocation
    this.hooks.onDeallocation(component, 0, objectType);
  }

  /**
   * Get optimized buffer for component
   */
  getOptimizedBuffer(component: string, size: number): Buffer {
    const buffer = this.gcOptimizer.getBuffer(size);
    this.hooks.onAllocation(component, size, 'buffer');
    return buffer;
  }

  /**
   * Return buffer to optimization pool
   */
  returnOptimizedBuffer(component: string, buffer: Buffer): void {
    this.gcOptimizer.returnBuffer(buffer);
    this.hooks.onDeallocation(component, buffer.length, 'buffer');
  }

  /**
   * Perform component-specific memory optimization
   */
  async optimizeComponentMemory(component: string): Promise<{
    beforeMemory: number;
    afterMemory: number;
    optimizationsApplied: string[];
  }> {
    const beforeMemory = process.memoryUsage().heapUsed;
    const optimizations: string[] = [];

    const componentPools = this.componentPools.get(component);
    if (componentPools) {
      // Optimize all pools for this component
      for (const [poolName, pool] of componentPools) {
        await pool.performMaintenanceCleanup();
        optimizations.push(`${poolName}-cleanup`);

        // Auto-tune if enabled
        if (pool.config.autoTune) {
          await pool.autoTune();
          optimizations.push(`${poolName}-autotune`);
        }
      }
    }

    // Component-specific optimizations
    switch (component) {
      case 'evolution-engine':
        await this.optimizeEvolutionEngine();
        optimizations.push('evolution-engine-specific');
        break;
      case 'pareto-frontier':
        await this.optimizeParetoFrontier();
        optimizations.push('pareto-frontier-specific');
        break;
      case 'cache-system':
        await this.optimizeCacheSystem();
        optimizations.push('cache-system-specific');
        break;
      case 'llm-adapter':
        await this.optimizeLLMAdapter();
        optimizations.push('llm-adapter-specific');
        break;
    }

    const afterMemory = process.memoryUsage().heapUsed;

    this.emit('componentOptimized', {
      component,
      beforeMemory,
      afterMemory,
      memoryFreed: beforeMemory - afterMemory,
      optimizationsApplied: optimizations,
    });

    return {
      beforeMemory,
      afterMemory,
      optimizationsApplied: optimizations,
    };
  }

  /**
   * Force garbage collection for specific workload
   */
  async forceGCForWorkload(workloadType: string): Promise<void> {
    // Apply workload-specific strategy temporarily
    const currentStrategy = this.gcOptimizer['currentStrategy'];
    this.gcOptimizer.setOptimizationStrategy(workloadType);

    // Force GC with workload context
    await this.gcOptimizer.forceGarbageCollection(`workload-${workloadType}`);

    // Restore previous strategy if adaptive mode is disabled
    if (!this.config.adaptiveStrategy) {
      this.gcOptimizer['currentStrategy'] = currentStrategy;
    }
  }

  /**
   * Get comprehensive integration statistics
   */
  getIntegrationStatistics(): {
    gcOptimization: any;
    componentPools: Array<{
      component: string;
      pools: Array<{ name: string; stats: any }>;
    }>;
    workloadDetection: {
      currentWorkload: WorkloadCharacteristics | null;
      detectionHistory: Array<{ timestamp: number; workload: WorkloadCharacteristics }>;
      adaptiveChanges: number;
    };
    memoryEfficiency: {
      totalMemorySaved: number;
      optimizationHits: number;
      poolEfficiency: number;
    };
  } {
    const gcStats = this.gcOptimizer.getOptimizationStatistics();

    const componentPoolStats = Array.from(this.componentPools.entries()).map(([component, pools]) => ({
      component,
      pools: Array.from(pools.entries()).map(([name, pool]) => ({
        name,
        stats: pool.getStatistics(),
      })),
    }));

    return {
      gcOptimization: gcStats,
      componentPools: componentPoolStats,
      workloadDetection: {
        currentWorkload: this.getCurrentWorkloadCharacteristics(),
        detectionHistory: [], // Would store historical detections
        adaptiveChanges: 0, // Would track strategy changes
      },
      memoryEfficiency: {
        totalMemorySaved: 0, // Would calculate from pool reuse
        optimizationHits: 0, // Would track successful optimizations
        poolEfficiency: this.calculateOverallPoolEfficiency(),
      },
    };
  }

  /**
   * Shutdown the integration system
   */
  shutdown(): void {
    if (this.workloadMonitoringInterval) {
      clearInterval(this.workloadMonitoringInterval);
    }
    if (this.adaptiveStrategyInterval) {
      clearInterval(this.adaptiveStrategyInterval);
    }

    // Shutdown all component pools
    for (const pools of this.componentPools.values()) {
      for (const pool of pools.values()) {
        pool.shutdown();
      }
    }

    this.gcOptimizer.shutdown();
    MemoryLeakIntegration.shutdown();

    this.emit('shutdown');
  }

  // Private Methods

  private initializeComponentPools(): void {
    // Evolution Engine Pools
    const evolutionPools = new Map<string, ObjectPool<any>>();

    evolutionPools.set('candidates', this.gcOptimizer.createObjectPool({
      name: 'evolution-candidates',
      maxSize: this.config.componentPools.evolutionEngine.candidatePoolSize,
      minSize: Math.floor(this.config.componentPools.evolutionEngine.candidatePoolSize * 0.1),
      factory: () => ({
        id: '',
        fitness: 0,
        objectives: [],
        parameters: {},
        metadata: {},
        generation: 0,
      }),
      reset: (obj) => {
        obj.id = '';
        obj.fitness = 0;
        obj.objectives.length = 0;
        obj.parameters = {};
        obj.metadata = {};
        obj.generation = 0;
      },
      evictionStrategy: 'lru',
      autoTune: true,
    }));

    evolutionPools.set('populations', this.gcOptimizer.createObjectPool({
      name: 'evolution-populations',
      maxSize: this.config.componentPools.evolutionEngine.populationPoolSize,
      minSize: 10,
      factory: () => ({
        generation: 0,
        candidates: [],
        statistics: {},
        diversity: 0,
      }),
      reset: (obj) => {
        obj.generation = 0;
        obj.candidates.length = 0;
        obj.statistics = {};
        obj.diversity = 0;
      },
      evictionStrategy: 'fifo',
      autoTune: true,
    }));

    this.componentPools.set('evolution-engine', evolutionPools);

    // Pareto Frontier Pools
    const paretoFrontierPools = new Map<string, ObjectPool<any>>();

    paretoFrontierPools.set('solutions', this.gcOptimizer.createObjectPool({
      name: 'pareto-solutions',
      maxSize: this.config.componentPools.paretoFrontier.solutionPoolSize,
      minSize: 50,
      factory: () => ({
        id: '',
        objectives: [],
        dominationRank: 0,
        crowdingDistance: 0,
        feasible: true,
      }),
      reset: (obj) => {
        obj.id = '';
        obj.objectives.length = 0;
        obj.dominationRank = 0;
        obj.crowdingDistance = 0;
        obj.feasible = true;
      },
      evictionStrategy: 'lru',
      autoTune: true,
    }));

    this.componentPools.set('pareto-frontier', paretoFrontierPools);

    // Cache System Pools
    const cacheSystemPools = new Map<string, ObjectPool<any>>();

    cacheSystemPools.set('entries', this.gcOptimizer.createObjectPool({
      name: 'cache-entries',
      maxSize: this.config.componentPools.cacheSystem.entryPoolSize,
      minSize: 200,
      factory: () => ({
        key: '',
        value: null,
        timestamp: 0,
        ttl: 0,
        hits: 0,
        size: 0,
      }),
      reset: (obj) => {
        obj.key = '';
        obj.value = null;
        obj.timestamp = 0;
        obj.ttl = 0;
        obj.hits = 0;
        obj.size = 0;
      },
      evictionStrategy: 'lru',
      ttl: 300000, // 5 minutes
      autoTune: true,
    }));

    this.componentPools.set('cache-system', cacheSystemPools);

    // LLM Adapter Pools
    const llmAdapterPools = new Map<string, ObjectPool<any>>();

    llmAdapterPools.set('responses', this.gcOptimizer.createObjectPool({
      name: 'llm-responses',
      maxSize: this.config.componentPools.llmAdapter.responsePoolSize,
      minSize: 10,
      factory: () => ({
        id: '',
        content: '',
        metadata: {},
        timestamp: 0,
        tokens: { input: 0, output: 0 },
      }),
      reset: (obj) => {
        obj.id = '';
        obj.content = '';
        obj.metadata = {};
        obj.timestamp = 0;
        obj.tokens = { input: 0, output: 0 };
      },
      evictionStrategy: 'ttl',
      ttl: 600000, // 10 minutes
      autoTune: true,
    }));

    this.componentPools.set('llm-adapter', llmAdapterPools);
  }

  private setupIntegrationHooks(): void {
    this.hooks = {
      preOperation: async (component, operation, _data) => {
        // Track operation start
        this.performanceTracker.recordMetric({
          id: `${component}-${operation}-${Date.now()}`,
          name: `${component}-${operation}`,
          category: 'component-operation',
          timestamp: Date.now(),
          data: { component, operation, phase: 'start' },
        });

        // Memory pressure check
        const memoryUsage = process.memoryUsage().heapUsed / (1024 * 1024);
        if (memoryUsage > this.config.memoryThresholds.highMemory) {
          await this.optimizeComponentMemory(component);
        }
      },

      postOperation: async (component, operation, result) => {
        // Track operation completion
        this.performanceTracker.recordMetric({
          id: `${component}-${operation}-end-${Date.now()}`,
          name: `${component}-${operation}`,
          category: 'component-operation',
          timestamp: Date.now(),
          data: { component, operation, phase: 'end', result: !!result },
        });

        // Update workload detection samples
        if (this.workloadDetection.enabled) {
          this.workloadDetection.samples.push({
            timestamp: Date.now(),
            metrics: {
              component,
              operation,
              memoryUsage: process.memoryUsage().heapUsed,
              duration: 0, // Would calculate from start/end metrics
            },
          });

          // Keep only recent samples
          const cutoff = Date.now() - 60000; // 1 minute
          this.workloadDetection.samples = this.workloadDetection.samples.filter(
            sample => sample.timestamp > cutoff
          );
        }
      },

      onAllocation: (component, size, type) => {
        // Track memory allocation
        MemoryLeakIntegration.trackCacheOperation('set', `${component}-${type}`, size);
      },

      onDeallocation: (component, size, type) => {
        // Track memory deallocation
        MemoryLeakIntegration.trackCacheOperation('delete', `${component}-${type}`, size);
      },
    };
  }

  private registerGEPAComponents(): void {
    // Register components with memory leak detector
    const detector = MemoryLeakIntegration.getDetector();
    if (detector) {
      detector.registerComponent('gepa-evolution-engine');
      detector.registerComponent('gepa-pareto-frontier');
      detector.registerComponent('gepa-cache-system');
      detector.registerComponent('gepa-llm-adapter');
    }
  }

  private startWorkloadMonitoring(): void {
    if (!this.config.autoDetectWorkload) return;

    this.workloadMonitoringInterval = setInterval(async () => {
      const now = Date.now();
      if (now - this.workloadDetection.lastDetection > this.workloadDetection.detectionInterval) {
        await this.detectAndApplyWorkloadStrategy();
        this.workloadDetection.lastDetection = now;
      }
    }, 10000); // Check every 10 seconds
  }

  private setupAdaptiveStrategy(): void {
    if (!this.config.adaptiveStrategy) return;

    this.adaptiveStrategyInterval = setInterval(async () => {
      const characteristics = this.getCurrentWorkloadCharacteristics();
      if (characteristics && characteristics.confidence > 0.7) {
        this.gcOptimizer.setOptimizationStrategy(characteristics.type);
        this.emit('adaptiveStrategyChange', {
          workload: characteristics,
          strategy: characteristics.recommendedStrategy,
        });
      }
    }, 30000); // Adapt every 30 seconds
  }

  private async detectAndApplyWorkloadStrategy(): Promise<void> {
    const characteristics = this.getCurrentWorkloadCharacteristics();
    if (characteristics && characteristics.confidence > 0.6) {
      this.gcOptimizer.setOptimizationStrategy(characteristics.type);
      this.emit('workloadDetected', characteristics);
    }
  }

  private getCurrentWorkloadCharacteristics(): WorkloadCharacteristics | null {
    if (this.workloadDetection.samples.length < 10) {
      return null;
    }

    const samples = this.workloadDetection.samples.slice(-50); // Last 50 samples
    if (samples.length < 2) return null; // Insufficient data

    const timeSpan = samples[samples.length - 1]!.timestamp - samples[0]!.timestamp;
    const operationsPerSecond = (samples.length / timeSpan) * 1000;

    // Analyze memory growth
    const memoryUsages = samples.map(s => s.metrics.memoryUsage);
    const memoryGrowthRate = (memoryUsages[memoryUsages.length - 1] - memoryUsages[0]) / timeSpan * 1000;

    // Determine workload type based on characteristics
    let type: WorkloadCharacteristics['type'] = 'mixed';
    let confidence = 0.5;

    if (operationsPerSecond > 100) {
      type = 'high-throughput';
      confidence = 0.8;
    } else if (memoryGrowthRate > 1024 * 1024) { // 1MB/s growth
      type = 'memory-intensive';
      confidence = 0.7;
    } else if (operationsPerSecond < 10) {
      type = 'batch-processing';
      confidence = 0.6;
    }

    return {
      type,
      confidence,
      metrics: {
        operationsPerSecond,
        averageLatency: 0, // Would calculate from operation metrics
        memoryGrowthRate: memoryGrowthRate / (1024 * 1024), // MB/s
        allocationPattern: 'steady', // Would analyze allocation patterns
      },
      recommendedStrategy: type,
    };
  }

  private async optimizeEvolutionEngine(): Promise<void> {
    // Evolution engine specific optimizations
    const pools = this.componentPools.get('evolution-engine');
    if (pools) {
      const candidatePool = pools.get('candidates');
      if (candidatePool) {
        // Evict old generations
        await candidatePool.evictLRU(0.2);
      }
    }
  }

  private async optimizeParetoFrontier(): Promise<void> {
    // Pareto frontier specific optimizations
    const pools = this.componentPools.get('pareto-frontier');
    if (pools) {
      const solutionPool = pools.get('solutions');
      if (solutionPool) {
        // Compact dominated solutions
        await solutionPool.compact();
      }
    }
  }

  private async optimizeCacheSystem(): Promise<void> {
    // Cache system specific optimizations
    const pools = this.componentPools.get('cache-system');
    if (pools) {
      const entryPool = pools.get('entries');
      if (entryPool) {
        // Force eviction of expired entries
        await entryPool.compact();
      }
    }
  }

  private async optimizeLLMAdapter(): Promise<void> {
    // LLM adapter specific optimizations
    const pools = this.componentPools.get('llm-adapter');
    if (pools) {
      const responsePool = pools.get('responses');
      if (responsePool) {
        // Evict old responses
        await responsePool.evictLRU(0.3);
      }
    }
  }

  private calculateOverallPoolEfficiency(): number {
    let totalHits = 0;
    let totalAccesses = 0;

    for (const pools of this.componentPools.values()) {
      for (const pool of pools.values()) {
        const stats = pool.getStatistics();
        totalHits += stats.hits;
        totalAccesses += stats.hits + stats.misses;
      }
    }

    return totalAccesses > 0 ? totalHits / totalAccesses : 0;
  }
}

/**
 * Global GEPA GC Integration instance for easy access
 */
export class GEPAGCManager {
  private static instance: GEPAGCIntegration | null = null;

  /**
   * Initialize the global GEPA GC integration
   */
  static initialize(
    performanceTracker: PerformanceTracker,
    config?: Partial<GEPAIntegrationConfig>
  ): GEPAGCIntegration {
    if (!this.instance) {
      this.instance = new GEPAGCIntegration(performanceTracker, config);
    }
    return this.instance;
  }

  /**
   * Get the global GEPA GC integration instance
   */
  static getInstance(): GEPAGCIntegration | null {
    return this.instance;
  }

  /**
   * Shutdown the global instance
   */
  static shutdown(): void {
    if (this.instance) {
      this.instance.shutdown();
      this.instance = null;
    }
  }
}

/**
 * Utility functions for GEPA component integration
 */
export const GEPAGCUtils = {
  /**
   * Get optimized object for component
   */
  getObject<T>(component: string, objectType: string): T {
    const instance = GEPAGCManager.getInstance();
    if (!instance) {
      throw new Error('GEPA GC Integration not initialized');
    }
    return instance.getOptimizedObject<T>(component, objectType);
  },

  /**
   * Return object to optimization pool
   */
  returnObject<T>(component: string, objectType: string, object: T): void {
    const instance = GEPAGCManager.getInstance();
    if (instance) {
      instance.returnOptimizedObject(component, objectType, object);
    }
  },

  /**
   * Get optimized buffer
   */
  getBuffer(component: string, size: number): Buffer {
    const instance = GEPAGCManager.getInstance();
    if (!instance) {
      return Buffer.alloc(size);
    }
    return instance.getOptimizedBuffer(component, size);
  },

  /**
   * Return buffer to optimization pool
   */
  returnBuffer(component: string, buffer: Buffer): void {
    const instance = GEPAGCManager.getInstance();
    if (instance) {
      instance.returnOptimizedBuffer(component, buffer);
    }
  },

  /**
   * Execute operation with integrated hooks
   */
  async withOptimization<T>(
    component: string,
    operation: string,
    fn: () => Promise<T>,
    data?: any
  ): Promise<T> {
    const instance = GEPAGCManager.getInstance();
    if (!instance) {
      return fn();
    }

    await instance['hooks'].preOperation(component, operation, data);

    try {
      const result = await fn();
      await instance['hooks'].postOperation(component, operation, result);
      return result;
    } catch (error) {
      await instance['hooks'].postOperation(component, operation, null);
      throw error;
    }
  },
};
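Usage sketch (not part of the file above): the snippet below shows one way the exported GEPAGCManager and GEPAGCUtils API could be wired together. The zero-argument PerformanceTracker constructor and the 'evolution-engine'/'candidates' identifiers are illustrative assumptions; only the integration calls themselves come from gc-integration.ts.

import { GEPAGCManager, GEPAGCUtils } from './gc-integration';
import { PerformanceTracker } from '../services/performance-tracker';

async function main(): Promise<void> {
  // Assumption: PerformanceTracker can be constructed without arguments (not shown in this file).
  const tracker = new PerformanceTracker();

  // Create the global integration singleton and apply the initial GC strategy.
  const integration = GEPAGCManager.initialize(tracker, {
    memoryThresholds: { lowMemory: 256, highMemory: 512, criticalMemory: 1024 },
  });
  await integration.initialize();

  // Wrap a component operation so the pre/post hooks record metrics and
  // trigger memory optimization under pressure.
  const fitness = await GEPAGCUtils.withOptimization('evolution-engine', 'evaluate-generation', async () => {
    // Borrow a pooled candidate object instead of allocating a fresh one.
    const candidate = GEPAGCUtils.getObject<{ id: string; fitness: number }>('evolution-engine', 'candidates');
    candidate.id = 'candidate-1';
    candidate.fitness = 0.42; // ... evaluate the candidate here ...

    // Capture the result before the pool reset wipes the object, then return it to the pool.
    const score = candidate.fitness;
    GEPAGCUtils.returnObject('evolution-engine', 'candidates', candidate);
    return score;
  });

  console.log('fitness:', fitness, integration.getIntegrationStatistics().memoryEfficiency);

  GEPAGCManager.shutdown();
}

main().catch(console.error);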

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'
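The same lookup from TypeScript, using the built-in fetch available in Node 18+; the response schema is not documented on this page, so the JSON is logged as-is rather than typed:

// Fetch the directory entry for this server from the MCP directory API.
const res = await fetch('https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp');
if (!res.ok) {
  throw new Error(`MCP directory API request failed: ${res.status}`);
}
console.log(await res.json());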

If you have feedback or need assistance with the MCP directory API, please join our Discord server.