Prompt Auto-Optimizer MCP

by sloth-wq
evolution-optimizations.ts (7.92 kB)
/**
 * OPTIMIZATION: Helper classes and methods for Evolution Engine performance improvements.
 * These are separated for clarity and can be integrated into the main EvolutionEngine class.
 */
import type { PromptCandidate, ExecutionTrajectory, TaskContext } from '../types/gepa';

/**
 * OPTIMIZATION: Candidate pool for efficient candidate management
 */
export class CandidatePool {
  private candidates: PromptCandidate[] = [];
  private readonly maxSize: number;
  private readonly seenIds = new Set<string>();

  constructor(maxSize: number) {
    this.maxSize = maxSize;
  }

  addCandidates(newCandidates: PromptCandidate[]): void {
    for (const candidate of newCandidates) {
      if (this.candidates.length >= this.maxSize) break;
      // Prevent duplicates
      if (!this.seenIds.has(candidate.id)) {
        this.candidates.push(candidate);
        this.seenIds.add(candidate.id);
      }
    }
  }

  getCandidates(): PromptCandidate[] {
    return this.candidates;
  }

  size(): number {
    return this.candidates.length;
  }

  clear(): void {
    this.candidates = [];
    this.seenIds.clear();
  }
}

/**
 * OPTIMIZATION: Trajectory batch loader for parallel loading
 */
export class TrajectoryBatchLoader {
  constructor(private trajectoryStore: any) {}

  async loadInBatches(promptIds: string[], limit: number = 10): Promise<ExecutionTrajectory[]> {
    const batchSize = 5;
    const allTrajectories: ExecutionTrajectory[] = [];

    for (let i = 0; i < promptIds.length; i += batchSize) {
      const batch = promptIds.slice(i, i + batchSize);
      const batchResults = await Promise.allSettled(
        batch.map(id => this.trajectoryStore.query({ promptId: id, limit }))
      );

      batchResults.forEach(result => {
        if (result.status === 'fulfilled') {
          allTrajectories.push(...result.value);
        }
      });
    }

    return allTrajectories;
  }
}

/**
 * OPTIMIZATION: Evaluation batch processor with adaptive sizing
 */
export class EvaluationBatchProcessor {
  private maxConcurrent: number;

  constructor(maxConcurrent: number = 5) {
    this.maxConcurrent = maxConcurrent;
  }

  async evaluateInBatches<T>(
    items: T[],
    evaluator: (item: T) => Promise<any>,
    options: {
      priorityScore?: (item: T) => number;
      adaptiveBatching?: boolean;
    } = {}
  ): Promise<void> {
    // Sort by priority if scorer provided
    const sortedItems = options.priorityScore
      ? [...items].sort((a, b) => options.priorityScore!(b) - options.priorityScore!(a))
      : items;

    // Adaptive batch sizing
    const optimalBatchSize = options.adaptiveBatching
      ? Math.min(this.maxConcurrent, Math.max(2, Math.floor(items.length / 4)))
      : this.maxConcurrent;

    for (let i = 0; i < sortedItems.length; i += optimalBatchSize) {
      const batch = sortedItems.slice(i, i + optimalBatchSize);
      const results = await Promise.allSettled(
        batch.map(item => evaluator(item))
      );

      // Log failures without stopping
      results.forEach((result, index) => {
        if (result.status === 'rejected') {
          // eslint-disable-next-line no-console
          console.warn(`Batch evaluation failed for item ${i + index}: ${result.reason}`);
        }
      });
    }
  }
}

/**
 * OPTIMIZATION: Parallel mutation generator
 */
export class ParallelMutationGenerator {
  constructor(private promptMutator: any) {}

  async generateReflectiveMutations(
    candidates: PromptCandidate[],
    trajectoryMap: Map<string, ExecutionTrajectory[]>,
    maxResults: number
  ): Promise<PromptCandidate[]> {
    const results: PromptCandidate[] = [];

    const mutationPromises = candidates.map(async candidate => {
      try {
        const candidateTrajectories = trajectoryMap.get(candidate.id) || [];
        return await this.promptMutator.generateReflectiveMutations(candidate, candidateTrajectories);
      } catch (error) {
        // eslint-disable-next-line no-console
        console.warn(`Failed to generate reflective mutations for ${candidate.id}: ${error}`);
        return [];
      }
    });

    const mutationResults = await Promise.allSettled(mutationPromises);
    mutationResults.forEach(result => {
      if (result.status === 'fulfilled') {
        const perCandidateLimit = Math.max(1, Math.floor(maxResults / candidates.length));
        results.push(...result.value.slice(0, perCandidateLimit));
      }
    });

    return results.slice(0, maxResults);
  }

  async generateCrossoverMutations(
    candidates: PromptCandidate[],
    maxResults: number
  ): Promise<PromptCandidate[]> {
    try {
      const crossoverCandidates = await this.promptMutator.generateCrossoverMutations(candidates);
      return crossoverCandidates.slice(0, maxResults);
    } catch (error) {
      // eslint-disable-next-line no-console
      console.warn(`Failed to generate crossover mutations: ${error}`);
      return [];
    }
  }

  async generateAdaptiveMutations(
    candidates: PromptCandidate[],
    taskContext: TaskContext,
    maxResults: number
  ): Promise<PromptCandidate[]> {
    const results: PromptCandidate[] = [];

    const mutationPromises = candidates.map(async candidate => {
      try {
        return await this.promptMutator.generateAdaptiveMutations(candidate, taskContext);
      } catch (error) {
        // eslint-disable-next-line no-console
        console.warn(`Failed to generate adaptive mutations for ${candidate.id}: ${error}`);
        return [];
      }
    });

    const mutationResults = await Promise.allSettled(mutationPromises);
    mutationResults.forEach(result => {
      if (result.status === 'fulfilled') {
        const perCandidateLimit = Math.max(1, Math.floor(maxResults / candidates.length));
        results.push(...result.value.slice(0, perCandidateLimit));
      }
    });

    return results.slice(0, maxResults);
  }
}

/**
 * OPTIMIZATION: Convergence detector with early stopping
 */
export class ConvergenceDetector {
  private improvementHistory: number[] = [];
  private readonly windowSize: number;
  private readonly stagnationThreshold: number;

  constructor(windowSize: number = 5, stagnationThreshold: number = 3) {
    this.windowSize = windowSize;
    this.stagnationThreshold = stagnationThreshold;
  }

  addScore(score: number): void {
    this.improvementHistory.push(score);
    if (this.improvementHistory.length > this.windowSize) {
      this.improvementHistory.shift();
    }
  }

  isConverged(): boolean {
    if (this.improvementHistory.length < this.windowSize) return false;

    // Check if improvement rate is below threshold
    const recentScores = this.improvementHistory.slice(-this.stagnationThreshold);
    const improvements = [];
    for (let i = 1; i < recentScores.length; i++) {
      const currentScore = recentScores[i];
      const previousScore = recentScores[i - 1];
      // Add null checks to prevent undefined access
      if (currentScore !== undefined && previousScore !== undefined) {
        improvements.push(currentScore - previousScore);
      }
    }

    // Avoid division by zero and handle empty improvements array
    if (improvements.length === 0) return false;

    const avgImprovement = improvements.reduce((a, b) => a + b, 0) / improvements.length;
    return avgImprovement < 0.01; // Less than 1% improvement
  }

  getImprovementRate(): number {
    if (this.improvementHistory.length < 2) return 0;

    const recent = this.improvementHistory.slice(-this.windowSize);
    const firstScore = recent[0];
    const lastScore = recent[recent.length - 1];

    // Add null checks to prevent undefined access
    if (firstScore === undefined || lastScore === undefined) {
      return 0;
    }

    return (lastScore - firstScore) / recent.length;
  }

  reset(): void {
    this.improvementHistory = [];
  }
}
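The file does not show how these helpers are wired into the EvolutionEngine, so here is a minimal usage sketch of a single generation step. The `trajectoryStore`, `promptMutator`, and `evaluateCandidate` dependencies, the `./engine-stubs` module, the `promptId` field on `ExecutionTrajectory`, and the pool/budget sizes are all illustrative assumptions, not part of the file above.

import type { PromptCandidate, ExecutionTrajectory, TaskContext } from '../types/gepa';
import {
  CandidatePool,
  TrajectoryBatchLoader,
  ParallelMutationGenerator,
  EvaluationBatchProcessor,
} from './evolution-optimizations';
// Hypothetical dependencies -- stand-ins for the engine's real store, mutator, and evaluator.
import { trajectoryStore, promptMutator, evaluateCandidate } from './engine-stubs';

async function runGeneration(
  parents: PromptCandidate[],
  taskContext: TaskContext
): Promise<PromptCandidate[]> {
  const pool = new CandidatePool(50);
  const loader = new TrajectoryBatchLoader(trajectoryStore);
  const mutator = new ParallelMutationGenerator(promptMutator);
  const evaluator = new EvaluationBatchProcessor(5);

  // Load recent trajectories for all parents in parallel batches,
  // then group them by prompt id (assumes trajectories expose a promptId field).
  const trajectories = await loader.loadInBatches(parents.map(p => p.id), 10);
  const byPrompt = new Map<string, ExecutionTrajectory[]>();
  for (const t of trajectories) {
    const list = byPrompt.get(t.promptId) ?? [];
    list.push(t);
    byPrompt.set(t.promptId, list);
  }

  // Generate mutations in parallel and collect the deduplicated results in the pool.
  pool.addCandidates(await mutator.generateReflectiveMutations(parents, byPrompt, 20));
  pool.addCandidates(await mutator.generateAdaptiveMutations(parents, taskContext, 10));

  // Evaluate the pool in adaptive batches; individual failures are logged, not fatal.
  await evaluator.evaluateInBatches(pool.getCandidates(), evaluateCandidate, {
    adaptiveBatching: true,
  });

  return pool.getCandidates();
}

ConvergenceDetector would sit one level up, in the outer evolution loop: after each generation, pass the generation's best score to addScore() and stop iterating once isConverged() returns true.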

MCP directory API

We provide all the information about MCP servers via our MCP API. For example:

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.