Skip to main content
Glama

Prompt Auto-Optimizer MCP

by sloth-wq
anomaly-detection.ts (31.9 kB)
/** * GEPA Anomaly Detection System * * Advanced behavioral pattern analysis and statistical anomaly detection * with machine learning capabilities for performance degradation detection. */ import { EventEmitter } from 'events'; import { ErrorEvent, ErrorLevel, ErrorCategory } from './error-tracking-system'; import { PerformanceMetric } from '../../services/performance-tracker'; import { MemoryLeakIntegration } from '../memory-leak-detector'; // Core Anomaly Types export interface AnomalyEvent { id: string; timestamp: number; type: AnomalyType; severity: AnomalySeverity; description: string; affectedSystems: string[]; detectionMethod: string; confidence: number; evidence: Record<string, any>; impactAssessment: ImpactAssessment; recommendedActions: string[]; } export enum AnomalyType { PERFORMANCE_DEGRADATION = 'performance_degradation', ERROR_BURST = 'error_burst', UNUSUAL_PATTERN = 'unusual_pattern', RESOURCE_EXHAUSTION = 'resource_exhaustion', BEHAVIORAL_DRIFT = 'behavioral_drift', CASCADING_FAILURE = 'cascading_failure', MEMORY_LEAK = 'memory_leak', RESPONSE_TIME_SPIKE = 'response_time_spike', THROUGHPUT_DROP = 'throughput_drop', CYCLIC_PATTERN = 'cyclic_pattern' } export enum AnomalySeverity { LOW = 'low', MEDIUM = 'medium', HIGH = 'high', CRITICAL = 'critical' } export interface ImpactAssessment { affectedUsers: number; businessImpact: 'minimal' | 'moderate' | 'significant' | 'severe'; estimatedDowntime: number; dataLossRisk: boolean; securityRisk: boolean; } export interface BehavioralPattern { id: string; name: string; description: string; normalRange: { min: number; max: number }; currentValue: number; historicalMean: number; standardDeviation: number; confidenceInterval: { lower: number; upper: number }; trendDirection: 'increasing' | 'decreasing' | 'stable' | 'volatile'; seasonalComponent: number[]; } export interface StatisticalModel { type: 'zscore' | 'isolation_forest' | 'moving_average' | 'seasonal_decomposition'; parameters: Record<string, any>; 
trainingData: Array<{ timestamp: number; value: number }>; lastTrained: number; accuracy: number; } export interface AnomalyDetectionConfig { enableRealTimeDetection: boolean; enableBehavioralAnalysis: boolean; enableStatisticalModels: boolean; enablePredictiveDetection: boolean; sensitivityLevel: 'low' | 'medium' | 'high'; minimumConfidenceThreshold: number; anomalyRetentionDays: number; modelRetrainingInterval: number; alertingEnabled: boolean; } /** * Advanced Anomaly Detection Engine */ export class AnomalyDetectionSystem extends EventEmitter { private config: Required<AnomalyDetectionConfig>; private detectedAnomalies: AnomalyEvent[] = []; private behavioralPatterns: Map<string, BehavioralPattern> = new Map(); private statisticalModels: Map<string, StatisticalModel> = new Map(); private baselineMetrics: Map<string, Array<{ timestamp: number; value: number }>> = new Map(); private alertingThresholds: Map<string, number> = new Map(); constructor(config: Partial<AnomalyDetectionConfig> = {}) { super(); this.config = { enableRealTimeDetection: config.enableRealTimeDetection ?? true, enableBehavioralAnalysis: config.enableBehavioralAnalysis ?? true, enableStatisticalModels: config.enableStatisticalModels ?? true, enablePredictiveDetection: config.enablePredictiveDetection ?? true, sensitivityLevel: config.sensitivityLevel ?? 'medium', minimumConfidenceThreshold: config.minimumConfidenceThreshold ?? 0.8, anomalyRetentionDays: config.anomalyRetentionDays ?? 30, modelRetrainingInterval: config.modelRetrainingInterval ?? 86400000, // 24 hours alertingEnabled: config.alertingEnabled ?? 
true }; this.initializeBaselineModels(); this.initializeMemoryIntegration(); this.startBackgroundProcesses(); } /** * Analyze error events for anomalies */ analyzeErrorEvent(error: ErrorEvent): AnomalyEvent[] { const anomalies: AnomalyEvent[] = []; if (!this.config.enableRealTimeDetection) return anomalies; // Check for error burst anomalies const errorBurstAnomaly = this.detectErrorBurst(error); if (errorBurstAnomaly) anomalies.push(errorBurstAnomaly); // Check for unusual error patterns const patternAnomaly = this.detectUnusualErrorPattern(error); if (patternAnomaly) anomalies.push(patternAnomaly); // Check for cascading failures const cascadingFailure = this.detectCascadingFailure(error); if (cascadingFailure) anomalies.push(cascadingFailure); // Store and emit anomalies anomalies.forEach(anomaly => { this.recordAnomaly(anomaly); }); return anomalies; } /** * Analyze performance metrics for anomalies */ analyzePerformanceMetric(metric: PerformanceMetric): AnomalyEvent[] { const anomalies: AnomalyEvent[] = []; if (!this.config.enableRealTimeDetection) return anomalies; // Extract numeric value from metric const value = this.extractMetricValue(metric); if (value === null) return anomalies; // Update baseline data this.updateBaselineData(metric.category, metric.timestamp, value); // Statistical anomaly detection if (this.config.enableStatisticalModels) { const statAnomaly = this.detectStatisticalAnomaly(metric.category, value, metric.timestamp); if (statAnomaly) anomalies.push(statAnomaly); } // Behavioral pattern analysis if (this.config.enableBehavioralAnalysis) { const behaviorAnomaly = this.detectBehavioralAnomaly(metric.category, value, metric.timestamp); if (behaviorAnomaly) anomalies.push(behaviorAnomaly); } // Performance-specific anomalies const perfAnomaly = this.detectPerformanceAnomaly(metric); if (perfAnomaly) anomalies.push(perfAnomaly); // Store and emit anomalies anomalies.forEach(anomaly => { this.recordAnomaly(anomaly); }); return anomalies; } /** 
* Analyze system resource utilization */ analyzeResourceUtilization(resources: { cpu: number; memory: number; disk: number; network: number; timestamp: number; }): AnomalyEvent[] { const anomalies: AnomalyEvent[] = []; // Check for resource exhaustion const resourceAnomaly = this.detectResourceExhaustion(resources); if (resourceAnomaly) anomalies.push(resourceAnomaly); // Check for memory leaks const memoryLeakAnomaly = this.detectMemoryLeakAnomaly(resources.memory, resources.timestamp); if (memoryLeakAnomaly) anomalies.push(memoryLeakAnomaly); // Store and emit anomalies anomalies.forEach(anomaly => { this.recordAnomaly(anomaly); }); return anomalies; } /** * Get behavioral patterns analysis */ getBehavioralPatterns(): BehavioralPattern[] { return Array.from(this.behavioralPatterns.values()); } /** * Get anomaly detection metrics */ getAnomalyMetrics(timeWindow?: number): { totalAnomalies: number; anomaliesBySeverity: Record<AnomalySeverity, number>; anomaliesByType: Record<AnomalyType, number>; averageConfidence: number; falsePositiveRate: number; detectionAccuracy: number; } { const window = timeWindow || 86400000; // 24 hours const cutoff = Date.now() - window; const recentAnomalies = this.detectedAnomalies.filter(a => a.timestamp >= cutoff); const totalAnomalies = recentAnomalies.length; // Aggregate by severity const anomaliesBySeverity = {} as Record<AnomalySeverity, number>; for (const severity of Object.values(AnomalySeverity)) { anomaliesBySeverity[severity] = recentAnomalies.filter(a => a.severity === severity).length; } // Aggregate by type const anomaliesByType = {} as Record<AnomalyType, number>; for (const type of Object.values(AnomalyType)) { anomaliesByType[type] = recentAnomalies.filter(a => a.type === type).length; } // Calculate average confidence const averageConfidence = totalAnomalies > 0 ? 
recentAnomalies.reduce((sum, a) => sum + a.confidence, 0) / totalAnomalies : 0; return { totalAnomalies, anomaliesBySeverity, anomaliesByType, averageConfidence, falsePositiveRate: 0, // Would be calculated with feedback system detectionAccuracy: 0 // Would be calculated with validation data }; } /** * Train or retrain statistical models */ trainModels(category: string, historicalData?: Array<{ timestamp: number; value: number }>): void { const data = historicalData || this.baselineMetrics.get(category) || []; if (data.length < 50) { // eslint-disable-next-line no-console console.warn(`Insufficient data to train model for ${category}: ${data.length} points`); return; } // Train Z-score model this.trainZScoreModel(category, data); // Train moving average model this.trainMovingAverageModel(category, data); // Train seasonal decomposition model this.trainSeasonalModel(category, data); // eslint-disable-next-line no-console console.log(`Trained anomaly detection models for ${category} with ${data.length} data points`); } /** * Predict future anomalies */ predictAnomalies(timeHorizon: number = 3600000): Array<{ predictedTime: number; type: AnomalyType; probability: number; severity: AnomalySeverity; }> { if (!this.config.enablePredictiveDetection) return []; const predictions: Array<{ predictedTime: number; type: AnomalyType; probability: number; severity: AnomalySeverity; }> = []; // Analyze trends for each behavioral pattern for (const [_category, pattern] of this.behavioralPatterns) { const prediction = this.predictPatternAnomaly(pattern, timeHorizon); if (prediction) predictions.push(prediction); } return predictions.sort((a, b) => b.probability - a.probability); } /** * Clear anomaly history */ clearHistory(): void { this.detectedAnomalies = []; this.behavioralPatterns.clear(); this.baselineMetrics.clear(); } // Private methods private detectErrorBurst(error: ErrorEvent): AnomalyEvent | null { // Look for rapid succession of similar errors const timeWindow = 
300000; // 5 minutes // const cutoff = Date.now() - timeWindow; // Unused variable // This would need integration with error tracking system // For now, simulate burst detection // const burstThreshold = this.getSensitivityThreshold('error_burst', 10); // Unused variable // Simplified burst detection if (error.level === ErrorLevel.CRITICAL || error.level === ErrorLevel.FATAL) { return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp: Date.now(), type: AnomalyType.ERROR_BURST, severity: AnomalySeverity.HIGH, description: `Critical error burst detected: ${error.message}`, affectedSystems: [error.source], detectionMethod: 'error_burst_detection', confidence: 0.9, evidence: { errorLevel: error.level, errorCategory: error.category, timeWindow }, impactAssessment: { affectedUsers: 100, businessImpact: 'significant', estimatedDowntime: 0, dataLossRisk: false, securityRisk: false }, recommendedActions: [ 'Investigate error root cause', 'Scale affected services', 'Enable circuit breakers' ] }; } return null; } private detectUnusualErrorPattern(error: ErrorEvent): AnomalyEvent | null { // Detect unusual error patterns based on historical data // This would analyze error fingerprints, timing, and correlation // For now, detect unusual error categories const unusualCategories = [ErrorCategory.MEMORY, ErrorCategory.AUTHENTICATION]; if (unusualCategories.includes(error.category)) { return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp: Date.now(), type: AnomalyType.UNUSUAL_PATTERN, severity: AnomalySeverity.MEDIUM, description: `Unusual error pattern detected in ${error.category} category`, affectedSystems: [error.source], detectionMethod: 'pattern_analysis', confidence: 0.75, evidence: { errorCategory: error.category, errorLevel: error.level }, impactAssessment: { affectedUsers: 50, businessImpact: 'moderate', estimatedDowntime: 0, dataLossRisk: error.category === ErrorCategory.DATABASE, securityRisk: 
error.category === ErrorCategory.AUTHENTICATION }, recommendedActions: [ 'Review recent changes', 'Check system configuration', 'Monitor affected components' ] }; } return null; } private detectCascadingFailure(error: ErrorEvent): AnomalyEvent | null { // Detect cascading failures by analyzing error correlation across systems // This would require integration with distributed tracing // Simplified detection for critical errors in core systems const coreComponents = ['llm-adapter', 'trajectory-store', 'pareto-frontier']; if (error.level === ErrorLevel.CRITICAL && coreComponents.includes(error.source)) { return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp: Date.now(), type: AnomalyType.CASCADING_FAILURE, severity: AnomalySeverity.CRITICAL, description: `Potential cascading failure detected in core component: ${error.source}`, affectedSystems: [error.source, ...coreComponents.filter(c => c !== error.source)], detectionMethod: 'cascading_failure_detection', confidence: 0.85, evidence: { coreComponent: error.source, errorLevel: error.level }, impactAssessment: { affectedUsers: 1000, businessImpact: 'severe', estimatedDowntime: 1800000, // 30 minutes dataLossRisk: true, securityRisk: false }, recommendedActions: [ 'Activate incident response', 'Enable degraded mode', 'Prepare for system restart', 'Notify stakeholders' ] }; } return null; } private detectStatisticalAnomaly(category: string, value: number, timestamp: number): AnomalyEvent | null { const model = this.statisticalModels.get(`${category}_zscore`); if (!model || !model.trainingData.length) return null; // Z-score anomaly detection const mean = model.trainingData.reduce((sum, d) => sum + d.value, 0) / model.trainingData.length; const variance = model.trainingData.reduce((sum, d) => sum + Math.pow(d.value - mean, 2), 0) / model.trainingData.length; const stdDev = Math.sqrt(variance); const zScore = Math.abs((value - mean) / stdDev); const threshold = 
this.getSensitivityThreshold('statistical', 3); if (zScore > threshold) { const severity = zScore > 4 ? AnomalySeverity.CRITICAL : zScore > 3.5 ? AnomalySeverity.HIGH : AnomalySeverity.MEDIUM; return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp, type: AnomalyType.PERFORMANCE_DEGRADATION, severity, description: `Statistical anomaly detected in ${category}: value ${value} (Z-score: ${zScore.toFixed(2)})`, affectedSystems: [category], detectionMethod: 'z_score_analysis', confidence: Math.min(0.95, zScore / 5), evidence: { value, zScore, mean, stdDev, threshold }, impactAssessment: { affectedUsers: severity === AnomalySeverity.CRITICAL ? 500 : 100, businessImpact: severity === AnomalySeverity.CRITICAL ? 'significant' : 'moderate', estimatedDowntime: 0, dataLossRisk: false, securityRisk: false }, recommendedActions: [ 'Investigate performance metrics', 'Check resource utilization', 'Review recent deployments' ] }; } return null; } private detectBehavioralAnomaly(category: string, value: number, timestamp: number): AnomalyEvent | null { const pattern = this.behavioralPatterns.get(category); if (!pattern) { // Create new behavioral pattern this.createBehavioralPattern(category, value, timestamp); return null; } // Update pattern this.updateBehavioralPattern(pattern, value, timestamp); // Check if value is outside normal range if (value < pattern.normalRange.min || value > pattern.normalRange.max) { const deviation = Math.max( pattern.normalRange.min - value, value - pattern.normalRange.max ); const confidence = Math.min(0.95, deviation / (pattern.standardDeviation * 2)); if (confidence > this.config.minimumConfidenceThreshold) { return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp, type: AnomalyType.BEHAVIORAL_DRIFT, severity: deviation > pattern.standardDeviation * 3 ? 
AnomalySeverity.HIGH : AnomalySeverity.MEDIUM, description: `Behavioral anomaly detected in ${category}: value ${value} outside normal range [${pattern.normalRange.min.toFixed(2)}, ${pattern.normalRange.max.toFixed(2)}]`, affectedSystems: [category], detectionMethod: 'behavioral_analysis', confidence, evidence: { value, normalRange: pattern.normalRange, deviation, trendDirection: pattern.trendDirection }, impactAssessment: { affectedUsers: 50, businessImpact: 'moderate', estimatedDowntime: 0, dataLossRisk: false, securityRisk: false }, recommendedActions: [ 'Analyze behavior change', 'Check system configuration', 'Review operational changes' ] }; } } return null; } private detectPerformanceAnomaly(metric: PerformanceMetric): AnomalyEvent | null { if (!metric.duration) return null; // Detect response time spikes const responseTimeThreshold = this.getSensitivityThreshold('response_time', 5000); // 5 seconds if (metric.duration > responseTimeThreshold) { return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp: metric.timestamp, type: AnomalyType.RESPONSE_TIME_SPIKE, severity: metric.duration > responseTimeThreshold * 2 ? 
AnomalySeverity.HIGH : AnomalySeverity.MEDIUM, description: `Response time spike detected: ${metric.duration}ms in ${metric.category}`, affectedSystems: [metric.category], detectionMethod: 'response_time_analysis', confidence: 0.9, evidence: { duration: metric.duration, threshold: responseTimeThreshold, category: metric.category }, impactAssessment: { affectedUsers: 200, businessImpact: 'moderate', estimatedDowntime: 0, dataLossRisk: false, securityRisk: false }, recommendedActions: [ 'Check system load', 'Investigate bottlenecks', 'Scale resources if needed' ] }; } return null; } private detectResourceExhaustion(resources: { cpu: number; memory: number; disk: number; network: number; timestamp: number; }): AnomalyEvent | null { const cpuThreshold = 90; const memoryThreshold = 85; const diskThreshold = 95; if (resources.cpu > cpuThreshold || resources.memory > memoryThreshold || resources.disk > diskThreshold) { const exhaustedResources = []; if (resources.cpu > cpuThreshold) exhaustedResources.push('CPU'); if (resources.memory > memoryThreshold) exhaustedResources.push('Memory'); if (resources.disk > diskThreshold) exhaustedResources.push('Disk'); return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp: resources.timestamp, type: AnomalyType.RESOURCE_EXHAUSTION, severity: AnomalySeverity.CRITICAL, description: `Resource exhaustion detected: ${exhaustedResources.join(', ')}`, affectedSystems: ['system'], detectionMethod: 'resource_monitoring', confidence: 0.95, evidence: { cpu: resources.cpu, memory: resources.memory, disk: resources.disk, exhaustedResources }, impactAssessment: { affectedUsers: 1000, businessImpact: 'severe', estimatedDowntime: 900000, // 15 minutes dataLossRisk: resources.disk > diskThreshold, securityRisk: false }, recommendedActions: [ 'Scale system resources', 'Kill non-essential processes', 'Clear temporary files', 'Investigate resource leaks' ] }; } return null; } private 
detectMemoryLeakAnomaly(memoryUsage: number, timestamp: number): AnomalyEvent | null { // Track memory usage over time const memoryData = this.baselineMetrics.get('memory') || []; memoryData.push({ timestamp, value: memoryUsage }); if (memoryData.length > 100) { memoryData.splice(0, memoryData.length - 100); } this.baselineMetrics.set('memory', memoryData); // Detect continuous memory growth if (memoryData.length >= 20) { const recent = memoryData.slice(-20); const firstHalf = recent.slice(0, 10); const secondHalf = recent.slice(10); const firstAvg = firstHalf.reduce((sum, d) => sum + d.value, 0) / firstHalf.length; const secondAvg = secondHalf.reduce((sum, d) => sum + d.value, 0) / secondHalf.length; const growthRate = (secondAvg - firstAvg) / firstAvg; if (growthRate > 0.1) { // 10% growth return { id: `anomaly_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, timestamp, type: AnomalyType.MEMORY_LEAK, severity: growthRate > 0.2 ? AnomalySeverity.CRITICAL : AnomalySeverity.HIGH, description: `Memory leak detected: ${(growthRate * 100).toFixed(1)}% growth rate`, affectedSystems: ['memory'], detectionMethod: 'memory_trend_analysis', confidence: Math.min(0.95, growthRate * 5), evidence: { memoryUsage, growthRate, firstAvg, secondAvg }, impactAssessment: { affectedUsers: 500, businessImpact: 'significant', estimatedDowntime: 1800000, // 30 minutes dataLossRisk: true, securityRisk: false }, recommendedActions: [ 'Investigate memory allocation patterns', 'Check for unclosed resources', 'Consider service restart', 'Enable memory profiling' ] }; } } return null; } private recordAnomaly(anomaly: AnomalyEvent): void { this.detectedAnomalies.push(anomaly); // Maintain retention policy const retentionCutoff = Date.now() - (this.config.anomalyRetentionDays * 86400000); this.detectedAnomalies = this.detectedAnomalies.filter(a => a.timestamp > retentionCutoff); // Emit event this.emit('anomaly-detected', anomaly); // Send alerts for high severity anomalies if 
(this.config.alertingEnabled && (anomaly.severity === AnomalySeverity.HIGH || anomaly.severity === AnomalySeverity.CRITICAL)) { this.emit('anomaly-alert', anomaly); } } private extractMetricValue(metric: PerformanceMetric): number | null { if (metric.duration !== undefined) return metric.duration; if (typeof metric.data.value === 'number') return metric.data.value; if (typeof metric.data.cpu === 'number') return metric.data.cpu; if (typeof metric.data.memory === 'number') return metric.data.memory; return null; } private updateBaselineData(category: string, timestamp: number, value: number): void { let data = this.baselineMetrics.get(category) || []; data.push({ timestamp, value }); // Keep only recent data (last 24 hours) const cutoff = timestamp - 86400000; data = data.filter(d => d.timestamp > cutoff); this.baselineMetrics.set(category, data); } private createBehavioralPattern(category: string, value: number, _timestamp: number): void { const pattern: BehavioralPattern = { id: `pattern_${category}_${Date.now()}`, name: `${category}_behavior`, description: `Behavioral pattern for ${category} metrics`, normalRange: { min: value * 0.8, max: value * 1.2 }, currentValue: value, historicalMean: value, standardDeviation: 0, confidenceInterval: { lower: value, upper: value }, trendDirection: 'stable', seasonalComponent: new Array(24).fill(1) // Hourly seasonal factors }; this.behavioralPatterns.set(category, pattern); } private updateBehavioralPattern(pattern: BehavioralPattern, value: number, _timestamp: number): void { // Update current value pattern.currentValue = value; // Update historical mean (exponential moving average) const alpha = 0.1; pattern.historicalMean = alpha * value + (1 - alpha) * pattern.historicalMean; // Update standard deviation (simplified) const deviation = Math.abs(value - pattern.historicalMean); pattern.standardDeviation = alpha * deviation + (1 - alpha) * pattern.standardDeviation; // Update normal range (mean ± 2 standard deviations) 
pattern.normalRange = { min: pattern.historicalMean - 2 * pattern.standardDeviation, max: pattern.historicalMean + 2 * pattern.standardDeviation }; // Update confidence interval pattern.confidenceInterval = { lower: pattern.historicalMean - 1.96 * pattern.standardDeviation, upper: pattern.historicalMean + 1.96 * pattern.standardDeviation }; } private predictPatternAnomaly(pattern: BehavioralPattern, timeHorizon: number): { predictedTime: number; type: AnomalyType; probability: number; severity: AnomalySeverity; } | null { // Simple trend-based prediction if (pattern.trendDirection === 'stable') return null; const currentTime = Date.now(); const projectionTime = currentTime + timeHorizon; // Estimate probability based on current trend let probability = 0; let severity = AnomalySeverity.LOW; if (pattern.trendDirection === 'increasing') { // Check if trend will push value outside normal range const projectedValue = pattern.currentValue * 1.2; // 20% increase assumption if (projectedValue > pattern.normalRange.max) { probability = 0.7; severity = AnomalySeverity.MEDIUM; } } if (probability > 0.5) { return { predictedTime: projectionTime, type: AnomalyType.BEHAVIORAL_DRIFT, probability, severity }; } return null; } private trainZScoreModel(category: string, data: Array<{ timestamp: number; value: number }>): void { const values = data.map(d => d.value); const mean = values.reduce((sum, v) => sum + v, 0) / values.length; const variance = values.reduce((sum, v) => sum + Math.pow(v - mean, 2), 0) / values.length; const model: StatisticalModel = { type: 'zscore', parameters: { mean, variance, stdDev: Math.sqrt(variance) }, trainingData: data, lastTrained: Date.now(), accuracy: 0.85 // Would be calculated with validation data }; this.statisticalModels.set(`${category}_zscore`, model); } private trainMovingAverageModel(category: string, data: Array<{ timestamp: number; value: number }>): void { const windowSize = Math.min(20, Math.floor(data.length / 5)); const model: 
StatisticalModel = { type: 'moving_average', parameters: { windowSize }, trainingData: data, lastTrained: Date.now(), accuracy: 0.75 }; this.statisticalModels.set(`${category}_ma`, model); } private trainSeasonalModel(category: string, data: Array<{ timestamp: number; value: number }>): void { // Simplified seasonal decomposition const hourlyData: Record<number, number[]> = {}; for (const point of data) { const hour = new Date(point.timestamp).getHours(); if (!hourlyData[hour]) hourlyData[hour] = []; hourlyData[hour].push(point.value); } const seasonalFactors: Record<number, number> = {}; const globalMean = data.reduce((sum, d) => sum + d.value, 0) / data.length; for (let hour = 0; hour < 24; hour++) { const hourData = hourlyData[hour]; if (hourData && hourData.length > 0) { const hourlyMean = hourData.reduce((sum, v) => sum + v, 0) / hourData.length; seasonalFactors[hour] = hourlyMean / globalMean; } else { seasonalFactors[hour] = 1; } } const model: StatisticalModel = { type: 'seasonal_decomposition', parameters: { seasonalFactors, globalMean }, trainingData: data, lastTrained: Date.now(), accuracy: 0.8 }; this.statisticalModels.set(`${category}_seasonal`, model); } private getSensitivityThreshold(_type: string, defaultValue: number): number { const multipliers = { low: 1.5, medium: 1.0, high: 0.7 }; return defaultValue * multipliers[this.config.sensitivityLevel]; } private initializeBaselineModels(): void { // Initialize common thresholds this.alertingThresholds.set('error_burst', 10); this.alertingThresholds.set('response_time', 5000); this.alertingThresholds.set('cpu_usage', 90); this.alertingThresholds.set('memory_usage', 85); } private estimateMemoryUsage(): number { // Estimate memory based on anomalies and baseline data let totalSize = 0; // Baseline metrics size for (const metrics of this.baselineMetrics.values()) { totalSize += metrics.length * 128; // Approximate size per metric } // Detected anomalies size totalSize += this.detectedAnomalies.length * 
512; // Approximate size per anomaly // Statistical models size (estimated) totalSize += this.statisticalModels.size * 2048; // Approximate size per model return totalSize; } private initializeMemoryIntegration(): void { MemoryLeakIntegration.initialize(); setInterval(() => { const memoryUsage = this.estimateMemoryUsage(); MemoryLeakIntegration.trackAnomalyDetection('monitor', memoryUsage); }, 60000); } private startBackgroundProcesses(): void { // Model retraining setInterval(() => { for (const category of this.baselineMetrics.keys()) { this.trainModels(category); } }, this.config.modelRetrainingInterval); // Cleanup old data setInterval(() => { const cutoff = Date.now() - (this.config.anomalyRetentionDays * 86400000); this.detectedAnomalies = this.detectedAnomalies.filter(a => a.timestamp > cutoff); }, 3600000); // Hourly cleanup } }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.