import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { EventEmitter } from 'events';
import {
PerformanceMonitor,
getPerformanceMonitor,
PerformanceMonitorConfig,
ToolExecutionMetrics,
ResourceUsageMetrics,
QueueMetrics,
CacheMetrics,
OCRMetrics,
SystemHealthStatus
} from '../../src/core/performance-monitor.js';
import * as os from 'os';
// Mock the os module with a fixed, deterministic host profile:
// 4 CPUs, 16 GiB total memory with 8 GiB free (=> 50% used).
vi.mock('os', () => {
  const GIB = 1024 * 1024 * 1024;
  return {
    cpus: vi.fn(() => Array(4).fill({ model: 'Intel', speed: 2400 })),
    freemem: vi.fn(() => 8 * GIB),
    totalmem: vi.fn(() => 16 * GIB),
    loadavg: vi.fn(() => [1.5, 1.2, 1.0])
  };
});
// Mock the logger so tests stay silent; each level gets its own vi.fn
// so call counts can be asserted per level if needed.
vi.mock('../../src/logger.js', () => ({
  logger: {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn()
  }
}));
// Mock the performance utilities with minimal deterministic stand-ins.
// Threshold monitoring and report generation are inert no-ops here; the
// percentile helper always reports p95 = 100 so tests can pin that value.
vi.mock('../../src/utils/performance-utils.js', () => ({
  MovingAverage: class {
    private samples: number[] = [];
    add(sample: number) { this.samples.push(sample); }
    getAverage() {
      const count = this.samples.length;
      if (count === 0) return 0;
      let total = 0;
      for (const s of this.samples) total += s;
      return total / count;
    }
    clear() { this.samples = []; }
  },
  ExponentialMovingAverage: class {
    // Degenerate EMA: just remembers the most recent sample.
    private latest = 0;
    add(sample: number) { this.latest = sample; }
    getValue() { return this.latest; }
  },
  ThresholdMonitor: class {
    constructor(public name: string, public config: any) {}
    check(value: number) {}
    setCallbacks(callbacks: any) {}
    getViolations(since?: number) { return []; }
    clearViolations() {}
    updateConfig(config: any) {}
  },
  ThresholdViolation: class {},
  PerformanceReportGenerator: class {
    // Always returns a healthy, empty report snapshot.
    generateReport(metrics: Map<string, number[]>, violations: any[]) {
      const summary = {
        overallHealth: 'healthy' as const,
        improvingMetrics: [],
        degradingMetrics: [],
        criticalAlerts: 0,
        warningAlerts: 0
      };
      return {
        timestamp: Date.now(),
        duration: 0,
        metrics: {},
        alerts: [],
        summary,
        recommendations: []
      };
    }
    clearBaselines() {}
  },
  PerformanceUtils: {
    detectAnomalies: vi.fn((values: number[]) => [])
  },
  calculatePercentiles: vi.fn((values: number[], percentiles: number[]) => ({ p95: 100 }))
}));
describe('PerformanceMonitor', () => {
// Shared monitor instance, re-created for every test.
let monitor: PerformanceMonitor;
beforeEach(() => {
// Reset singleton instance so each test starts from a fresh monitor
// rather than state left behind by a previous test.
(PerformanceMonitor as any).instance = null;
monitor = getPerformanceMonitor();
});
afterEach(() => {
// cleanup() stops monitoring and clears the singleton; clearAllMocks()
// resets call history on every vi.fn (it does NOT restore spies).
monitor.cleanup();
vi.clearAllMocks();
});
describe('Initialization', () => {
  it('should create a singleton instance', () => {
    const first = getPerformanceMonitor();
    const second = getPerformanceMonitor();
    // Both accessor calls must hand back the exact same object.
    expect(first).toBe(second);
  });

  it('should initialize with default configuration', () => {
    expect(monitor).toBeInstanceOf(PerformanceMonitor);
    expect(monitor).toBeInstanceOf(EventEmitter);
  });

  it('should accept custom configuration', () => {
    // Drop the shared singleton so the custom config actually applies.
    (PerformanceMonitor as any).instance = null;
    const overrides: Partial<PerformanceMonitorConfig> = {
      resourceMonitoringIntervalMs: 10000,
      thresholds: {
        cpuUsageWarning: 80,
        cpuUsageCritical: 95,
        memoryUsageWarning: 85,
        memoryUsageCritical: 95,
        executionTimeWarning: 3000,
        executionTimeCritical: 10000,
        errorRateWarning: 0.1,
        errorRateCritical: 0.25,
        queueLengthWarning: 15,
        queueLengthCritical: 30
      }
    };
    expect(getPerformanceMonitor(overrides)).toBeInstanceOf(PerformanceMonitor);
  });
});
describe('Start/Stop Operations', () => {
  it('should start monitoring', () => {
    const startedHandler = vi.fn();
    monitor.on('started', startedHandler);
    monitor.start();
    expect(startedHandler).toHaveBeenCalled();
  });

  it('should not start if already running', () => {
    monitor.start();
    const startedHandler = vi.fn();
    monitor.on('started', startedHandler);
    // Second start() must be a no-op and emit nothing.
    monitor.start();
    expect(startedHandler).not.toHaveBeenCalled();
  });

  it('should stop monitoring', () => {
    monitor.start();
    const stoppedHandler = vi.fn();
    monitor.on('stopped', stoppedHandler);
    monitor.stop();
    expect(stoppedHandler).toHaveBeenCalled();
  });

  it('should handle stop when not running', () => {
    expect(() => monitor.stop()).not.toThrow();
  });

  it('should set up intervals on start', () => {
    const setIntervalSpy = vi.spyOn(global, 'setInterval');
    try {
      monitor.start();
      // Should set up 3 intervals: resource monitoring, metrics cleanup, report generation
      expect(setIntervalSpy).toHaveBeenCalledTimes(3);
      monitor.stop();
    } finally {
      // FIX: restore the global. The afterEach hook only calls
      // vi.clearAllMocks(), which clears call history but leaves the spy
      // installed on global.setInterval, leaking it into later tests.
      setIntervalSpy.mockRestore();
    }
  });

  it('should clear intervals on stop', () => {
    const clearIntervalSpy = vi.spyOn(global, 'clearInterval');
    try {
      monitor.start();
      monitor.stop();
      expect(clearIntervalSpy).toHaveBeenCalledTimes(3);
    } finally {
      // FIX: same spy-leak concern as the setInterval test above.
      clearIntervalSpy.mockRestore();
    }
  });
});
describe('Tool Execution Metrics', () => {
  it('should record tool execution metrics', () => {
    monitor.recordToolExecution('screenshot', 150, true);
    const dashboard = monitor.getDashboardData();
    const toolMetrics = dashboard.toolMetrics.find(m => m.toolName === 'screenshot');
    expect(toolMetrics).toBeDefined();
    expect(toolMetrics?.successCount).toBe(1);
    expect(toolMetrics?.errorCount).toBe(0);
    expect(toolMetrics?.executionTimes).toContain(150);
  });

  it('should track successful and failed executions', () => {
    monitor.recordToolExecution('ocr', 100, true);
    monitor.recordToolExecution('ocr', 200, false);
    monitor.recordToolExecution('ocr', 150, true);
    const dashboard = monitor.getDashboardData();
    const ocrMetrics = dashboard.toolMetrics.find(m => m.toolName === 'ocr');
    expect(ocrMetrics?.successCount).toBe(2);
    expect(ocrMetrics?.errorCount).toBe(1);
    expect(ocrMetrics?.executionTimes).toHaveLength(3);
  });

  it('should calculate derived metrics', () => {
    for (let i = 0; i < 10; i++) {
      monitor.recordToolExecution('automation', 100 + i * 10, true);
    }
    const dashboard = monitor.getDashboardData();
    const metrics = dashboard.toolMetrics.find(m => m.toolName === 'automation');
    expect(metrics?.averageExecutionTime).toBeGreaterThan(0);
    // calculatePercentiles is mocked to always report p95 = 100.
    expect(metrics?.p95ExecutionTime).toBe(100);
    expect(metrics?.throughput).toBeGreaterThan(0);
  });

  it('should limit execution history size', () => {
    // Record more than the max history size (1000).
    for (let i = 0; i < 1100; i++) {
      monitor.recordToolExecution('bulk-test', 50, true);
    }
    const dashboard = monitor.getDashboardData();
    const metrics = dashboard.toolMetrics.find(m => m.toolName === 'bulk-test');
    expect(metrics?.executionTimes.length).toBeLessThanOrEqual(1000);
  });

  it('should emit threshold violations for slow executions', () => {
    const violationHandler = vi.fn();
    monitor.on('thresholdViolation', violationHandler);
    // Record execution time above the critical threshold (default 5000ms).
    // ThresholdMonitor is mocked, so no real violation event can fire; at
    // minimum the sample must be recorded without throwing.
    // FIX: this test previously contained no assertions at all.
    expect(() => monitor.recordToolExecution('slow-tool', 6000, true)).not.toThrow();
    const metrics = monitor.getDashboardData().toolMetrics.find(m => m.toolName === 'slow-tool');
    expect(metrics?.executionTimes).toContain(6000);
  });
});
describe('Resource Usage Metrics', () => {
  it('should collect resource metrics when monitoring is active', () => {
    vi.useFakeTimers();
    monitor.start();
    // Jump past the collection interval so at least one sample lands.
    vi.advanceTimersByTime(5000);
    expect(monitor.getDashboardData().resourceMetrics).toBeTruthy();
    monitor.stop();
    vi.useRealTimers();
  });

  it('should calculate CPU and memory usage percentages', () => {
    monitor.start();
    // Invoke the private collector directly instead of waiting on timers.
    (monitor as any).collectResourceMetrics();
    const resource = monitor.getDashboardData().resourceMetrics;
    expect(resource?.cpuUsagePercent).toBeDefined();
    expect(resource?.memoryUsagePercent).toBeDefined();
    // os mock reports 8GB free of 16GB total => 50% used.
    expect(resource?.memoryUsagePercent).toBe(50);
    monitor.stop();
  });
});
describe('Queue Metrics', () => {
  it('should record queue metrics', () => {
    monitor.recordQueueMetrics('task-queue', 5, 100, 50);
    const recorded = monitor.getDashboardData().queueMetrics.find(q => q.name === 'task-queue');
    expect(recorded).toBeDefined();
    expect(recorded?.length).toBe(5);
    expect(recorded?.processingTime).toBe(100);
    expect(recorded?.waitTime).toBe(50);
    // throughput = 1000ms / processingTime = 10 items/sec
    expect(recorded?.throughput).toBe(10);
  });

  it('should handle zero processing time', () => {
    monitor.recordQueueMetrics('empty-queue', 0, 0);
    const recorded = monitor.getDashboardData().queueMetrics.find(q => q.name === 'empty-queue');
    // No division by zero: throughput degrades to 0.
    expect(recorded?.throughput).toBe(0);
  });
});
describe('Cache Metrics', () => {
  it('should record cache metrics', () => {
    monitor.recordCacheMetrics('image-cache', 80, 20, 5, 1024);
    const recorded = monitor.getDashboardData().cacheMetrics.find(c => c.name === 'image-cache');
    expect(recorded).toBeDefined();
    expect(recorded?.hitCount).toBe(80);
    expect(recorded?.missCount).toBe(20);
    // 80 hits out of 100 total requests.
    expect(recorded?.hitRate).toBe(0.8);
    expect(recorded?.evictionCount).toBe(5);
    expect(recorded?.size).toBe(1024);
  });

  it('should handle zero total requests', () => {
    monitor.recordCacheMetrics('new-cache', 0, 0);
    const recorded = monitor.getDashboardData().cacheMetrics.find(c => c.name === 'new-cache');
    // An untouched cache reports a 0 hit rate, not NaN.
    expect(recorded?.hitRate).toBe(0);
  });
});
describe('OCR Worker Metrics', () => {
  it('should record OCR worker metrics', () => {
    monitor.recordOCRWorkerMetrics('worker-1', 50, 200, 2, true);
    monitor.recordOCRWorkerMetrics('worker-2', 45, 250, 5, false);
    const { ocrMetrics } = monitor.getDashboardData();
    expect(ocrMetrics).toHaveLength(2);
    const healthyWorker = ocrMetrics.find(w => w.workerId === 'worker-1');
    expect(healthyWorker?.isHealthy).toBe(true);
    expect(healthyWorker?.tasksCompleted).toBe(50);
    const failingWorker = ocrMetrics.find(w => w.workerId === 'worker-2');
    expect(failingWorker?.isHealthy).toBe(false);
    expect(failingWorker?.errorCount).toBe(5);
  });
});
describe('System Health Assessment', () => {
  it('should provide overall system health status', () => {
    const health = monitor.getSystemHealth();
    expect(health.overall).toMatch(/healthy|warning|critical/);
    // Every subsystem must be represented in the component map.
    for (const component of ['tools', 'resources', 'cache', 'queues', 'ocr']) {
      expect(health.components).toHaveProperty(component);
    }
    expect(health.uptime).toBeGreaterThanOrEqual(0);
    expect(health.lastHealthCheck).toBeGreaterThan(0);
  });

  it('should assess tool health based on error rates', () => {
    // 3 successes, 7 failures => 70% error rate.
    for (let i = 0; i < 10; i++) {
      monitor.recordToolExecution('bad-tool', 100, i < 3);
    }
    // Component health assessment depends on thresholds.
    expect(monitor.getSystemHealth().components.tools).toBeDefined();
  });

  it('should assess queue health based on queue lengths', () => {
    // Length 100 is above the critical queue-length threshold.
    monitor.recordQueueMetrics('backed-up-queue', 100, 1000);
    expect(monitor.getSystemHealth().components.queues).toBeDefined();
  });

  it('should assess cache health based on hit rates', () => {
    monitor.recordCacheMetrics('poor-cache', 10, 90); // 10% hit rate
    monitor.recordCacheMetrics('good-cache', 90, 10); // 90% hit rate
    expect(monitor.getSystemHealth().components.cache).toBeDefined();
  });

  it('should assess OCR health based on worker status', () => {
    monitor.recordOCRWorkerMetrics('worker-1', 100, 150, 0, false);
    monitor.recordOCRWorkerMetrics('worker-2', 100, 150, 0, false);
    // With every worker unhealthy, a status must still be reported.
    expect(monitor.getSystemHealth().components.ocr).toBeDefined();
  });
});
describe('Performance Trends', () => {
  it('should calculate performance trends for tool metrics', () => {
    // Feed a steadily increasing series of execution times.
    for (let sample = 0; sample < 20; sample++) {
      monitor.recordToolExecution('trending-tool', 100 + sample * 5, true);
    }
    const trends = monitor.getPerformanceTrends('tool_trending-tool');
    expect(trends.timestamps).toHaveLength(20);
    expect(trends.values).toHaveLength(20);
    expect(trends.trend).toMatch(/improving|degrading|stable/);
    expect(trends.anomalies).toBeInstanceOf(Array);
  });

  it('should calculate trends for resource metrics', () => {
    monitor.start();
    // Seed resource history by invoking the private collector directly.
    for (let i = 0; i < 5; i++) {
      (monitor as any).collectResourceMetrics();
    }
    expect(monitor.getPerformanceTrends('cpu_usage').values.length).toBeGreaterThan(0);
    expect(monitor.getPerformanceTrends('memory_usage').values.length).toBeGreaterThan(0);
    monitor.stop();
  });

  it('should handle empty data gracefully', () => {
    const trends = monitor.getPerformanceTrends('nonexistent_metric');
    expect(trends.timestamps).toHaveLength(0);
    expect(trends.values).toHaveLength(0);
    expect(trends.trend).toBe('stable');
    expect(trends.anomalies).toHaveLength(0);
  });
});
describe('Dashboard Data', () => {
  it('should provide comprehensive dashboard data', () => {
    // Seed one metric of each kind so every section is populated.
    monitor.recordToolExecution('tool1', 100, true);
    monitor.recordQueueMetrics('queue1', 5, 50);
    monitor.recordCacheMetrics('cache1', 100, 50);
    monitor.recordOCRWorkerMetrics('worker1', 10, 100, 0, true);
    const data = monitor.getDashboardData();
    expect(data.systemHealth).toBeDefined();
    expect(data.toolMetrics).toBeInstanceOf(Array);
    expect(data.queueMetrics).toBeInstanceOf(Array);
    expect(data.cacheMetrics).toBeInstanceOf(Array);
    expect(data.ocrMetrics).toBeInstanceOf(Array);
    expect(data.recommendations).toBeInstanceOf(Array);
  });

  it('should generate recommendations based on metrics', () => {
    // Create conditions that should each trigger a recommendation.
    monitor.recordToolExecution('slow-tool', 6000, true); // slow execution
    for (let i = 0; i < 10; i++) {
      monitor.recordToolExecution('error-tool', 100, i < 3); // high error rate
    }
    monitor.recordQueueMetrics('backed-queue', 100, 100); // long queue
    monitor.recordCacheMetrics('poor-cache', 20, 80); // low hit rate
    expect(monitor.getDashboardData().recommendations.length).toBeGreaterThan(0);
  });
});
describe('Configuration Updates', () => {
  it('should update configuration dynamically', () => {
    const newConfig: Partial<PerformanceMonitorConfig> = {
      resourceMonitoringIntervalMs: 10000,
      thresholds: {
        cpuUsageWarning: 85,
        cpuUsageCritical: 98,
        memoryUsageWarning: 90,
        memoryUsageCritical: 98,
        executionTimeWarning: 4000,
        executionTimeCritical: 8000,
        errorRateWarning: 0.15,
        errorRateCritical: 0.3,
        queueLengthWarning: 25,
        queueLengthCritical: 60
      }
    };
    // FIX: updateConfig was previously called twice with the same config,
    // the first call unchecked. A single guarded call covers the same
    // behavior (configuration is applied internally without throwing).
    expect(() => monitor.updateConfig(newConfig)).not.toThrow();
  });
});
describe('Reset and Cleanup', () => {
  it('should reset all metrics', () => {
    // Populate a few metric stores first.
    monitor.recordToolExecution('tool1', 100, true);
    monitor.recordQueueMetrics('queue1', 5, 50);
    monitor.recordCacheMetrics('cache1', 100, 50);
    const onReset = vi.fn();
    monitor.on('reset', onReset);
    monitor.reset();
    expect(onReset).toHaveBeenCalled();
    // Every store should be empty afterwards.
    const data = monitor.getDashboardData();
    expect(data.toolMetrics).toHaveLength(0);
    expect(data.queueMetrics).toHaveLength(0);
    expect(data.cacheMetrics).toHaveLength(0);
  });

  it('should cleanup properly', () => {
    monitor.start();
    monitor.cleanup();
    // cleanup() stops monitoring and drops the singleton reference.
    expect((PerformanceMonitor as any).instance).toBeNull();
  });
});
describe('Event Emissions', () => {
  it('should emit events for significant occurrences', () => {
    // One spy per lifecycle event the monitor can emit.
    const handlers = {
      started: vi.fn(),
      stopped: vi.fn(),
      thresholdViolation: vi.fn(),
      reportGenerated: vi.fn(),
      reset: vi.fn()
    };
    for (const [event, handler] of Object.entries(handlers)) {
      monitor.on(event, handler);
    }
    monitor.start();
    expect(handlers.started).toHaveBeenCalled();
    monitor.stop();
    expect(handlers.stopped).toHaveBeenCalled();
    monitor.reset();
    expect(handlers.reset).toHaveBeenCalled();
  });
});
describe('Edge Cases and Error Handling', () => {
  it('should handle metrics collection when system info is unavailable', () => {
    // Degenerate host: no CPUs and zero total/free memory.
    vi.mocked(os.cpus).mockReturnValueOnce([]);
    vi.mocked(os.freemem).mockReturnValueOnce(0);
    vi.mocked(os.totalmem).mockReturnValueOnce(0);
    monitor.start();
    expect(() => (monitor as any).collectResourceMetrics()).not.toThrow();
    monitor.stop();
  });

  it('should handle very large metric values', () => {
    const largeValue = Number.MAX_SAFE_INTEGER;
    expect(() => {
      monitor.recordToolExecution('large-tool', largeValue, true);
      monitor.recordQueueMetrics('large-queue', largeValue, largeValue);
      monitor.recordCacheMetrics('large-cache', largeValue, largeValue);
    }).not.toThrow();
  });

  it('should handle concurrent metric updates', async () => {
    const promises: Promise<void>[] = [];
    // Simulate concurrent updates across 10 tool names.
    for (let i = 0; i < 100; i++) {
      promises.push(
        Promise.resolve().then(() => {
          monitor.recordToolExecution(`tool-${i % 10}`, Math.random() * 1000, Math.random() > 0.1);
        })
      );
    }
    // FIX: the `.resolves` assertion must be awaited (and the test async);
    // previously the returned assertion promise was discarded, so a
    // rejection would have escaped the test unnoticed.
    await expect(Promise.all(promises)).resolves.toBeDefined();
  });
});
});