
Prompt Auto-Optimizer MCP

by sloth-wq
llm-adapter.test.ts (17.9 kB)
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { EventEmitter } from 'events';

// Mock child_process module
vi.mock('child_process', () => ({
  spawn: vi.fn(),
}));

import { LLMAdapter } from './llm-adapter';
import { spawn } from 'child_process';

// Get reference to the mocked spawn function
const mockSpawn = vi.mocked(spawn);

// Mock process class that extends EventEmitter
class MockChildProcess extends EventEmitter {
  public stdout = new EventEmitter();
  public stderr = new EventEmitter();
  public stdin = { write: vi.fn(), end: vi.fn() };
  public killed = false;
  public exitCode: number | null = null;

  kill() {
    this.killed = true;
    this.emit('exit', 1, 'SIGTERM');
  }
}

describe('LLMAdapter', () => {
  let adapter: LLMAdapter;
  let mockProcess: MockChildProcess;

  beforeEach(() => {
    vi.clearAllMocks();
    adapter = new LLMAdapter();
    // Create a fresh mock process for each test
    mockProcess = new MockChildProcess();
    // Make mockSpawn return the mockProcess for each test
    mockSpawn.mockReturnValue(mockProcess);
  });

  afterEach(() => {
    adapter.shutdown();
  });

  describe('initialization', () => {
    it('should initialize with default configuration', () => {
      expect(adapter).toBeDefined();
      expect(adapter['maxConcurrentProcesses']).toBe(3);
      expect(adapter['processTimeout']).toBe(300000); // 5 minutes
    });

    it('should accept custom configuration', () => {
      const customAdapter = new LLMAdapter({
        maxConcurrentProcesses: 5,
        processTimeout: 180000,
      });
      expect(customAdapter['maxConcurrentProcesses']).toBe(5);
      expect(customAdapter['processTimeout']).toBe(180000);
    });
  });

  describe('Claude CLI process spawning', () => {
    it('should spawn claude process with correct arguments', async () => {
      const prompt = 'Test prompt';

      // Simulate successful response
      setTimeout(() => {
        mockProcess.stdout.emit('data', JSON.stringify({
          response: 'Test response',
          usage: { inputTokens: 10, outputTokens: 5 }
        }));
        mockProcess.emit('exit', 0);
      }, 10);

      await adapter.generateResponse(prompt);

      expect(mockSpawn).toHaveBeenCalledWith('claude', ['--format', 'json'],
        expect.objectContaining({ stdio: ['pipe', 'pipe', 'pipe'] })
      );
    });

    it('should handle process spawn errors', async () => {
      mockSpawn.mockImplementation(() => {
        const errorProcess = new MockChildProcess();
        setTimeout(() => errorProcess.emit('error', new Error('Command not found')), 10);
        return errorProcess;
      });

      await expect(adapter.generateResponse('test')).rejects.toThrow('Command not found');
    });

    it('should send prompt to stdin correctly', async () => {
      const prompt = 'Test prompt with special characters: "quotes" and \nnewlines';

      setTimeout(() => {
        mockProcess.stdout.emit('data', JSON.stringify({ response: 'ok' }));
        mockProcess.emit('exit', 0);
      }, 10);

      await adapter.generateResponse(prompt);

      expect(mockProcess.stdin.write).toHaveBeenCalledWith(prompt);
      expect(mockProcess.stdin.end).toHaveBeenCalled();
    });
  });

  describe('response parsing', () => {
    it('should parse valid JSON response correctly', async () => {
      const expectedResponse = {
        response: 'This is a test response',
        usage: { inputTokens: 15, outputTokens: 8 }
      };

      setTimeout(() => {
        mockProcess.stdout.emit('data', JSON.stringify(expectedResponse));
        mockProcess.emit('exit', 0);
      }, 10);

      const result = await adapter.generateResponse('test');

      expect(result).toEqual({
        response: expectedResponse.response,
        usage: expectedResponse.usage,
        processingTime: expect.any(Number),
      });
    });

    it('should handle chunked JSON response', async () => {
      const response = { response: 'Long response', usage: { inputTokens: 20, outputTokens: 15 } };
      const jsonString = JSON.stringify(response);
      const chunk1 = jsonString.slice(0, 20);
      const chunk2 = jsonString.slice(20);

      setTimeout(() => {
        mockProcess.stdout.emit('data', chunk1);
        mockProcess.stdout.emit('data', chunk2);
        mockProcess.emit('exit', 0);
      }, 10);

      const result = await adapter.generateResponse('test');
      expect(result.response).toBe(response.response);
    });

    it('should handle invalid JSON response', async () => {
      setTimeout(() => {
        mockProcess.stdout.emit('data', 'Invalid JSON response');
        mockProcess.emit('exit', 0);
      }, 10);

      await expect(adapter.generateResponse('test')).rejects.toThrow('Invalid JSON');
    });

    it('should handle empty response', async () => {
      setTimeout(() => {
        mockProcess.emit('exit', 0);
      }, 10);

      await expect(adapter.generateResponse('test')).rejects.toThrow('Empty response');
    });
  });

  describe('error handling', () => {
    it('should handle process exit with non-zero code', async () => {
      setTimeout(() => {
        mockProcess.stderr.emit('data', 'Authentication failed');
        mockProcess.emit('exit', 1);
      }, 10);

      await expect(adapter.generateResponse('test')).rejects.toThrow('Authentication failed');
    });

    it('should handle stderr output', async () => {
      setTimeout(() => {
        mockProcess.stderr.emit('data', 'Warning: API rate limit approaching\n');
        mockProcess.stderr.emit('data', 'Error: Request failed');
        mockProcess.emit('exit', 1);
      }, 10);

      await expect(adapter.generateResponse('test')).rejects.toThrow('Request failed');
    });

    it('should handle process timeout', async () => {
      const quickTimeoutAdapter = new LLMAdapter({ processTimeout: 100 });

      // Process never exits, should timeout
      const promise = quickTimeoutAdapter.generateResponse('test');

      await expect(promise).rejects.toThrow('timeout');
      expect(mockProcess.killed).toBe(true);
    });

    it('should collect stderr for debugging', async () => {
      const errorMessage = 'Detailed error message';

      setTimeout(() => {
        mockProcess.stderr.emit('data', errorMessage);
        mockProcess.emit('exit', 1);
      }, 10);

      try {
        await adapter.generateResponse('test');
      } catch (error: any) {
        expect(error.message).toContain(errorMessage);
      }
    });
  });

  describe('retry mechanism', () => {
    it('should retry up to 3 times on failure', async () => {
      let attemptCount = 0;

      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        attemptCount++;

        setTimeout(() => {
          if (attemptCount < 3) {
            process.emit('exit', 1); // Fail first 2 attempts
          } else {
            process.stdout.emit('data', JSON.stringify({ response: 'Success on retry' }));
            process.emit('exit', 0);
          }
        }, 10);

        return process;
      });

      const result = await adapter.generateResponse('test');

      expect(attemptCount).toBe(3);
      expect(result.response).toBe('Success on retry');
    });

    it('should fail after max retries exceeded', async () => {
      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        setTimeout(() => process.emit('exit', 1), 10);
        return process;
      });

      await expect(adapter.generateResponse('test')).rejects.toThrow('Max retries exceeded');
      expect(mockSpawn).toHaveBeenCalledTimes(3);
    });

    it('should use exponential backoff between retries', async () => {
      const attemptTimes: number[] = [];

      mockSpawn.mockImplementation(() => {
        attemptTimes.push(Date.now());
        const process = new MockChildProcess();
        setTimeout(() => process.emit('exit', 1), 10);
        return process;
      });

      await expect(adapter.generateResponse('test')).rejects.toThrow();

      // Check that delays increase (1s, 2s between attempts)
      expect(attemptTimes.length).toBe(3);
      const delay1 = attemptTimes[1] - attemptTimes[0];
      const delay2 = attemptTimes[2] - attemptTimes[1];
      expect(delay1).toBeGreaterThanOrEqual(1000);
      expect(delay2).toBeGreaterThanOrEqual(2000);
    });
  });

  describe('process pool management', () => {
    it('should respect max concurrent processes limit', async () => {
      const adapter = new LLMAdapter({ maxConcurrentProcesses: 2 });
      const promises = [];
      const processPromises: Array<() => void> = [];

      // Mock processes that wait for manual resolution
      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        const resolveProcess = () => {
          process.stdout.emit('data', JSON.stringify({ response: 'ok' }));
          process.emit('exit', 0);
        };
        processPromises.push(resolveProcess);
        return process;
      });

      // Start 4 concurrent requests
      for (let i = 0; i < 4; i++) {
        promises.push(adapter.generateResponse(`test ${i}`));
      }

      // Should only spawn 2 processes initially
      await new Promise(resolve => setTimeout(resolve, 50));
      expect(mockSpawn).toHaveBeenCalledTimes(2);

      // Resolve first 2 processes
      processPromises[0]();
      processPromises[1]();

      // Wait for queue processing
      await new Promise(resolve => setTimeout(resolve, 50));

      // Should spawn remaining 2 processes
      expect(mockSpawn).toHaveBeenCalledTimes(4);

      // Resolve remaining processes
      processPromises[2]();
      processPromises[3]();

      await Promise.all(promises);
    });

    it('should queue requests when at capacity', async () => {
      const adapter = new LLMAdapter({ maxConcurrentProcesses: 1 });
      const results: string[] = [];

      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        setTimeout(() => {
          process.stdout.emit('data', JSON.stringify({ response: `Response ${Date.now()}` }));
          process.emit('exit', 0);
        }, 100);
        return process;
      });

      const promises = [
        adapter.generateResponse('first').then(r => results.push('first: ' + r.response)),
        adapter.generateResponse('second').then(r => results.push('second: ' + r.response)),
        adapter.generateResponse('third').then(r => results.push('third: ' + r.response)),
      ];

      await Promise.all(promises);

      expect(results).toHaveLength(3);
      expect(mockSpawn).toHaveBeenCalledTimes(3);
    });

    it('should handle process pool shutdown gracefully', async () => {
      const spawnedProcesses: MockChildProcess[] = [];

      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        spawnedProcesses.push(process);
        // Never resolve to simulate long-running process
        return process;
      });

      // Start some long-running requests
      adapter.generateResponse('long1');
      adapter.generateResponse('long2');

      await new Promise(resolve => setTimeout(resolve, 50));

      // Shutdown should kill active processes
      adapter.shutdown();

      // All spawned processes should be killed
      expect(spawnedProcesses.length).toBeGreaterThan(0);
      spawnedProcesses.forEach(process => {
        expect(process.killed).toBe(true);
      });
    });
  });

  describe('concurrent execution limits', () => {
    it('should enforce global concurrency limit', async () => {
      const adapter = new LLMAdapter({ maxConcurrentProcesses: 2 });
      let activeProcesses = 0;
      let maxConcurrent = 0;

      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        activeProcesses++;
        maxConcurrent = Math.max(maxConcurrent, activeProcesses);

        setTimeout(() => {
          activeProcesses--;
          process.stdout.emit('data', JSON.stringify({ response: 'done' }));
          process.emit('exit', 0);
        }, 100);

        return process;
      });

      const promises = Array.from({ length: 5 }, (_, i) =>
        adapter.generateResponse(`request ${i}`)
      );

      await Promise.all(promises);

      expect(maxConcurrent).toBeLessThanOrEqual(2);
    });

    it('should properly clean up completed processes from pool', async () => {
      const adapter = new LLMAdapter({ maxConcurrentProcesses: 2 });

      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        setTimeout(() => {
          process.stdout.emit('data', JSON.stringify({ response: 'completed' }));
          process.emit('exit', 0);
        }, 50);
        return process;
      });

      // Run multiple batches to ensure cleanup works
      await Promise.all([
        adapter.generateResponse('batch1-1'),
        adapter.generateResponse('batch1-2'),
      ]);

      await Promise.all([
        adapter.generateResponse('batch2-1'),
        adapter.generateResponse('batch2-2'),
      ]);

      expect(mockSpawn).toHaveBeenCalledTimes(4);
    });
  });

  describe('timeout handling', () => {
    it('should enforce 5 minute default timeout', async () => {
      // Use shorter timeout for testing, but verify default is 5 minutes
      const adapter = new LLMAdapter({ processTimeout: 200 });
      expect(adapter['processTimeout']).toBe(200);

      // Also verify that default constructor uses 5 minutes
      const defaultAdapter = new LLMAdapter();
      expect(defaultAdapter['processTimeout']).toBe(300000); // 5 minutes

      // Process that never responds
      mockSpawn.mockImplementation(() => new MockChildProcess());

      const startTime = Date.now();
      await expect(adapter.generateResponse('test')).rejects.toThrow('timeout');

      // Should timeout in around 200ms
      const elapsed = Date.now() - startTime;
      expect(elapsed).toBeGreaterThanOrEqual(200);
      expect(elapsed).toBeLessThan(400);
    });

    it('should allow custom timeout configuration', async () => {
      const adapter = new LLMAdapter({ processTimeout: 200 });

      mockSpawn.mockImplementation(() => new MockChildProcess());

      const startTime = Date.now();
      await expect(adapter.generateResponse('test')).rejects.toThrow('timeout');

      const elapsed = Date.now() - startTime;
      expect(elapsed).toBeGreaterThanOrEqual(200);
      expect(elapsed).toBeLessThan(400);
    });

    it('should kill timed out processes', async () => {
      const adapter = new LLMAdapter({ processTimeout: 100 });
      let killedProcess: MockChildProcess | null = null;

      mockSpawn.mockImplementation(() => {
        killedProcess = new MockChildProcess();
        return killedProcess;
      });

      await expect(adapter.generateResponse('test')).rejects.toThrow('timeout');
      expect(killedProcess?.killed).toBe(true);
    });
  });

  describe('usage tracking', () => {
    it('should track token usage from response', async () => {
      const usage = { inputTokens: 25, outputTokens: 18 };

      setTimeout(() => {
        mockProcess.stdout.emit('data', JSON.stringify({
          response: 'test response',
          usage
        }));
        mockProcess.emit('exit', 0);
      }, 10);

      const result = await adapter.generateResponse('test');
      expect(result.usage).toEqual(usage);
    });

    it('should handle missing usage data gracefully', async () => {
      setTimeout(() => {
        mockProcess.stdout.emit('data', JSON.stringify({
          response: 'test response'
          // No usage field
        }));
        mockProcess.emit('exit', 0);
      }, 10);

      const result = await adapter.generateResponse('test');
      expect(result.usage).toBeUndefined();
    });

    it('should track processing time', async () => {
      setTimeout(() => {
        mockProcess.stdout.emit('data', JSON.stringify({ response: 'test' }));
        mockProcess.emit('exit', 0);
      }, 100);

      const result = await adapter.generateResponse('test');
      expect(result.processingTime).toBeGreaterThanOrEqual(100);
      expect(result.processingTime).toBeLessThan(200);
    });
  });

  describe('memory and performance', () => {
    it('should not leak memory with many requests', async () => {
      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        setTimeout(() => {
          process.stdout.emit('data', JSON.stringify({ response: 'ok' }));
          process.emit('exit', 0);
        }, 10);
        return process;
      });

      // Run many requests to check for memory leaks
      const promises = Array.from({ length: 50 }, (_, i) =>
        adapter.generateResponse(`request ${i}`)
      );

      const results = await Promise.all(promises);

      expect(results).toHaveLength(50);
      results.forEach(result => {
        expect(result.response).toBe('ok');
      });
    });

    it('should handle rapid sequential requests', async () => {
      mockSpawn.mockImplementation(() => {
        const process = new MockChildProcess();
        setTimeout(() => {
          process.stdout.emit('data', JSON.stringify({ response: 'fast' }));
          process.emit('exit', 0);
        }, 5);
        return process;
      });

      const results = [];
      for (let i = 0; i < 10; i++) {
        results.push(await adapter.generateResponse(`fast ${i}`));
      }

      expect(results).toHaveLength(10);
      expect(mockSpawn).toHaveBeenCalledTimes(10);
    });
  });
});
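The implementation under test is not shown on this page, but the public surface the suite exercises can be inferred from the assertions: an optional constructor config with maxConcurrentProcesses (default 3) and processTimeout (default 300000 ms), a generateResponse() method that resolves to the parsed CLI output, and a shutdown() hook. The sketch below is reconstructed from those assertions only; it is an assumption, not the actual contents of llm-adapter.ts.

// Sketch inferred from the test suite above — not the real implementation.
export interface LLMAdapterOptions {
  maxConcurrentProcesses?: number; // default 3, per the initialization tests
  processTimeout?: number;         // default 300000 ms (5 minutes)
}

export interface LLMResponse {
  response: string;
  usage?: { inputTokens: number; outputTokens: number };
  processingTime: number; // milliseconds, measured by the adapter
}

export declare class LLMAdapter {
  constructor(options?: LLMAdapterOptions);
  // Per the tests: spawns `claude --format json`, writes the prompt to stdin,
  // parses JSON from stdout, retries up to 3 times with exponential backoff,
  // and respects the concurrency limit and timeout.
  generateResponse(prompt: string): Promise<LLMResponse>;
  // Kills any in-flight child processes.
  shutdown(): void;
}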

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'
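The same request can be made from Node.js or TypeScript. This is a minimal sketch assuming Node 18+ (which provides a global fetch); the endpoint is the one shown in the curl example, and the response is simply parsed and logged since its shape is not documented here.

// Minimal sketch: fetch the directory entry shown in the curl example above.
const res = await fetch(
  'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'
);
if (!res.ok) throw new Error(`Request failed: ${res.status}`);
console.log(await res.json());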

If you have feedback or need assistance with the MCP directory API, please join our Discord server.