
Prompt Auto-Optimizer MCP

by sloth-wq
setup.ts • 5.72 kB
/**
 * Vitest global setup file
 * Configures the testing environment for all unit tests
 */
import { beforeAll, afterAll, beforeEach, afterEach, vi } from 'vitest';
import { promises as fs } from 'fs';
import { resolve } from 'path';
import '@testing-library/jest-dom';

// Global test configuration
const TEST_DATA_DIR = resolve(process.cwd(), 'src/test/fixtures');
const TEMP_DIR = resolve(process.cwd(), 'temp/test');

/**
 * Global setup - runs once before all tests
 */
beforeAll(async (): Promise<void> => {
  // Ensure test directories exist
  await fs.mkdir(TEST_DATA_DIR, { recursive: true });
  await fs.mkdir(TEMP_DIR, { recursive: true });

  // Set environment variables for testing
  process.env.NODE_ENV = 'test';
  process.env.GEPA_TEST_MODE = 'unit';
  process.env.GEPA_DATA_DIR = TEMP_DIR;
  process.env.CLAUDE_CODE_EXECUTABLE = 'echo'; // Mock Claude Code

  // eslint-disable-next-line no-console
  console.log('🧪 Test environment initialized');
});

/**
 * Global cleanup - runs once after all tests
 */
afterAll(async (): Promise<void> => {
  // Clean up temporary test files
  try {
    await fs.rm(TEMP_DIR, { recursive: true, force: true });
  } catch {
    // Ignore cleanup errors
  }

  // eslint-disable-next-line no-console
  console.log('🧹 Test environment cleaned up');
});

/**
 * Before each test - runs before every individual test
 */
beforeEach((): void => {
  // Reset all mocks before each test
  vi.clearAllMocks();
  vi.restoreAllMocks();

  // Mock console methods to reduce test noise
  vi.spyOn(console, 'log').mockImplementation(() => {});
  vi.spyOn(console, 'info').mockImplementation(() => {});
  vi.spyOn(console, 'warn').mockImplementation(() => {});
  // Don't mock console.error by default - errors should be visible
});

/**
 * After each test - runs after every individual test
 */
afterEach((): void => {
  // Restore console methods after each test
  vi.restoreAllMocks();
});

// Global test utilities
declare global {
  // eslint-disable-next-line no-var
  var testUtils: {
    createTempFile: (content: string, filename?: string) => Promise<string>;
    readTempFile: (filepath: string) => Promise<string>;
    createMockTrajectory: (overrides?: Record<string, unknown>) => Record<string, unknown>;
    createMockPromptCandidate: (overrides?: Record<string, unknown>) => Record<string, unknown>;
    delay: (ms: number) => Promise<void>;
  };
}

globalThis.testUtils = {
  /**
   * Create a temporary test file
   */
  async createTempFile(content: string, filename?: string): Promise<string> {
    const filepath = resolve(TEMP_DIR, filename || `test-${Date.now()}.txt`);
    await fs.writeFile(filepath, content, 'utf-8');
    return filepath;
  },

  /**
   * Read a temporary test file
   */
  async readTempFile(filepath: string): Promise<string> {
    return await fs.readFile(filepath, 'utf-8');
  },

  /**
   * Create a mock execution trajectory for testing
   */
  createMockTrajectory(overrides: Record<string, unknown> = {}): Record<string, unknown> {
    return {
      id: 'test-trajectory-123',
      promptId: 'test-prompt-456',
      taskId: 'test-task-789',
      timestamp: new Date(),
      steps: [
        {
          stepNumber: 1,
          action: 'analyze_task',
          reasoning: 'Starting task analysis',
          timestamp: new Date(),
        },
      ],
      finalResult: {
        success: true,
        score: 0.85,
        output: 'Task completed successfully',
      },
      llmCalls: [
        {
          model: 'claude-3',
          prompt: 'Test prompt',
          response: 'Test response',
          tokens: { prompt: 100, completion: 50 },
          latency: 1500,
        },
      ],
      toolCalls: [],
      totalTokens: 150,
      executionTime: 2000,
      ...overrides,
    };
  },

  /**
   * Create a mock prompt candidate for testing
   */
  createMockPromptCandidate(overrides: Record<string, unknown> = {}): Record<string, unknown> {
    return {
      id: 'test-prompt-123',
      content: 'You are a helpful assistant. Complete the task.',
      parentId: 'parent-prompt-456',
      generation: 1,
      taskPerformance: new Map([
        ['task1', 0.85],
        ['task2', 0.92],
      ]),
      averageScore: 0.885,
      rolloutCount: 5,
      createdAt: new Date(),
      lastEvaluated: new Date(),
      mutationType: 'initial' as const,
      ...overrides,
    };
  },

  /**
   * Utility function to add delays in tests
   */
  async delay(ms: number): Promise<void> {
    return new Promise<void>(resolve => setTimeout(resolve, ms));
  },
};

// Mock external dependencies
vi.mock('child_process', () => ({
  spawn: vi.fn(() => ({
    stdin: {
      write: vi.fn().mockReturnValue(true),
      end: vi.fn().mockReturnValue(undefined),
    },
    stdout: {
      on: vi.fn().mockReturnValue(undefined),
    },
    stderr: {
      on: vi.fn().mockReturnValue(undefined),
    },
    on: vi.fn().mockReturnValue(undefined),
    kill: vi.fn().mockReturnValue(true),
    pid: 12345,
  })),
}));

// Mock file system operations for consistent testing
vi.mock('fs/promises', async () => {
  const actual = await vi.importActual('fs/promises');
  return {
    ...actual,
    writeFile: vi.fn().mockResolvedValue(undefined),
    readFile: vi.fn().mockResolvedValue('{}'),
    mkdir: vi.fn().mockResolvedValue(undefined),
    rm: vi.fn().mockResolvedValue(undefined),
    readdir: vi.fn().mockResolvedValue([]),
    stat: vi.fn().mockResolvedValue({ isFile: () => true, isDirectory: () => false }),
    rename: vi.fn().mockResolvedValue(undefined),
  };
});

export {};
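For context, here is a minimal sketch of how a setup file like this might be wired in and consumed. The config path, spec filename, and assertions below are hypothetical, since the repository's actual Vitest config and test suites are not shown on this page.

// vitest.config.ts (hypothetical): the setup file only takes effect if it is
// listed under test.setupFiles; the path below is a guess based on TEST_DATA_DIR.
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    setupFiles: ['./src/test/setup.ts'],
  },
});

And a hypothetical spec exercising the globals the setup file installs:

// example.spec.ts (hypothetical)
import { describe, it, expect } from 'vitest';

describe('testUtils sketch', () => {
  it('builds a mock trajectory, with overrides taking precedence', () => {
    const trajectory = globalThis.testUtils.createMockTrajectory({ totalTokens: 500 });
    expect(trajectory.totalTokens).toBe(500);
    expect(trajectory.id).toBe('test-trajectory-123'); // defaults preserved
  });

  it('round-trips a temp file', async () => {
    // Note: testUtils uses the `promises` API re-exported by 'fs', which the
    // vi.mock of 'fs/promises' above does not intercept, so this writes to
    // real disk under temp/test and is cleaned up in afterAll.
    const path = await globalThis.testUtils.createTempFile('hello');
    await expect(globalThis.testUtils.readTempFile(path)).resolves.toBe('hello');
  });
});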

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'
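The same endpoint can be queried from code. A minimal sketch in TypeScript, assuming Node 18+ (which ships a global fetch) and an ES module context for top-level await; the response shape is not documented on this page, so it is treated as unknown:

// fetch-server.ts (hypothetical): look up this server in the MCP directory.
const res = await fetch(
  'https://glama.ai/api/mcp/v1/servers/sloth-wq/prompt-auto-optimizer-mcp'
);
if (!res.ok) throw new Error(`Request failed with status ${res.status}`);
// The payload shape is not documented here, so avoid assuming fields.
const server: unknown = await res.json();
console.log(server);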

If you have feedback or need assistance with the MCP directory API, please join our Discord server.