// setup.ts
import { beforeAll, afterAll, beforeEach, afterEach, vi } from 'vitest';
import { mkdirSync, existsSync, rmSync } from 'fs';
import path from 'path';

// Test environment setup
const TEST_DATA_DIR = path.join(__dirname, '../data/test');
const TEST_CACHE_DIR = path.join(__dirname, '../.cache/test');

beforeAll(() => {
  // Create test directories
  if (!existsSync(TEST_DATA_DIR)) {
    mkdirSync(TEST_DATA_DIR, { recursive: true });
  }
  if (!existsSync(TEST_CACHE_DIR)) {
    mkdirSync(TEST_CACHE_DIR, { recursive: true });
  }

  // Set test environment variables
  process.env.NODE_ENV = 'test';
  process.env.GEPA_DATA_DIR = TEST_DATA_DIR;
  process.env.GEPA_CACHE_DIR = TEST_CACHE_DIR;
});

afterAll(() => {
  // Cleanup test directories
  if (existsSync(TEST_DATA_DIR)) {
    rmSync(TEST_DATA_DIR, { recursive: true, force: true });
  }
  if (existsSync(TEST_CACHE_DIR)) {
    rmSync(TEST_CACHE_DIR, { recursive: true, force: true });
  }
});

beforeEach(() => {
  // Reset mock state before each test (Vitest's equivalent of jest.clearAllMocks)
  vi.clearAllMocks();
});

afterEach(() => {
  // Cleanup after each test
  // Add any test-specific cleanup here
});

// Global test utilities
// Note: augmenting NodeJS.Global no longer works with current @types/node;
// declaring a global `var` is the supported way to type properties on globalThis.
declare global {
  // eslint-disable-next-line no-var
  var testUtils: {
    createMockPromptCandidate: () => any;
    createMockTrajectory: () => any;
    createMockReflection: () => any;
  };
}

// Mock utilities for testing GEPA components
global.testUtils = {
  createMockPromptCandidate: () => ({
    id: 'test-prompt-1',
    content: 'Test prompt content',
    generation: 1,
    taskPerformance: new Map([
      ['task1', 0.8],
      ['task2', 0.9],
    ]),
    averageScore: 0.85,
    rolloutCount: 5,
    createdAt: new Date(),
    lastEvaluated: new Date(),
    mutationType: 'initial' as const,
  }),
  createMockTrajectory: () => ({
    id: 'test-trajectory-1',
    promptId: 'test-prompt-1',
    taskId: 'test-task-1',
    timestamp: new Date(),
    steps: [
      {
        stepNumber: 1,
        action: 'test action',
        reasoning: 'test reasoning',
        timestamp: new Date(),
      },
    ],
    finalResult: {
      success: true,
      score: 0.9,
      output: 'test output',
    },
    llmCalls: [],
    toolCalls: [],
    totalTokens: 100,
    executionTime: 1000,
  }),
  createMockReflection: () => ({
    trajectoryId: 'test-trajectory-1',
    promptId: 'test-prompt-1',
    diagnosis: {
      failurePoint: 'step 2',
      rootCause: 'insufficient context',
      moduleResponsibility: new Map([['query_generator', 0.7]]),
      patterns: [],
    },
    suggestions: [
      {
        type: 'add_instruction' as const,
        targetSection: 'main prompt',
        proposedChange: 'Add more context',
        rationale: 'Better context leads to better results',
        expectedImpact: 0.2,
      },
    ],
    confidence: 0.8,
    rationale: 'Test reflection analysis',
  }),
};
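
// Illustrative sketch of how a spec might consume these helpers. The test name
// and assertions below are assumptions for documentation purposes only (they
// exercise fields defined in the mocks above), not part of this setup file:
//
//   import { describe, expect, it } from 'vitest';
//
//   describe('candidate mocks', () => {
//     it('exposes the precomputed scores of the mock prompt candidate', () => {
//       const candidate = global.testUtils.createMockPromptCandidate();
//       expect(candidate.averageScore).toBeCloseTo(0.85);
//       expect(candidate.taskPerformance.get('task1')).toBe(0.8);
//     });
//   });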