// ai-integration.test.ts
/**
* AI Integration Tests
* Tests for the unified AI interface and chunking functions integration
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { UnifiedAIFactory } from '../src/integration/unified-ai-interface.js';
import { SRTProcessingTodoManager } from '../src/integration/todo-tool-integration.js';
import { ContextOptimizationFactory } from '../src/integration/context-optimization.js';
import { parseSRTFile } from '../src/parsers/srt-parser.js';
// Mock SRT data for testing: eight cues, the last five forming a short
// two-speaker conversation. Cues are separated by blank lines, as the
// SubRip format requires (the previous fixture ran the cues together,
// which a spec-conformant SRT parser rejects).
const mockSRTContent = `1
00:00:01,000 --> 00:00:03,000
Hello, this is a test subtitle.

2
00:00:03,000 --> 00:00:05,000
This is the second subtitle.

3
00:00:05,000 --> 00:00:07,000
And this is the third subtitle.

4
00:00:07,000 --> 00:00:09,000
John: How are you doing today?

5
00:00:09,000 --> 00:00:11,000
Mary: I'm doing great, thank you!

6
00:00:11,000 --> 00:00:13,000
John: That's wonderful to hear.

7
00:00:13,000 --> 00:00:15,000
Mary: What about you?

8
00:00:15,000 --> 00:00:17,000
John: I'm doing well too, thanks for asking.
`;
describe('AI Integration Tests', () => {
// Shared fixture: subtitle objects parsed from the mock SRT content above.
let mockSubtitles: any[];

beforeEach(async () => {
  // Re-parse the fixture before every test so state cannot leak between tests.
  const parsed = parseSRTFile(mockSRTContent);
  expect(parsed.success).toBe(true);
  expect(parsed.file).toBeDefined();
  mockSubtitles = parsed.file!.subtitles;
});

afterEach(() => {
  // Nothing to tear down yet; kept as an explicit hook for future cleanup.
});
describe('Unified AI Interface', () => {
  // Each factory call should yield an interface whose config reflects the
  // requested model type and its advertised capabilities.
  it('should create Claude interface', () => {
    const ai = UnifiedAIFactory.createUnifiedAI('claude');
    expect(ai).toBeDefined();
    expect(ai.config.modelType).toBe('claude');
    expect(ai.config.capabilities.supportsTodoTool).toBe(true);
  });
  it('should create GPT interface', () => {
    const ai = UnifiedAIFactory.createUnifiedAI('gpt');
    expect(ai).toBeDefined();
    expect(ai.config.modelType).toBe('gpt');
    expect(ai.config.capabilities.supportsFunctionCalling).toBe(true);
  });
  it('should create Gemini interface', () => {
    const ai = UnifiedAIFactory.createUnifiedAI('gemini');
    expect(ai).toBeDefined();
    expect(ai.config.modelType).toBe('gemini');
    expect(ai.config.capabilities.supportsMultimodal).toBe(true);
  });
  it('should create generic interface', () => {
    // The generic fallback advertises the weakest capability set.
    const ai = UnifiedAIFactory.createUnifiedAI('generic');
    expect(ai).toBeDefined();
    expect(ai.config.modelType).toBe('generic');
    expect(ai.config.capabilities.supportsTodoTool).toBe(false);
  });
});
describe('SRT Processing', () => {
  // End-to-end processSRT calls per model; each should report its own
  // modelType and produce chunks plus metadata.
  it('should process SRT with Claude', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('claude');
    const result = await ai.processSRT({
      subtitles: mockSubtitles,
      processingType: 'translation',
      targetLanguage: 'es',
      options: { contextOptimization: true }
    });
    expect(result).toBeDefined();
    expect(result.modelType).toBe('claude');
    expect(result.chunks).toBeDefined();
    expect(result.metadata).toBeDefined();
    expect(result.metadata.modelCapabilities).toBeDefined();
  });
  it('should process SRT with GPT', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('gpt');
    const result = await ai.processSRT({
      subtitles: mockSubtitles,
      processingType: 'analysis',
      options: { contextOptimization: true }
    });
    expect(result).toBeDefined();
    expect(result.modelType).toBe('gpt');
    expect(result.chunks).toBeDefined();
    expect(result.metadata.contextOptimization).toBe(true);
  });
  it('should process SRT with Gemini', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('gemini');
    const result = await ai.processSRT({
      subtitles: mockSubtitles,
      processingType: 'conversation-detection',
      options: { contextOptimization: true }
    });
    expect(result).toBeDefined();
    expect(result.modelType).toBe('gemini');
    expect(result.chunks).toBeDefined();
    expect(result.metadata.chunkingStrategy).toBeDefined();
  });
  it('should handle processing errors gracefully', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('claude');
    // An empty subtitle list should be reported as a failure, not thrown.
    const result = await ai.processSRT({
      subtitles: [],
      processingType: 'translation',
      targetLanguage: 'es'
    });
    expect(result.success).toBe(false);
    expect(result.errors).toBeDefined();
    expect(result.errors.length).toBeGreaterThan(0);
  });
});
describe('Todo Tool Integration', () => {
  it('should create SRT processing todos', async () => {
    const manager = new SRTProcessingTodoManager('claude');
    const result = await manager.createSRTProcessingTodos(
      'test.srt',
      mockSubtitles.length,
      'translation',
      'es'
    );
    expect(result.success).toBe(true);
    expect(result.todoIds).toBeDefined();
    expect(result.todoIds.length).toBeGreaterThan(0);
  });
  it('should update processing progress', async () => {
    const manager = new SRTProcessingTodoManager('claude');
    // Seed the manager with todos, then drive two stages forward.
    await manager.createSRTProcessingTodos('test.srt', mockSubtitles.length, 'translation', 'es');
    await manager.updateProcessingProgress('file-analysis', 'completed');
    await manager.updateProcessingProgress('chunk-detection', 'in_progress');
    // Statistics should still reflect the created todos afterwards.
    const stats = await manager.getProcessingStatistics();
    expect(stats).toBeDefined();
    expect(stats.total).toBeGreaterThan(0);
  });
  it('should get processing statistics', async () => {
    const manager = new SRTProcessingTodoManager('claude');
    // 'analysis' runs need no target language, so the fourth arg is omitted.
    await manager.createSRTProcessingTodos('test.srt', mockSubtitles.length, 'analysis');
    const stats = await manager.getProcessingStatistics();
    expect(stats).toBeDefined();
    expect(stats.total).toBeGreaterThan(0);
    expect(stats.pending).toBeGreaterThan(0);
    expect(stats.byPriority).toBeDefined();
    expect(stats.byCategory).toBeDefined();
  });
  it('should get todos by stage', async () => {
    const manager = new SRTProcessingTodoManager('claude');
    await manager.createSRTProcessingTodos('test.srt', mockSubtitles.length, 'translation', 'es');
    const todos = await manager.getTodosByStage('file-analysis');
    expect(todos).toBeDefined();
    expect(todos.length).toBeGreaterThan(0);
  });
});
describe('Context Optimization', () => {
  it('should optimize chunks for Claude', async () => {
    const optimizer = ContextOptimizationFactory.createOptimizer('claude');
    // Two chunks from the same conversation, one per speaker.
    const chunks = [
      {
        id: 'chunk-1',
        startIndex: 0,
        endIndex: 2,
        subtitles: mockSubtitles.slice(0, 3),
        context: { speaker: 'John', conversationId: 'conv-1' }
      },
      {
        id: 'chunk-2',
        startIndex: 3,
        endIndex: 5,
        subtitles: mockSubtitles.slice(3, 6),
        context: { speaker: 'Mary', conversationId: 'conv-1' }
      }
    ];
    const result = await optimizer.optimizeChunksForAI(chunks, 'translation');
    expect(result).toBeDefined();
    expect(result.originalChunks).toBeDefined();
    expect(result.optimizedChunks).toBeDefined();
    expect(result.contextAnalysis).toBeDefined();
    expect(result.performanceMetrics).toBeDefined();
  });
  it('should optimize chunks for GPT', async () => {
    const optimizer = ContextOptimizationFactory.createOptimizer('gpt');
    const chunks = [{
      id: 'chunk-1',
      startIndex: 0,
      endIndex: 2,
      subtitles: mockSubtitles.slice(0, 3),
      context: { speaker: 'John' }
    }];
    const result = await optimizer.optimizeChunksForAI(chunks, 'analysis');
    expect(result).toBeDefined();
    expect(result.optimizedChunks).toBeDefined();
    expect(result.contextAnalysis.totalContextSize).toBeGreaterThan(0);
    expect(result.performanceMetrics.contextEfficiency).toBeGreaterThan(0);
  });
  it('should optimize chunks for Gemini', async () => {
    const optimizer = ContextOptimizationFactory.createOptimizer('gemini');
    const chunks = [{
      id: 'chunk-1',
      startIndex: 0,
      endIndex: 2,
      subtitles: mockSubtitles.slice(0, 3),
      context: { speaker: 'John' }
    }];
    const result = await optimizer.optimizeChunksForAI(chunks, 'conversation-detection');
    expect(result).toBeDefined();
    expect(result.optimizedChunks).toBeDefined();
    expect(result.performanceMetrics.overallScore).toBeGreaterThan(0);
  });
  it('should handle empty chunks gracefully', async () => {
    const optimizer = ContextOptimizationFactory.createOptimizer('claude');
    // No chunks in: the optimizer should return empty results, not throw.
    const result = await optimizer.optimizeChunksForAI([], 'translation');
    expect(result).toBeDefined();
    expect(result.originalChunks).toEqual([]);
    expect(result.optimizedChunks).toEqual([]);
    expect(result.contextAnalysis.chunkCount).toBe(0);
  });
});
describe('Model-Specific Configurations', () => {
  // Pin the per-model defaults so accidental config changes are caught.
  it('should have correct Claude configuration', () => {
    const { modelType, capabilities, chunkingOptions } = UnifiedAIFactory.createClaudeConfig();
    expect(modelType).toBe('claude');
    expect(capabilities.supportsTodoTool).toBe(true);
    expect(capabilities.supportsReasoning).toBe(true);
    expect(capabilities.maxContextSize).toBe(200000);
    expect(chunkingOptions.maxChunkSize).toBe(15);
  });
  it('should have correct GPT configuration', () => {
    const { modelType, capabilities, chunkingOptions } = UnifiedAIFactory.createGPTConfig();
    expect(modelType).toBe('gpt');
    expect(capabilities.supportsFunctionCalling).toBe(true);
    expect(capabilities.supportsMultimodal).toBe(true);
    expect(capabilities.maxContextSize).toBe(128000);
    expect(chunkingOptions.maxChunkSize).toBe(20);
  });
  it('should have correct Gemini configuration', () => {
    const { modelType, capabilities, chunkingOptions } = UnifiedAIFactory.createGeminiConfig();
    expect(modelType).toBe('gemini');
    expect(capabilities.supportsMultimodal).toBe(true);
    expect(capabilities.supportsReasoning).toBe(true);
    expect(capabilities.maxContextSize).toBe(1000000);
    expect(chunkingOptions.maxChunkSize).toBe(25);
  });
  it('should have correct generic configuration', () => {
    const { modelType, capabilities, chunkingOptions } = UnifiedAIFactory.createGenericConfig();
    expect(modelType).toBe('generic');
    expect(capabilities.supportsTodoTool).toBe(false);
    expect(capabilities.supportsFunctionCalling).toBe(false);
    expect(capabilities.maxContextSize).toBe(50000);
    expect(chunkingOptions.maxChunkSize).toBe(10);
  });
});
describe('Error Handling', () => {
  it('should handle invalid model type', () => {
    // Unknown model names are expected to fall back rather than throw.
    const build = () => {
      UnifiedAIFactory.createUnifiedAI('invalid' as any);
    };
    expect(build).not.toThrow();
  });
  it('should handle processing errors', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('claude');
    const result = await ai.processSRT({
      subtitles: null as any, // deliberately invalid input
      processingType: 'translation',
      targetLanguage: 'es'
    });
    expect(result.success).toBe(false);
    expect(result.errors).toBeDefined();
    expect(result.errors.length).toBeGreaterThan(0);
  });
  it('should handle todo creation errors', async () => {
    const manager = new SRTProcessingTodoManager('claude');
    // Every argument is invalid: empty filename, negative chunk count,
    // bogus processing type, bogus target language.
    const result = await manager.createSRTProcessingTodos('', -1, 'invalid' as any, 'invalid');
    expect(result.success).toBe(false);
  });
});
describe('Performance Tests', () => {
  it('should process chunks efficiently', async () => {
    const claudeInterface = UnifiedAIFactory.createUnifiedAI('claude');
    const startTime = Date.now();
    const result = await claudeInterface.processSRT({
      subtitles: mockSubtitles,
      processingType: 'translation',
      targetLanguage: 'es',
      options: {
        contextOptimization: true
      }
    });
    const processingTime = Date.now() - startTime;
    expect(result.success).toBe(true);
    expect(processingTime).toBeLessThan(5000); // Should complete within 5 seconds
    // Millisecond-resolution clocks can legitimately report 0 for a
    // sub-millisecond run, so only require a non-negative duration
    // (the previous strict `> 0` check was flaky on fast machines).
    expect(result.processingTime).toBeGreaterThanOrEqual(0);
  });
  it('should optimize context efficiently', async () => {
    const optimizer = ContextOptimizationFactory.createOptimizer('claude');
    const mockChunks = [
      {
        id: 'chunk-1',
        startIndex: 0,
        endIndex: 2,
        subtitles: mockSubtitles.slice(0, 3),
        context: { speaker: 'John' }
      }
    ];
    const startTime = Date.now();
    const result = await optimizer.optimizeChunksForAI(mockChunks, 'translation');
    const optimizationTime = Date.now() - startTime;
    expect(result).toBeDefined();
    expect(optimizationTime).toBeLessThan(2000); // Should complete within 2 seconds
    // Same flakiness fix as above: a fast run may measure 0 ms.
    expect(result.performanceMetrics.totalProcessingTime).toBeGreaterThanOrEqual(0);
  });
});
describe('Integration Tests', () => {
  it('should integrate todo tool with AI processing', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('claude');
    const manager = new SRTProcessingTodoManager('claude');
    // Stage 1: plan the work as todos.
    const todoResult = await manager.createSRTProcessingTodos(
      'test.srt',
      mockSubtitles.length,
      'translation',
      'es'
    );
    expect(todoResult.success).toBe(true);
    // Stage 2: run the processing itself; it should carry a todo list.
    const result = await ai.processSRT({
      subtitles: mockSubtitles,
      processingType: 'translation',
      targetLanguage: 'es',
      options: { contextOptimization: true }
    });
    expect(result.success).toBe(true);
    expect(result.todoList).toBeDefined();
  });
  it('should integrate context optimization with AI processing', async () => {
    const ai = UnifiedAIFactory.createUnifiedAI('claude');
    const optimizer = ContextOptimizationFactory.createOptimizer('claude');
    // Optimize a single chunk up front.
    const chunks = [{
      id: 'chunk-1',
      startIndex: 0,
      endIndex: 2,
      subtitles: mockSubtitles.slice(0, 3),
      context: { speaker: 'John' }
    }];
    const optimizationResult = await optimizer.optimizeChunksForAI(chunks, 'translation');
    expect(optimizationResult).toBeDefined();
    // Then run full processing with context optimization enabled.
    const result = await ai.processSRT({
      subtitles: mockSubtitles,
      processingType: 'translation',
      targetLanguage: 'es',
      options: { contextOptimization: true }
    });
    expect(result.success).toBe(true);
    expect(result.metadata.contextOptimization).toBe(true);
  });
});
});