Skip to main content
Glama

SRT Translation MCP Server

by omd0
ai-integration-example.ts•16.8 kB
/** * AI Integration Example * Demonstrates how to use the SRT chunking functions with Claude, GPT, Gemini, and other AI models */ import { readFile, writeFile } from 'fs/promises'; import { UnifiedAIFactory } from '../src/integration/unified-ai-interface.js'; import { SRTProcessingTodoManager } from '../src/integration/todo-tool-integration.js'; import { ContextOptimizationFactory } from '../src/integration/context-optimization.js'; import { parseSRTFile, writeSRTFile } from '../src/parsers/srt-parser.js'; /** * Example 1: Basic Claude Integration with Todo Tool */ async function exampleClaudeWithTodos() { console.log('=== Claude Integration with Todo Tool ==='); try { // Parse SRT file const srtContent = await readFile('examples/sample.srt', 'utf8'); const parseResult = parseSRTFile(srtContent); if (!parseResult.success || !parseResult.file) { throw new Error('Failed to parse SRT file'); } // Create Claude interface const claudeInterface = UnifiedAIFactory.createUnifiedAI('claude'); // Create todo manager for progress tracking const todoManager = new SRTProcessingTodoManager('claude'); // Create processing todos const todoResult = await todoManager.createSRTProcessingTodos( 'sample.srt', parseResult.file.subtitles.length, 'translation', 'es' ); console.log('Todos created:', todoResult.success); console.log('Todo IDs:', todoResult.todoIds); // Process with Claude const result = await claudeInterface.processSRT({ subtitles: parseResult.file.subtitles, processingType: 'translation', targetLanguage: 'es', sourceLanguage: 'auto-detect', options: { useReasoning: true, temperature: 0.7, maxTokens: 4000, contextOptimization: true, preserveFormatting: true } }); // Update todos await todoManager.updateProcessingProgress('file-analysis', 'completed'); await todoManager.updateProcessingProgress('chunk-detection', 'completed'); await todoManager.updateProcessingProgress('processing', 'completed'); await todoManager.updateProcessingProgress('validation', 'completed'); // Get 
final statistics const stats = await todoManager.getProcessingStatistics(); console.log('Claude processing completed:'); console.log(' Success:', result.success); console.log(' Chunks processed:', result.chunks.length); console.log(' Processing time:', result.processingTime, 'ms'); console.log(' Context efficiency:', result.metadata.contextEfficiency); console.log(' Todos completed:', stats.completed); console.log(' Completion rate:', stats.completionRate); return result; } catch (error) { console.error('Claude processing failed:', error); throw error; } } /** * Example 2: Multi-Model Comparison */ async function exampleMultiModelComparison() { console.log('=== Multi-Model Comparison ==='); try { // Parse SRT file const srtContent = await readFile('examples/sample.srt', 'utf8'); const parseResult = parseSRTFile(srtContent); if (!parseResult.success || !parseResult.file) { throw new Error('Failed to parse SRT file'); } const models = ['claude', 'gpt', 'gemini'] as const; const results = new Map(); for (const model of models) { console.log(`\nProcessing with ${model}...`); const aiInterface = UnifiedAIFactory.createUnifiedAI(model); const startTime = Date.now(); const result = await aiInterface.processSRT({ subtitles: parseResult.file.subtitles, processingType: 'analysis', options: { contextOptimization: true, temperature: 0.7 } }); const processingTime = Date.now() - startTime; results.set(model, { processingTime, contextEfficiency: result.metadata.contextEfficiency, chunksProcessed: result.chunks.length, errors: result.errors.length, warnings: result.warnings.length, success: result.success }); console.log(` ${model} completed in ${processingTime}ms`); } // Compare results console.log('\n=== Model Comparison Results ==='); for (const [model, metrics] of results) { console.log(`\n${model.toUpperCase()}:`); console.log(` Success: ${metrics.success}`); console.log(` Processing time: ${metrics.processingTime}ms`); console.log(` Context efficiency: 
${metrics.contextEfficiency.toFixed(2)}`); console.log(` Chunks processed: ${metrics.chunksProcessed}`); console.log(` Errors: ${metrics.errors}, Warnings: ${metrics.warnings}`); } return results; } catch (error) { console.error('Multi-model comparison failed:', error); throw error; } } /** * Example 3: Context Optimization */ async function exampleContextOptimization() { console.log('=== Context Optimization Example ==='); try { // Parse SRT file const srtContent = await readFile('examples/sample.srt', 'utf8'); const parseResult = parseSRTFile(srtContent); if (!parseResult.success || !parseResult.file) { throw new Error('Failed to parse SRT file'); } // Create context optimizer const optimizer = ContextOptimizationFactory.createOptimizer('claude'); // Detect conversations first const { detectConversations } = await import('../src/chunking/conversation-detector.js'); const chunks = detectConversations(parseResult.file.subtitles); console.log('Original chunks:', chunks.length); // Optimize chunks for AI context const optimizationResult = await optimizer.optimizeChunksForAI(chunks, 'translation'); console.log('Optimization completed:'); console.log(' Original chunks:', optimizationResult.originalChunks.length); console.log(' Optimized chunks:', optimizationResult.optimizedChunks.length); console.log(' Context efficiency:', optimizationResult.performanceMetrics.contextEfficiency); console.log(' Chunking quality:', optimizationResult.performanceMetrics.chunkingQuality); console.log(' Speaker consistency:', optimizationResult.performanceMetrics.speakerConsistency); console.log(' Topic coherence:', optimizationResult.performanceMetrics.topicCoherence); console.log(' Overall score:', optimizationResult.performanceMetrics.overallScore); // Show context analysis const analysis = optimizationResult.contextAnalysis; console.log('\nContext Analysis:'); console.log(' Total context size:', analysis.totalContextSize); console.log(' Available context:', analysis.availableContext); 
console.log(' Context efficiency:', analysis.contextEfficiency); console.log(' Complexity score:', analysis.complexityScore); console.log(' Speaker consistency:', analysis.speakerConsistency); console.log(' Topic coherence:', analysis.topicCoherence); // Show optimization suggestions if (analysis.optimizationSuggestions.length > 0) { console.log('\nOptimization Suggestions:'); analysis.optimizationSuggestions.forEach((suggestion, index) => { console.log(` ${index + 1}. ${suggestion.type.toUpperCase()}: ${suggestion.description}`); console.log(` Priority: ${suggestion.priority}`); console.log(` Expected improvement: ${suggestion.expectedImprovement}`); console.log(` Affected chunks: ${suggestion.affectedChunks.length}`); }); } return optimizationResult; } catch (error) { console.error('Context optimization failed:', error); throw error; } } /** * Example 4: Complete Translation Workflow */ async function exampleCompleteTranslation() { console.log('=== Complete Translation Workflow ==='); try { // Parse input SRT file const srtContent = await readFile('examples/sample.srt', 'utf8'); const parseResult = parseSRTFile(srtContent); if (!parseResult.success || !parseResult.file) { throw new Error('Failed to parse SRT file'); } // Create AI interface const aiInterface = UnifiedAIFactory.createUnifiedAI('claude'); // Create todo manager const todoManager = new SRTProcessingTodoManager('claude'); // Create processing todos const todoResult = await todoManager.createSRTProcessingTodos( 'sample.srt', parseResult.file.subtitles.length, 'translation', 'es' ); console.log('Translation todos created:', todoResult.success); // Process with translation const result = await aiInterface.processSRT({ subtitles: parseResult.file.subtitles, processingType: 'translation', targetLanguage: 'es', sourceLanguage: 'auto-detect', options: { useReasoning: true, temperature: 0.7, maxTokens: 4000, contextOptimization: true, preserveFormatting: true } }); if (!result.success) { throw new 
Error('Translation failed'); } // Update todos await todoManager.updateProcessingProgress('file-analysis', 'completed'); await todoManager.updateProcessingProgress('chunk-detection', 'completed'); await todoManager.updateProcessingProgress('processing', 'completed'); await todoManager.updateProcessingProgress('validation', 'completed'); // Simulate creating translated SRT file console.log('Translation completed successfully'); console.log(' Chunks processed:', result.chunks.length); console.log(' Processing time:', result.processingTime, 'ms'); console.log(' Context efficiency:', result.metadata.contextEfficiency); console.log(' Errors:', result.errors.length); console.log(' Warnings:', result.warnings.length); // Get final statistics const stats = await todoManager.getProcessingStatistics(); console.log(' Todos completed:', stats.completed); console.log(' Completion rate:', stats.completionRate); // In a real implementation, you would: // 1. Extract translated subtitles from result.results // 2. Create new SRT file with translated content // 3. 
Write to output file console.log('\nTranslation workflow completed successfully!'); return result; } catch (error) { console.error('Translation workflow failed:', error); throw error; } } /** * Example 5: Todo Tool Management */ async function exampleTodoToolManagement() { console.log('=== Todo Tool Management Example ==='); try { // Create todo manager const todoManager = new SRTProcessingTodoManager('claude'); // Create various types of todos const fileAnalysisTodos = await todoManager.createSRTProcessingTodos( 'example.srt', 20, 'file-analysis' ); const translationTodos = await todoManager.createSRTProcessingTodos( 'example.srt', 20, 'translation', 'es' ); const analysisTodos = await todoManager.createSRTProcessingTodos( 'example.srt', 20, 'analysis' ); console.log('Todos created:'); console.log(' File analysis:', fileAnalysisTodos.success); console.log(' Translation:', translationTodos.success); console.log(' Analysis:', analysisTodos.success); // Get all todos const allTodos = await todoManager.getTodoList(); console.log('\nTotal todos:', allTodos.length); // Get todos by category const fileAnalysisTodosList = await todoManager.getTodosByStage('file-analysis'); console.log('File analysis todos:', fileAnalysisTodosList.length); // Update some todos if (allTodos.length > 0) { const firstTodo = allTodos[0]; await todoManager.updateTodoStatus(firstTodo.id, 'in_progress'); console.log(`Updated todo ${firstTodo.id} to in_progress`); // Mark as completed await todoManager.updateTodoStatus(firstTodo.id, 'completed'); console.log(`Marked todo ${firstTodo.id} as completed`); } // Get statistics const stats = await todoManager.getProcessingStatistics(); console.log('\nTodo Statistics:'); console.log(' Total:', stats.total); console.log(' Pending:', stats.pending); console.log(' In Progress:', stats.inProgress); console.log(' Completed:', stats.completed); console.log(' Cancelled:', stats.cancelled); console.log(' Blocked:', stats.blocked); console.log(' Completion 
rate:', stats.completionRate); // Show priority distribution console.log('\nPriority Distribution:'); console.log(' Low:', stats.byPriority.low); console.log(' Medium:', stats.byPriority.medium); console.log(' High:', stats.byPriority.high); console.log(' Critical:', stats.byPriority.critical); // Show category distribution console.log('\nCategory Distribution:'); Object.entries(stats.byCategory).forEach(([category, count]) => { console.log(` ${category}: ${count}`); }); return stats; } catch (error) { console.error('Todo tool management failed:', error); throw error; } } /** * Example 6: Error Handling and Recovery */ async function exampleErrorHandling() { console.log('=== Error Handling and Recovery Example ==='); try { // Create AI interface const aiInterface = UnifiedAIFactory.createUnifiedAI('claude'); // Simulate processing with potential errors const result = await aiInterface.processSRT({ subtitles: [], // Empty subtitles to trigger error processingType: 'translation', targetLanguage: 'es', options: { contextOptimization: true } }); console.log('Processing result:'); console.log(' Success:', result.success); console.log(' Errors:', result.errors.length); console.log(' Warnings:', result.warnings.length); if (!result.success) { console.log('\nError Details:'); result.errors.forEach((error, index) => { console.log(` ${index + 1}. ${error.error}`); if (error.context) { console.log(` Context: ${error.context}`); } console.log(` Retryable: ${error.retryable}`); console.log(` Timestamp: ${error.timestamp}`); }); } if (result.warnings.length > 0) { console.log('\nWarning Details:'); result.warnings.forEach((warning, index) => { console.log(` ${index + 1}. 
${warning.warning}`); if (warning.context) { console.log(` Context: ${warning.context}`); } console.log(` Timestamp: ${warning.timestamp}`); }); } // Show metadata console.log('\nProcessing Metadata:'); console.log(' Model type:', result.modelType); console.log(' Total chunks:', result.metadata.totalChunks); console.log(' Processed chunks:', result.metadata.processedChunks); console.log(' Context efficiency:', result.metadata.contextEfficiency); console.log(' Context optimization:', result.metadata.contextOptimization); console.log(' Todo integration:', result.metadata.todoIntegration); return result; } catch (error) { console.error('Error handling example failed:', error); throw error; } } /** * Main function to run all examples */ async function runAllExamples() { console.log('šŸš€ Starting AI Integration Examples\n'); try { // Example 1: Claude with Todos await exampleClaudeWithTodos(); console.log('\n' + '='.repeat(50) + '\n'); // Example 2: Multi-Model Comparison await exampleMultiModelComparison(); console.log('\n' + '='.repeat(50) + '\n'); // Example 3: Context Optimization await exampleContextOptimization(); console.log('\n' + '='.repeat(50) + '\n'); // Example 4: Complete Translation await exampleCompleteTranslation(); console.log('\n' + '='.repeat(50) + '\n'); // Example 5: Todo Tool Management await exampleTodoToolManagement(); console.log('\n' + '='.repeat(50) + '\n'); // Example 6: Error Handling await exampleErrorHandling(); console.log('\nāœ… All examples completed successfully!'); } catch (error) { console.error('āŒ Examples failed:', error); process.exit(1); } } // Run examples if this file is executed directly if (import.meta.url === `file://${process.argv[1]}`) { runAllExamples(); } export { exampleClaudeWithTodos, exampleMultiModelComparison, exampleContextOptimization, exampleCompleteTranslation, exampleTodoToolManagement, exampleErrorHandling, runAllExamples };

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.