Skip to main content
Glama

SRT Translation MCP Server

by omd0
test-ai-workflow.js•6.04 kB
#!/usr/bin/env node /** * Test AI workflow generation and translation tools */ import { SRTTranslationMCPServer } from './dist/mcp/server.js'; import fs from 'fs'; async function testAIWorkflow() { console.log('Testing AI workflow generation and translation tools...\n'); try { // Read example SRT file const srtContent = fs.readFileSync('/home/omd/Documents/Mix/SRT-MCP/example.srt', 'utf8'); console.log('šŸ“„ Loaded SRT file with', srtContent.split('\n\n').length - 1, 'subtitle blocks\n'); // Create server instance const server = new SRTTranslationMCPServer(); // Test AI workflow generation console.log('šŸ¤– Testing AI workflow generation...'); const workflowResult = await server.handleGenerateAIWorkflow({ content: srtContent, targetLanguage: 'es', sourceLanguage: 'en' }); if (workflowResult.content && workflowResult.content[0]) { // Extract JSON from the response (skip any emoji prefix) const responseText = workflowResult.content[0].text; const jsonStart = responseText.indexOf('{'); const jsonEnd = responseText.lastIndexOf('}') + 1; const jsonText = responseText.substring(jsonStart, jsonEnd); const workflow = JSON.parse(jsonText); console.log('\nšŸ“Š AI WORKFLOW:'); console.log(`Content type: ${workflow.contentType}`); console.log(`Strategy: ${workflow.strategy}`); console.log(`Translation approach: ${workflow.translationApproach}`); console.log(`Timing strategy: ${workflow.timingStrategy}`); console.log(`Quality focus: ${workflow.qualityFocus}`); console.log('\nšŸ“ RECOMMENDATIONS:'); workflow.recommendations.forEach((rec, index) => { console.log(`${index + 1}. 
${rec}`); }); console.log('\nāœ… SUCCESS: AI workflow generation working correctly!'); } else { console.log('āŒ Error: No workflow content in response'); } // Test AI context optimized chunks console.log('\n🧠 Testing AI context optimized chunks...'); const optimizedResult = await server.handleGetAIContextOptimizedChunks({ content: srtContent, maxContextSize: 50000, maxChunkSize: 8 }); if (optimizedResult.content && optimizedResult.content[0]) { // Extract JSON from the response const responseText = optimizedResult.content[0].text; const jsonStart = responseText.indexOf('{'); const jsonEnd = responseText.lastIndexOf('}') + 1; const jsonText = responseText.substring(jsonStart, jsonEnd); const optimized = JSON.parse(jsonText); console.log('\nšŸ“Š OPTIMIZATION SUMMARY:'); console.log(`Total chunks: ${optimized.optimizationSummary.totalChunks}`); console.log(`Total context size: ${optimized.optimizationSummary.totalContextSize.toLocaleString()} characters`); console.log(`Average context size: ${optimized.optimizationSummary.avgContextSize.toLocaleString()} characters`); console.log(`Context efficiency: ${optimized.optimizationSummary.contextEfficiency}`); console.log(`Chunks under limit: ${optimized.optimizationSummary.chunksUnderLimit}`); console.log(`Chunks over limit: ${optimized.optimizationSummary.chunksOverLimit}`); console.log('\nšŸ“ PROCESSING RECOMMENDATIONS:'); optimized.aiRecommendations.processingOrder.slice(0, 3).forEach((chunk, index) => { console.log(`${index + 1}. 
Chunk ${chunk.id}: ${chunk.priority} priority, ${chunk.contextSize} chars - ${chunk.reason}`); }); console.log('\nāœ… SUCCESS: AI context optimization working correctly!'); } else { console.log('āŒ Error: No optimization content in response'); } // Test translate_srt (structured data for AI translation) console.log('\nšŸ“Š Testing translate_srt (structured data for AI)...'); const translateResult = await server.handleTranslateSRT({ content: srtContent, targetLanguage: 'es', sourceLanguage: 'en' }); if (translateResult.content && translateResult.content[0]) { // Extract JSON from the response const responseText = translateResult.content[0].text; const jsonStart = responseText.indexOf('{'); const jsonEnd = responseText.lastIndexOf('}') + 1; const jsonText = responseText.substring(jsonStart, jsonEnd); const translationData = JSON.parse(jsonText); console.log('\nšŸ“Š TRANSLATION DATA:'); console.log(`Translation request: ${translationData.translationRequest.targetLanguage} from ${translationData.translationRequest.sourceLanguage}`); console.log(`File structure: ${translationData.fileStructure.totalSubtitles} subtitles, ${translationData.fileStructure.totalChunks} chunks`); console.log(`Total duration: ${(translationData.fileStructure.totalDuration / 1000).toFixed(2)} seconds`); console.log(`Has errors: ${translationData.fileStructure.hasErrors}`); console.log('\nšŸ“ CHUNKS FOR TRANSLATION:'); translationData.chunks.slice(0, 2).forEach((chunk, index) => { console.log(`\nChunk ${index + 1}:`); console.log(` ID: ${chunk.id}`); console.log(` Subtitles: ${chunk.subtitleCount}`); console.log(` Speaker: ${chunk.speaker || 'none'}`); console.log(` Language: ${chunk.languageInfo.primary}`); console.log(` Content type: ${chunk.contentType}`); console.log(` Complexity: ${chunk.complexity}`); console.log(` Priority: ${chunk.translationPriority}`); console.log(` Subtitles in chunk: ${chunk.subtitles.length}`); }); console.log('\nāœ… SUCCESS: Translation structured data working 
correctly!'); } else { console.log('āŒ Error: No translation content in response'); } } catch (error) { console.error('āŒ Test failed:', error.message); console.error('Stack trace:', error.stack); } } // Run the test testAIWorkflow().catch(console.error);

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.