Skip to main content
Glama

SRT Translation MCP Server

by omd0
process-complete-workflow.js•3.6 kB
// process-complete-workflow.js
// Splits Example.srt into ~500-subtitle chunk files and prints the
// mcp_srt-mcp_detect_conversations commands needed to translate each chunk.
import fs from 'fs';

// Maximum subtitles written into each chunk file handed to the MCP server.
const CHUNK_SIZE = 500;
// Rough number of conversation chunks the MCP server derives per file chunk.
const CONVERSATION_CHUNKS_PER_FILE = 12;

/**
 * Split raw SRT text into chunks of at most `chunkSize` subtitles each,
 * never cutting a single subtitle (index + timing + text lines) across a
 * chunk boundary.
 *
 * @param {string} srtContent - Full contents of an .srt file.
 * @param {number} [chunkSize=CHUNK_SIZE] - Maximum subtitles per chunk.
 * @returns {{chunks: Array<{index: number, content: string,
 *            subtitleCount: number, startSubtitle: number,
 *            endSubtitle: number}>, subtitleCount: number}}
 *          The chunks plus the total subtitle count across the whole file.
 */
function chunkSrt(srtContent, chunkSize = CHUNK_SIZE) {
  // NOTE(review): a line consisting only of digits is treated as a subtitle
  // index line. A subtitle *text* line that is purely numeric would be
  // miscounted — the original script shared this limitation; confirm it is
  // acceptable for the input files in use.
  const isIndexLine = (line) => /^\d+$/.test(line.trim());

  const chunks = [];
  let currentLines = [];
  let inChunk = 0; // subtitles accumulated in the current chunk
  let total = 0;   // subtitles seen so far across all chunks

  // Push the accumulated lines as one chunk and reset the accumulator.
  const flush = () => {
    chunks.push({
      index: chunks.length,
      content: currentLines.join('\n'),
      subtitleCount: inChunk,
      startSubtitle: total - inChunk + 1,
      endSubtitle: total,
    });
    currentLines = [];
    inChunk = 0;
  };

  for (const line of srtContent.split('\n')) {
    // Cut BEFORE the index line that would start subtitle chunkSize + 1, so
    // a subtitle's index, timing and text always stay in the same chunk
    // (the original cut right after the index line, splitting the subtitle).
    if (isIndexLine(line) && inChunk === chunkSize) flush();
    currentLines.push(line);
    if (isIndexLine(line)) {
      inChunk += 1;
      total += 1;
    }
  }
  if (inChunk > 0) flush(); // final partial chunk

  return { chunks, subtitleCount: total };
}

// Filename convention shared by the save step and the printed commands.
const filenameFor = (chunk) =>
  `chunk-${chunk.index}-${chunk.startSubtitle}-${chunk.endSubtitle}.srt`;

/** Run the full workflow: read, chunk, save, and print MCP commands. */
function main() {
  console.log('🎬 COMPLETE SRT TRANSLATION WORKFLOW');
  console.log('=====================================');

  console.log('📖 Reading complete Example.srt file...');
  if (!fs.existsSync('Example.srt')) {
    // Fail gracefully instead of crashing with an unhandled ENOENT.
    console.error('❌ Example.srt not found in the current directory.');
    return;
  }
  const srtContent = fs.readFileSync('Example.srt', 'utf8');

  const { chunks, subtitleCount } = chunkSrt(srtContent);
  console.log(`📊 Total subtitles in file: ${subtitleCount}`);

  console.log(`\n📦 Created ${chunks.length} chunks for processing:`);
  chunks.forEach((chunk, index) => {
    console.log(`   Chunk ${index + 1}: Subtitles ${chunk.startSubtitle}-${chunk.endSubtitle} (${chunk.subtitleCount} total)`);
  });

  chunks.forEach((chunk) => {
    const filename = filenameFor(chunk);
    fs.writeFileSync(filename, chunk.content);
    console.log(`💾 Saved ${filename}`);
  });

  const estimatedConversationChunks = chunks.length * CONVERSATION_CHUNKS_PER_FILE;
  console.log(`\n🎯 ESTIMATED CONVERSATION CHUNKS:`);
  console.log(`   • Each ${CHUNK_SIZE}-subtitle chunk will create ~${CONVERSATION_CHUNKS_PER_FILE} conversation chunks`);
  console.log(`   • Total estimated conversation chunks: ~${estimatedConversationChunks}`);
  console.log(`   • Average subtitles per conversation chunk: ~${Math.round(subtitleCount / estimatedConversationChunks)}`);

  console.log(`\n🚀 READY FOR MCP PROCESSING!`);
  console.log(`   Use these commands to process each chunk:`);
  console.log(`   `);
  chunks.forEach((chunk, index) => {
    const filename = filenameFor(chunk);
    console.log(`   # Process chunk ${index + 1}/${chunks.length}:`);
    console.log(`   mcp_srt-mcp_detect_conversations content="$(cat ${filename})" storeInMemory=true createTodos=true`);
    console.log(`   `);
  });

  console.log(`\n📈 COMPLETE WORKFLOW SUMMARY:`);
  console.log(`   • Source file: Example.srt (${subtitleCount} subtitles)`);
  console.log(`   • Processing chunks: ${chunks.length} files`);
  console.log(`   • Estimated conversation chunks: ~${estimatedConversationChunks}`);
  console.log(`   • Total TODOs to be created: ~${estimatedConversationChunks}`);
  console.log(`   • Translation target: Spanish`);
  console.log(`   • Method: AI Assistant (Chat-based)`);

  console.log(`\n✨ The system is designed for QUALITY over QUANTITY!`);
  console.log(`   Larger chunks = Better context = Better translations!`);
}

main();

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server