Skip to main content
Glama

SRT Translation MCP Server

by omd0
process-full-file.js (3.9 kB)
#!/usr/bin/env node
'use strict';

/**
 * process-full-file.js
 *
 * Reads Example.srt, splits it into chunks of at most 100 subtitles, and
 * sends each chunk to the MCP server (dist/index.js) as a JSON-RPC
 * "tools/call" request for the "detect_conversations" tool.
 *
 * Fixes over the previous version:
 *  - Chunk boundaries no longer split a subtitle: the cue-index line now
 *    stays with its own timestamp/text instead of closing the prior chunk.
 *  - Cue detection requires the following line to be a timestamp ("-->"),
 *    so numeric subtitle text is not miscounted as a cue index.
 *  - The MCP server is spawned directly and the request is serialized with
 *    JSON.stringify, removing the nested `node -e` wrapper whose inlined
 *    script referenced out-of-scope resolve/reject (ReferenceError in the
 *    child) and interpolated raw subtitle text into a template literal
 *    (only backticks were escaped, leaving `${...}` injection open).
 */

const fs = require('fs');
const { spawn } = require('child_process');

// Read the full SRT file.
const srtContent = fs.readFileSync('Example.srt', 'utf8');

// How many subtitles go into each chunk sent to the MCP server.
const SUBTITLES_PER_CHUNK = 100;

// --- Chunking ---------------------------------------------------------------

const lines = srtContent.split('\n');
const chunks = [];
let currentChunk = [];
let subtitleCount = 0;

for (let i = 0; i < lines.length; i++) {
  const line = lines[i];

  // A cue index is a digits-only line immediately followed by a timestamp
  // line ("00:00:01,000 --> 00:00:02,000"). Checking the next line avoids
  // miscounting subtitle text that happens to be a bare number.
  const isCueIndex =
    /^\d+$/.test(line.trim()) &&
    i + 1 < lines.length &&
    lines[i + 1].includes('-->');

  if (isCueIndex) {
    subtitleCount++;
    // Close the current chunk BEFORE adding this cue's index line, so every
    // subtitle (index + timestamp + text) stays within a single chunk.
    if (subtitleCount > 1 && (subtitleCount - 1) % SUBTITLES_PER_CHUNK === 0) {
      chunks.push(currentChunk.join('\n'));
      currentChunk = [];
    }
  }

  currentChunk.push(line);
}

// Add the last chunk if it has content.
if (currentChunk.length > 0) {
  chunks.push(currentChunk.join('\n'));
}

console.log(`Total subtitles found: ${subtitleCount}`);
console.log(`Created ${chunks.length} chunks for processing`);

// --- Processing -------------------------------------------------------------

let processedChunks = 0;

/**
 * Send one chunk to the MCP server as a "detect_conversations" tool call.
 *
 * @param {number} chunkIndex - Zero-based chunk index; doubles as the
 *   JSON-RPC request id and drives the 1-based log messages.
 * @param {string} chunkContent - Raw SRT text for this chunk.
 * @returns {Promise<string>} Resolves with the server's stdout when the
 *   process exits 0; rejects with its stderr (or spawn error) otherwise.
 */
function processChunk(chunkIndex, chunkContent) {
  return new Promise((resolve, reject) => {
    const mcpProcess = spawn('node', ['dist/index.js'], {
      stdio: ['pipe', 'pipe', 'pipe'],
    });

    let output = '';
    let error = '';

    mcpProcess.stdout.on('data', (data) => {
      output += data.toString();
    });

    mcpProcess.stderr.on('data', (data) => {
      error += data.toString();
    });

    // Spawn failures (e.g. `node` missing) never emit 'close' — fail fast
    // instead of leaving the Promise pending forever.
    mcpProcess.on('error', reject);

    mcpProcess.on('close', (code) => {
      if (code === 0) {
        processedChunks++;
        console.log(`Processed chunk ${chunkIndex + 1}/${chunks.length}`);
        resolve(output);
      } else {
        reject(new Error(`Chunk ${chunkIndex + 1} failed: ${error}`));
      }
    });

    // JSON.stringify escapes the chunk content safely — no manual quoting.
    const request = {
      jsonrpc: '2.0',
      id: chunkIndex,
      method: 'tools/call',
      params: {
        name: 'detect_conversations',
        arguments: {
          content: chunkContent,
          storeInMemory: true,
          createTodos: false,
        },
      },
    };

    mcpProcess.stdin.write(JSON.stringify(request) + '\n');
    mcpProcess.stdin.end();
  });
}

/**
 * Process every chunk sequentially, logging (but not aborting on) per-chunk
 * failures, with a short pause between chunks.
 */
async function processAllChunks() {
  console.log('Starting to process all chunks...');

  for (let i = 0; i < chunks.length; i++) {
    try {
      await processChunk(i, chunks[i]);
      // Add a small delay to avoid overwhelming the system.
      await new Promise((resolve) => setTimeout(resolve, 100));
    } catch (error) {
      console.error(`Error processing chunk ${i + 1}:`, error.message);
    }
  }

  console.log(`Processing complete! Processed ${processedChunks}/${chunks.length} chunks`);
}

processAllChunks().catch(console.error);

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.