// process-complete-workflow.js • 3.6 kB
#!/usr/bin/env node
import fs from 'fs';
console.log('š¬ COMPLETE SRT TRANSLATION WORKFLOW');
console.log('=====================================');

// Read the complete Example.srt file (readFileSync throws with a clear
// ENOENT message if the file is missing, which is acceptable for a CLI script).
console.log('š Reading complete Example.srt file...');
const srtContent = fs.readFileSync('Example.srt', 'utf8');

// Count subtitles by their timestamp lines ("-->") instead of bare numeric
// lines: a cue whose *text* is purely numeric (e.g. "42") would be miscounted
// by /^\d+$/gm, while every SRT cue has exactly one "-->" timing line.
const subtitleCount = (srtContent.match(/-->/g) || []).length;
console.log(`š Total subtitles in file: ${subtitleCount}`);
// Split into chunks for MCP processing (500 subtitles each).
//
// BUG FIX: the previous version closed a chunk immediately *after* pushing the
// 500th cue's index line, so that cue's timestamp and text lines spilled into
// the NEXT chunk, producing malformed SRT files. We now flush the current
// chunk *before* a new cue starts once the per-chunk limit has been reached,
// keeping each cue's index/timing/text together.
//
// A line is treated as a cue index only if it is purely numeric AND the next
// line is a timing line ("-->"); this avoids misreading numeric subtitle text.
const SUBTITLES_PER_CHUNK = 500;
const lines = srtContent.split('\n');
const chunks = [];
let currentChunk = [];
let subtitleCountInChunk = 0; // cues accumulated in the current chunk
let totalSubtitlesSeen = 0;   // cumulative cue counter across all chunks
let chunkIndex = 0;

// Push the accumulated lines as one chunk descriptor and reset the
// per-chunk state. No-op when nothing has been accumulated.
function flushChunk() {
  if (currentChunk.length === 0) return;
  chunks.push({
    index: chunkIndex,
    content: currentChunk.join('\n'),
    // NOTE: subtitleCount is the *cumulative* total up to this chunk,
    // matching how the "(N total)" report below interprets it.
    subtitleCount: totalSubtitlesSeen,
    startSubtitle: totalSubtitlesSeen - subtitleCountInChunk + 1,
    endSubtitle: totalSubtitlesSeen,
  });
  currentChunk = [];
  subtitleCountInChunk = 0;
  chunkIndex++;
}

for (let i = 0; i < lines.length; i++) {
  const line = lines[i];
  const isCueIndex =
    /^\d+$/.test(line.trim()) &&
    i < lines.length - 1 &&
    lines[i + 1].includes('-->');

  // Close the current chunk BEFORE the next cue begins, never in the middle
  // of a cue, so every chunk is a valid stand-alone SRT fragment.
  if (isCueIndex && subtitleCountInChunk >= SUBTITLES_PER_CHUNK) {
    flushChunk();
  }

  currentChunk.push(line);
  if (isCueIndex) {
    subtitleCountInChunk++;
    totalSubtitlesSeen++;
  }
}

// Add the last (possibly partial) chunk if it has content.
flushChunk();
// Report each chunk's subtitle range, then persist it as its own .srt file.
console.log(`\nš¦ Created ${chunks.length} chunks for processing:`);
chunks.forEach((chunk, index) => {
  console.log(` Chunk ${index + 1}: Subtitles ${chunk.startSubtitle}-${chunk.endSubtitle} (${chunk.subtitleCount} total)`);
});

// Save all chunks.
chunks.forEach((chunk, index) => {
  const filename = `chunk-${index}-${chunk.startSubtitle}-${chunk.endSubtitle}.srt`;
  fs.writeFileSync(filename, chunk.content);
  // BUG FIX: previously logged the literal placeholder "$(unknown)" instead
  // of the actual file name that was just written.
  console.log(`š¾ Saved ${filename}`);
});
// Print rough sizing estimates and a ready-to-paste MCP command per chunk.
console.log(`\nšÆ ESTIMATED CONVERSATION CHUNKS:`);
console.log(` ⢠Each 500-subtitle chunk will create ~12 conversation chunks`);
console.log(` ⢠Total estimated conversation chunks: ~${chunks.length * 12}`);
console.log(` ⢠Average subtitles per conversation chunk: ~${Math.round(subtitleCount / (chunks.length * 12))}`);
console.log(`\nš READY FOR MCP PROCESSING!`);
console.log(` Use these commands to process each chunk:`);
console.log(` `);
chunks.forEach((chunk, index) => {
  const filename = `chunk-${index}-${chunk.startSubtitle}-${chunk.endSubtitle}.srt`;
  console.log(` # Process chunk ${index + 1}/${chunks.length}:`);
  // BUG FIX: the command previously embedded "$(cat $(unknown))"; interpolate
  // the real chunk file name so the printed shell command is actually runnable.
  console.log(` mcp_srt-mcp_detect_conversations content="$(cat ${filename})" storeInMemory=true createTodos=true`);
  console.log(` `);
});
// Final summary of the complete workflow, emitted one line at a time.
const summaryLines = [
  `\nš COMPLETE WORKFLOW SUMMARY:`,
  ` ⢠Source file: Example.srt (${subtitleCount} subtitles)`,
  ` ⢠Processing chunks: ${chunks.length} files`,
  ` ⢠Estimated conversation chunks: ~${chunks.length * 12}`,
  ` ⢠Total TODOs to be created: ~${chunks.length * 12}`,
  ` ⢠Translation target: Spanish`,
  ` ⢠Method: AI Assistant (Chat-based)`,
  `\n⨠The system is designed for QUALITY over QUANTITY!`,
  ` Larger chunks = Better context = Better translations!`,
];
for (const line of summaryLines) {
  console.log(line);
}