Skip to main content
Glama

SRT Translation MCP Server

by omd0
use-real-mcp-detection.js (11.4 kB)
#!/usr/bin/env node
/**
 * Re-time an SRT subtitle file using the real MCP conversation detection
 * system: parse the SRT, split it into conversation chunks, apply
 * conversation-aware timing offsets per entry, write the adjusted SRT,
 * and emit an analysis report (console summary + JSON file).
 */
import fs from 'fs';
import { parseSRTFile } from './dist/parsers/srt-parser.js';
// NOTE(review): detectConversations is imported but currently unused — confirm before removing.
import { detectConversations, detectConversationsAdvanced } from './dist/chunking/conversation-detector.js';

class RealMCPProcessor {
  /**
   * @param {string} [inputFile]  - Source SRT path.
   * @param {string} [outputFile] - Destination path for the re-timed SRT.
   * @param {string} [reportFile] - Destination path for the JSON analysis report.
   *
   * Defaults preserve the original hard-coded paths, so existing callers
   * (including the `main()` entry point below) behave identically.
   */
  constructor(
    inputFile = '/home/omd/Documents/Mix/SRT-MCP/Example.srt',
    outputFile = '/home/omd/Documents/Mix/SRT-MCP/Example_Real_MCP_Processed.srt',
    reportFile = '/home/omd/Documents/Mix/SRT-MCP/real-mcp-analysis.json'
  ) {
    this.inputFile = inputFile;
    this.outputFile = outputFile;
    this.reportFile = reportFile;
  }

  /**
   * End-to-end pipeline: read → parse → chunk → adjust timing → write → report.
   * @returns {Promise<void>}
   * @throws {Error} if the SRT cannot be parsed or any file I/O fails.
   */
  async processFile() {
    try {
      console.log(`Starting real MCP conversation detection for ${this.inputFile}...`);

      const content = fs.readFileSync(this.inputFile, 'utf8');
      console.log(`File loaded: ${content.length} characters`);

      console.log('Parsing SRT content...');
      const parseResult = parseSRTFile(content);
      if (!parseResult.success || !parseResult.file) {
        const errorDetails =
          parseResult.errors?.map((e) => `${e.type}: ${e.message}`).join(', ') ||
          'Unknown parsing error';
        throw new Error(`Failed to parse SRT file: ${errorDetails}`);
      }
      console.log(`Successfully parsed ${parseResult.file.subtitles.length} subtitle entries`);

      console.log('Detecting conversations with advanced algorithms...');
      const chunks = detectConversationsAdvanced(parseResult.file.subtitles, {
        boundaryThreshold: 0.7,
        maxChunkSize: 20,
        minChunkSize: 2,
        enableSemanticAnalysis: true,
        enableSpeakerDiarization: true,
      });
      console.log(`Detected ${chunks.length} conversation chunks`);

      // Re-time every chunk; chunk order is preserved in the output.
      const processedEntries = [];
      for (let i = 0; i < chunks.length; i++) {
        const chunk = chunks[i];
        console.log(`Processing chunk ${i + 1}/${chunks.length} (${chunk.subtitles.length} entries)`);
        const processedChunk = await this.processChunkWithRealMCP(chunk);
        processedEntries.push(...processedChunk);
      }

      const outputContent = this.writeSRT(processedEntries);
      fs.writeFileSync(this.outputFile, outputContent, 'utf8');
      console.log(`Processing complete! Output saved to ${this.outputFile}`);
      console.log(`Processed ${processedEntries.length} subtitle entries`);

      this.generateAnalysisReport(chunks, processedEntries);
    } catch (error) {
      console.error('Error processing file:', error);
      throw error;
    }
  }

  /**
   * Apply conversation-aware timing adjustments to every subtitle in a chunk.
   * @param {object} chunk - A conversation chunk (`subtitles` array + `context`).
   * @returns {Promise<Array<{index: number, timing: string, text: string}>>}
   */
  async processChunkWithRealMCP(chunk) {
    const processedChunk = [];
    for (let i = 0; i < chunk.subtitles.length; i++) {
      const subtitle = chunk.subtitles[i];
      const nextSubtitle = chunk.subtitles[i + 1]; // undefined at chunk end
      const prevSubtitle = chunk.subtitles[i - 1]; // undefined at chunk start

      const context = chunk.context;
      const isConversationStart = i === 0;
      const isConversationEnd = i === chunk.subtitles.length - 1;
      const isSpeakerChange = this.detectSpeakerChange(subtitle, prevSubtitle);

      const adjustments = this.calculateRealMCPAdjustments(subtitle, nextSubtitle, prevSubtitle, {
        context,
        isConversationStart,
        isConversationEnd,
        isSpeakerChange,
        chunkSize: chunk.subtitles.length,
      });

      processedChunk.push({
        index: subtitle.index,
        timing: this.adjustTiming(subtitle, adjustments),
        text: subtitle.text,
      });
    }
    return processedChunk;
  }

  /**
   * True when both entries carry a recognizable speaker label and the labels differ.
   * @param {object} current  - Current subtitle entry.
   * @param {object} previous - Previous subtitle entry (may be undefined).
   * @returns {boolean}
   */
  detectSpeakerChange(current, previous) {
    if (!previous) return false;
    const currentSpeaker = this.extractSpeaker(current.text);
    const previousSpeaker = this.extractSpeaker(previous.text);
    return Boolean(currentSpeaker && previousSpeaker && currentSpeaker !== previousSpeaker);
  }

  /**
   * Pull a speaker label out of subtitle text, trying several conventions:
   * "Name: …", "NAME: …", "Name - …", and "<b>Speaker N:</b>" markup.
   * @param {string} text
   * @returns {string|null} Trimmed speaker label, or null if none matched.
   */
  extractSpeaker(text) {
    const speakerPatterns = [
      /^([A-Z][a-z]+):\s*(.+)$/,
      /^([A-Z][A-Z\s]+):\s*(.+)$/,
      /^([A-Z][a-z]+)\s*-\s*(.+)$/,
      /<b>Speaker (\d+):<\/b>/i,
    ];
    for (const pattern of speakerPatterns) {
      const match = text.match(pattern);
      if (match) {
        return match[1].trim();
      }
    }
    return null;
  }

  /**
   * Compute millisecond offsets for one subtitle based on its conversational role.
   * Offsets are additive: several conditions may apply to the same entry.
   * @param {object} subtitle     - Entry being adjusted.
   * @param {object} nextSubtitle - Following entry (unused today; kept for API stability).
   * @param {object} prevSubtitle - Preceding entry (unused today; kept for API stability).
   * @param {object} context      - Flags from processChunkWithRealMCP.
   * @returns {{startOffset: number, endOffset: number}} Offsets in ms (always >= 0).
   */
  calculateRealMCPAdjustments(subtitle, nextSubtitle, prevSubtitle, context) {
    const adjustments = { startOffset: 0, endOffset: 0 };

    // Conversation boundaries get breathing room.
    if (context.isConversationStart) {
      adjustments.startOffset += 100; // Pause before conversation
    }
    if (context.isConversationEnd) {
      adjustments.endOffset += 200; // Pause after conversation
    }
    if (context.isSpeakerChange) {
      adjustments.startOffset += 300; // Significant pause for speaker change
    }

    // Punctuation-driven pauses (checks are independent, so "?!" gets both).
    if (subtitle.text.includes('?')) {
      adjustments.endOffset += 500; // Longer pause for questions
    }
    if (subtitle.text.includes('!')) {
      adjustments.endOffset += 400; // Pause for exclamations
    }
    if (subtitle.text.includes('.')) {
      adjustments.endOffset += 250; // Pause for statements
    }

    // Narration handling (italic text).
    if (subtitle.text.includes('<i>')) {
      adjustments.startOffset += 150; // Pause before narration
      adjustments.endOffset += 300; // Pause after narration
    }

    // ASS-style position tag (e.g. {\an8}).
    if (subtitle.text.includes('{\\an')) {
      adjustments.startOffset += 100; // Pause for positioned text
    }

    // Chunk-level context adjustments.
    if (context.context?.speaker) {
      adjustments.startOffset += 50; // Speaker-specific timing
    }
    if (context.chunkSize > 10) {
      adjustments.endOffset += 100; // Dense conversation - add more pauses
    }

    return adjustments;
  }

  /**
   * Apply offsets to a subtitle's start/end times and render an SRT timing line.
   * @param {object} subtitle    - Entry with startTime/endTime time objects.
   * @param {object} adjustments - {startOffset, endOffset} in ms.
   * @returns {string} "HH:MM:SS,mmm --> HH:MM:SS,mmm"
   */
  adjustTiming(subtitle, adjustments) {
    const startTime = this.timeToMs(subtitle.startTime);
    const endTime = this.timeToMs(subtitle.endTime);
    const adjustedStart = startTime + adjustments.startOffset;
    const adjustedEnd = endTime + adjustments.endOffset;
    return `${this.msToTime(adjustedStart)} --> ${this.msToTime(adjustedEnd)}`;
  }

  /**
   * Convert a parsed time object to total milliseconds.
   * @param {{hours: number, minutes: number, seconds: number, milliseconds: number}} time
   * @returns {number}
   */
  timeToMs(time) {
    return (time.hours * 3600 + time.minutes * 60 + time.seconds) * 1000 + time.milliseconds;
  }

  /**
   * Format milliseconds as an SRT timestamp (HH:MM:SS,mmm).
   * Assumes a non-negative input (offsets are always additive here).
   * @param {number} milliseconds
   * @returns {string}
   */
  msToTime(milliseconds) {
    const totalSeconds = Math.floor(milliseconds / 1000);
    const ms = milliseconds % 1000;
    const hours = Math.floor(totalSeconds / 3600);
    const minutes = Math.floor((totalSeconds % 3600) / 60);
    const seconds = totalSeconds % 60;
    return `${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')},${ms.toString().padStart(3, '0')}`;
  }

  /**
   * Serialize processed entries back to SRT text (blank line between entries).
   * @param {Array<{index: number, timing: string, text: string}>} entries
   * @returns {string}
   */
  writeSRT(entries) {
    return entries.map((entry) => `${entry.index}\n${entry.timing}\n${entry.text}\n`).join('\n');
  }

  /**
   * Print a summary of the run to the console and save a JSON report.
   * @param {Array} chunks           - Conversation chunks from detection.
   * @param {Array} processedEntries - Re-timed subtitle entries.
   */
  generateAnalysisReport(chunks, processedEntries) {
    const report = {
      totalChunks: chunks.length,
      totalEntries: processedEntries.length,
      // Guard against division by zero when no chunks were detected
      // (the original produced NaN here).
      averageChunkSize: chunks.length > 0 ? processedEntries.length / chunks.length : 0,
      speakerAnalysis: this.analyzeSpeakers(chunks),
      conversationFlow: this.analyzeConversationFlow(chunks),
      timingAdjustments: this.analyzeTimingAdjustments(processedEntries),
    };

    console.log('\n=== REAL MCP CONVERSATION ANALYSIS ===');
    console.log(`Total Conversation Chunks: ${report.totalChunks}`);
    console.log(`Total Subtitle Entries: ${report.totalEntries}`);
    console.log(`Average Chunk Size: ${report.averageChunkSize.toFixed(1)} entries`);
    console.log(`Unique Speakers: ${report.speakerAnalysis.uniqueSpeakers}`);
    console.log(`Speaker Changes: ${report.speakerAnalysis.speakerChanges}`);
    console.log(`Questions: ${report.conversationFlow.questions}`);
    console.log(`Exclamations: ${report.conversationFlow.exclamations}`);
    console.log(`Narration Sections: ${report.conversationFlow.narration}`);
    console.log(`Total Timing Adjustments: ${report.timingAdjustments.totalAdjustments}ms`);
    console.log(`Average Adjustment: ${report.timingAdjustments.averageAdjustment.toFixed(1)}ms`);

    fs.writeFileSync(this.reportFile, JSON.stringify(report, null, 2));
    console.log('\nDetailed analysis saved to real-mcp-analysis.json');
  }

  /**
   * Count unique chunk-level speakers and actual speaker changes.
   * @param {Array} chunks
   * @returns {{uniqueSpeakers: number, speakerChanges: number}}
   */
  analyzeSpeakers(chunks) {
    const speakers = new Set();
    let speakerChanges = 0;
    let previousSpeaker = null;
    for (const chunk of chunks) {
      if (chunk.context?.speaker) {
        speakers.add(chunk.context.speaker);
      }
      // BUG FIX: the original declared speakerChanges but never incremented it,
      // so the report always showed 0. Count real changes across consecutive
      // labeled subtitles, mirroring detectSpeakerChange's comparison.
      for (const subtitle of chunk.subtitles) {
        const speaker = this.extractSpeaker(subtitle.text);
        if (speaker) {
          if (previousSpeaker && speaker !== previousSpeaker) {
            speakerChanges++;
          }
          previousSpeaker = speaker;
        }
      }
    }
    return { uniqueSpeakers: speakers.size, speakerChanges };
  }

  /**
   * Tally conversational features across all subtitles.
   * @param {Array} chunks
   * @returns {{questions: number, exclamations: number, narration: number}}
   */
  analyzeConversationFlow(chunks) {
    let questions = 0;
    let exclamations = 0;
    let narration = 0;
    for (const chunk of chunks) {
      for (const subtitle of chunk.subtitles) {
        if (subtitle.text.includes('?')) questions++;
        if (subtitle.text.includes('!')) exclamations++;
        if (subtitle.text.includes('<i>')) narration++;
      }
    }
    return { questions, exclamations, narration };
  }

  /**
   * Summarize timing adjustments.
   * NOTE(review): these are estimates (200ms/entry), not measured values —
   * the actual per-entry offsets are not retained after processing.
   * @param {Array} entries
   * @returns {{totalAdjustments: number, averageAdjustment: number}}
   */
  analyzeTimingAdjustments(entries) {
    return {
      totalAdjustments: entries.length * 200, // Estimated
      averageAdjustment: 200,
    };
  }
}

/** Script entry point: process the default example file. */
async function main() {
  const processor = new RealMCPProcessor();
  await processor.processFile();
}

// Run only when invoked directly (not when imported as a module).
if (import.meta.url === `file://${process.argv[1]}`) {
  main().catch(console.error);
}

export default RealMCPProcessor;

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.