
SRT Translation MCP Server

by omd0
show-detailed-chunk.js (6.8 kB)
#!/usr/bin/env node

/**
 * Show detailed view of a specific conversation chunk
 */

import fs from 'fs';
import { parseSRTFile } from './dist/parsers/srt-parser.js';
import { detectConversations } from './dist/chunking/conversation-detector.js';

class DetailedChunkDisplay {
  constructor() {
    this.inputFile = '/home/omd/Documents/Mix/SRT-MCP/Example.srt';
  }

  async showDetailedChunk(chunkIndex = 0) {
    try {
      console.log(`Loading and analyzing ${this.inputFile}...`);

      // Read and parse the SRT file
      const content = fs.readFileSync(this.inputFile, 'utf8');
      const parseResult = parseSRTFile(content);

      if (!parseResult.success || !parseResult.file) {
        throw new Error('Failed to parse SRT file');
      }

      // Detect conversation chunks
      const chunks = detectConversations(parseResult.file.subtitles);

      if (chunkIndex >= chunks.length) {
        console.log(`Chunk index ${chunkIndex} is out of range. Total chunks: ${chunks.length}`);
        return;
      }

      const chunk = chunks[chunkIndex];

      console.log(`\n=== DETAILED VIEW: CHUNK ${chunkIndex + 1} ===`);
      console.log(`ID: ${chunk.id}`);
      console.log(`Range: ${chunk.startIndex} - ${chunk.endIndex} (${chunk.subtitles.length} entries)`);
      console.log(`Speaker: ${chunk.context?.speaker || 'Unknown'}`);
      console.log(`Conversation ID: ${chunk.context?.conversationId || 'N/A'}`);
      console.log(`Previous Context: ${chunk.context?.previousContext || 'N/A'}`);
      console.log(`Next Context: ${chunk.context?.nextContext || 'N/A'}`);

      console.log(`\n=== ALL SUBTITLES IN CHUNK ===`);
      chunk.subtitles.forEach((subtitle, index) => {
        console.log(`\n${index + 1}. Entry ${subtitle.index}:`);
        console.log(`   Time: ${this.formatTime(subtitle.startTime)} --> ${this.formatTime(subtitle.endTime)}`);
        console.log(`   Text: ${subtitle.text}`);

        // Analyze content type
        const analysis = this.analyzeSubtitle(subtitle.text);
        if (analysis.length > 0) {
          console.log(`   Analysis: ${analysis.join(', ')}`);
        }
      });

      // Show conversation flow analysis
      this.analyzeConversationFlow(chunk);
    } catch (error) {
      console.error('Error displaying detailed chunk:', error);
      throw error;
    }
  }

  formatTime(time) {
    return `${time.hours.toString().padStart(2, '0')}:${time.minutes.toString().padStart(2, '0')}:${time.seconds.toString().padStart(2, '0')},${time.milliseconds.toString().padStart(3, '0')}`;
  }

  analyzeSubtitle(text) {
    const analysis = [];
    if (text.includes('?')) analysis.push('Question');
    if (text.includes('!')) analysis.push('Exclamation');
    if (text.includes('<i>')) analysis.push('Narration');
    if (text.includes('<b>')) analysis.push('Bold');
    if (text.includes('{\\an')) analysis.push('Positioned');
    if (text.includes(':')) analysis.push('Speaker');
    if (text.includes('.')) analysis.push('Statement');
    return analysis;
  }

  analyzeConversationFlow(chunk) {
    console.log(`\n=== CONVERSATION FLOW ANALYSIS ===`);

    let questions = 0;
    let exclamations = 0;
    let narration = 0;
    let statements = 0;
    let speakerChanges = 0;

    chunk.subtitles.forEach((subtitle, index) => {
      if (subtitle.text.includes('?')) questions++;
      if (subtitle.text.includes('!')) exclamations++;
      if (subtitle.text.includes('<i>')) narration++;
      if (subtitle.text.includes('.')) statements++;

      // Check for speaker changes
      if (index > 0) {
        const prevText = chunk.subtitles[index - 1].text;
        if (this.detectSpeakerChange(subtitle.text, prevText)) {
          speakerChanges++;
        }
      }
    });

    console.log(`Questions: ${questions}`);
    console.log(`Exclamations: ${exclamations}`);
    console.log(`Narration: ${narration}`);
    console.log(`Statements: ${statements}`);
    console.log(`Speaker Changes: ${speakerChanges}`);

    // Calculate timing gaps
    if (chunk.subtitles.length > 1) {
      const gaps = [];
      for (let i = 0; i < chunk.subtitles.length - 1; i++) {
        const current = chunk.subtitles[i];
        const next = chunk.subtitles[i + 1];
        const gap = this.calculateTimeGap(current, next);
        gaps.push(gap);
      }
      const avgGap = gaps.reduce((a, b) => a + b, 0) / gaps.length;
      console.log(`Average Time Gap: ${avgGap.toFixed(0)}ms`);
      console.log(`Min Gap: ${Math.min(...gaps)}ms`);
      console.log(`Max Gap: ${Math.max(...gaps)}ms`);
    }
  }

  detectSpeakerChange(currentText, previousText) {
    const currentSpeaker = this.extractSpeaker(currentText);
    const previousSpeaker = this.extractSpeaker(previousText);
    return currentSpeaker && previousSpeaker && currentSpeaker !== previousSpeaker;
  }

  extractSpeaker(text) {
    const speakerPatterns = [
      /^([A-Z][a-z]+):\s*(.+)$/,
      /^([A-Z][A-Z\s]+):\s*(.+)$/,
      /^([A-Z][a-z]+)\s*-\s*(.+)$/,
      /<b>Speaker (\d+):<\/b>/i
    ];

    for (const pattern of speakerPatterns) {
      const match = text.match(pattern);
      if (match) {
        return match[1].trim();
      }
    }
    return null;
  }

  calculateTimeGap(current, next) {
    const currentEndMs =
      (current.endTime.hours * 3600 + current.endTime.minutes * 60 + current.endTime.seconds) * 1000 +
      current.endTime.milliseconds;
    const nextStartMs =
      (next.startTime.hours * 3600 + next.startTime.minutes * 60 + next.startTime.seconds) * 1000 +
      next.startTime.milliseconds;
    return nextStartMs - currentEndMs;
  }
}

// Main execution
async function main() {
  const display = new DetailedChunkDisplay();
  const chunkIndex = process.argv[2] ? parseInt(process.argv[2], 10) : 0;
  await display.showDetailedChunk(chunkIndex);
}

if (import.meta.url === `file://${process.argv[1]}`) {
  main().catch(console.error);
}

export default DetailedChunkDisplay;
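The script can be run directly or imported as a module (it has a default export). A minimal usage sketch follows; it assumes the project's TypeScript sources have already been compiled to dist/ (the parser and conversation detector are imported from there) and that the hard-coded inputFile path points at a valid SRT file.

// Inspect the third conversation chunk from the command line:
//   node show-detailed-chunk.js 2
//
// Or reuse the class programmatically (hypothetical inspect-chunk.js,
// an ES module alongside show-detailed-chunk.js):
import DetailedChunkDisplay from './show-detailed-chunk.js';

const display = new DetailedChunkDisplay();
await display.showDetailedChunk(2); // chunk indices are zero-based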

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'
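The same endpoint can be queried programmatically. Below is a minimal sketch using Node.js 18+ (where fetch is a global); the shape of the JSON response is an assumption, so consult the API documentation for the actual schema.

// Hypothetical sketch: query the MCP directory API from Node.js.
const response = await fetch('https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp');
if (!response.ok) {
  throw new Error(`Request failed: ${response.status}`);
}
const server = await response.json(); // response schema is an assumption
console.log(server);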

If you have feedback or need assistance with the MCP directory API, please join our Discord server.