SRT Translation MCP Server

by omd0
example-multiple-conversations.js (4.04 kB)
/**
 * Example demonstrating the new chunked response system with multiple conversations
 * This shows how the conversation detector splits different conversations into separate chunks
 */
import { detectConversations, getNextChunk } from './dist/chunking/conversation-detector.js';
import { parseSRTFile } from './dist/parsers/srt-parser.js';

// Example usage with multiple conversations
async function demonstrateMultipleConversations() {
  try {
    // Parse an SRT file with multiple conversations
    const srtContent = `1
00:00:01,000 --> 00:00:03,000
Speaker1: Hello, welcome to our conversation.

2
00:00:04,000 --> 00:00:06,000
Speaker2: Thank you for having me here.

3
00:00:07,000 --> 00:00:10,000
Speaker1: Let's discuss the main topic today.

4
00:00:15,000 --> 00:00:18,000
Speaker3: Hi everyone, I just joined the meeting.

5
00:00:19,000 --> 00:00:22,000
Speaker1: Welcome Speaker3, glad you could make it.

6
00:00:23,000 --> 00:00:26,000
Speaker3: Thanks, what did I miss?

7
00:00:30,000 --> 00:00:33,000
Speaker4: Hello, this is a completely different conversation.

8
00:00:34,000 --> 00:00:37,000
Speaker5: Yes, we're discussing something else entirely.

9
00:00:38,000 --> 00:00:41,000
Speaker4: Let's talk about the project timeline.`;

    const parseResult = parseSRTFile(srtContent);
    if (!parseResult.success) {
      throw new Error('Failed to parse SRT content');
    }
    const subtitles = parseResult.file.subtitles;

    console.log('=== MULTIPLE CONVERSATIONS CHUNKED RESPONSE ===\n');

    // Get first chunk with instructions
    const firstChunk = detectConversations(subtitles)[0];
    console.log('FIRST CHUNK RESPONSE:');
    console.log('====================');
    console.log(`Chunk ID: ${firstChunk.id}`);
    console.log(`Current Chunk: ${firstChunk.context?.currentChunk} of ${firstChunk.context?.totalChunks}`);
    console.log(`Has More Chunks: ${firstChunk.context?.hasMoreChunks}`);
    console.log(`Instruction: ${firstChunk.context?.instructionText}`);
    console.log('\nSubtitles in this chunk:');
    firstChunk.subtitles.forEach((sub, index) => {
      console.log(`${index + 1}. ${sub.text}`);
    });
    console.log('\n' + '='.repeat(50) + '\n');

    // Get second chunk if available
    if (firstChunk.context?.hasMoreChunks) {
      const secondChunk = getNextChunk(2, subtitles);
      if (secondChunk) {
        console.log('SECOND CHUNK RESPONSE:');
        console.log('=====================');
        console.log(`Chunk ID: ${secondChunk.id}`);
        console.log(`Current Chunk: ${secondChunk.context?.currentChunk} of ${secondChunk.context?.totalChunks}`);
        console.log(`Has More Chunks: ${secondChunk.context?.hasMoreChunks}`);
        console.log(`Instruction: ${secondChunk.context?.instructionText}`);
        console.log('\nSubtitles in this chunk:');
        secondChunk.subtitles.forEach((sub, index) => {
          console.log(`${index + 1}. ${sub.text}`);
        });
        console.log('\n' + '='.repeat(50) + '\n');

        // Get third chunk if available
        if (secondChunk.context?.hasMoreChunks) {
          const thirdChunk = getNextChunk(3, subtitles);
          if (thirdChunk) {
            console.log('THIRD CHUNK RESPONSE:');
            console.log('====================');
            console.log(`Chunk ID: ${thirdChunk.id}`);
            console.log(`Current Chunk: ${thirdChunk.context?.currentChunk} of ${thirdChunk.context?.totalChunks}`);
            console.log(`Has More Chunks: ${thirdChunk.context?.hasMoreChunks}`);
            console.log(`Instruction: ${thirdChunk.context?.instructionText}`);
            console.log('\nSubtitles in this chunk:');
            thirdChunk.subtitles.forEach((sub, index) => {
              console.log(`${index + 1}. ${sub.text}`);
            });
          }
        }
      }
    }
  } catch (error) {
    console.error('Error:', error.message);
  }
}

// Run the demonstration
demonstrateMultipleConversations();
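For intuition, here is a minimal sketch of the kind of gap-based heuristic a conversation detector can use to split the example above into three chunks. The subtitle field names (startTime, endTime in milliseconds) and the 4000 ms threshold are assumptions for illustration; the actual detector in ./dist/chunking/conversation-detector.js may use different rules (speaker changes, topic shifts, etc.).

// Sketch only: split subtitles into conversations wherever the silence
// between consecutive cues reaches gapMs. Field names and threshold are
// assumptions, not the server's actual implementation.
function splitByTimeGap(subtitles, gapMs = 4000) {
  const chunks = [];
  let current = [];
  for (const sub of subtitles) {
    const prev = current[current.length - 1];
    if (prev && sub.startTime - prev.endTime >= gapMs) {
      chunks.push(current); // gap found: close the current conversation
      current = [];
    }
    current.push(sub);
  }
  if (current.length > 0) chunks.push(current);
  return chunks;
}

Applied to the sample SRT, the 5 s gap before subtitle 4 and the 4 s gap before subtitle 7 would yield three chunks, matching the three conversations the example walks through.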

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'
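The same endpoint can be queried from code. A minimal sketch using the built-in fetch API (Node 18+); the response shape is whatever the Glama MCP API returns and is not documented here:

// Fetch this server's directory entry from the Glama MCP API.
const res = await fetch('https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp');
const server = await res.json();
console.log(server);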

If you have feedback or need assistance with the MCP directory API, please join our Discord server.