Skip to main content
Glama

SRT Translation MCP Server

by omd0
test-simple-metadata.js•3.84 kB
#!/usr/bin/env node
/**
 * Simple test for conversation detection and metadata analysis.
 *
 * Usage: node test-simple-metadata.js [path/to/file.srt]
 * When no path is given, falls back to the original sample location.
 */
import { SRTTranslationMCPServer } from './dist/mcp/server.js';
import fs from 'fs';

// Original hard-coded sample path, kept as the default so existing
// invocations keep working; a CLI argument now overrides it.
const DEFAULT_SRT_PATH = '/home/omd/Documents/Mix/SRT-MCP/example.srt';

/**
 * Extract the first JSON object embedded in a server response string.
 * The server prefixes its JSON payload with emoji/labels, so we slice
 * from the first '{' to the last '}' before parsing.
 *
 * @param {string} text - Raw response text containing a JSON object.
 * @returns {object} Parsed JSON payload.
 * @throws {Error|SyntaxError} If no JSON object is present or parsing fails.
 */
function extractJson(text) {
  const start = text.indexOf('{');
  const end = text.lastIndexOf('}') + 1;
  if (start === -1 || end === 0) {
    throw new Error('No JSON object found in response text');
  }
  return JSON.parse(text.substring(start, end));
}

/**
 * Print a human-readable summary of one conversation chunk.
 *
 * @param {object} chunk - Chunk metadata as produced by detectConversations.
 * @param {number} index - Zero-based position of the chunk in the list.
 */
function printChunk(chunk, index) {
  console.log(`\nChunk ${index + 1}:`);
  console.log(`  ID: ${chunk.id}`);
  console.log(`  Time: ${chunk.startTime} - ${chunk.endTime} (${(chunk.duration / 1000).toFixed(2)}s)`);
  console.log(`  Subtitles: ${chunk.subtitleCount}`);
  console.log(`  Speaker: ${chunk.speaker || 'none'}`);
  console.log(`  Language: ${chunk.languageInfo.primary} (confidence: ${chunk.languageInfo.confidence})`);
  // Fixed: the original had a stray line break inside this template literal.
  console.log(`  Complexity: ${chunk.complexity}`);
  console.log(`  Content type: ${chunk.contentType}`);
  console.log(`  Priority: ${chunk.translationPriority}`);
  console.log(`  Topic keywords: ${chunk.topicKeywords.join(', ')}`);
}

/**
 * Run both smoke tests (conversation detection, content analysis) against
 * the SRT file and print their summaries. Sets a non-zero exit code on any
 * failure so CI can detect it (the original always exited 0).
 */
async function testSimpleMetadata() {
  console.log('Testing conversation detection and metadata analysis...\n');

  const srtPath = process.argv[2] ?? DEFAULT_SRT_PATH;

  try {
    // Read example SRT file.
    const srtContent = fs.readFileSync(srtPath, 'utf8');
    console.log('šŸ“„ Loaded SRT file with', srtContent.split('\n\n').length - 1, 'subtitle blocks\n');

    // Create server instance.
    const server = new SRTTranslationMCPServer();

    // --- Conversation detection ------------------------------------------
    console.log('šŸ” Testing conversation detection...');
    const conversationsResult = await server.handleDetectConversations({
      content: srtContent,
    });

    if (conversationsResult.content && conversationsResult.content[0]) {
      const metadata = extractJson(conversationsResult.content[0].text);

      console.log('\nšŸ“Š METADATA SUMMARY:');
      console.log(`Total chunks: ${metadata.chunkCount}`);
      console.log(`Total duration: ${(metadata.totalDuration / 1000).toFixed(2)} seconds`);
      console.log(`Language distribution:`, metadata.languageDistribution);
      console.log(`Speaker distribution:`, metadata.speakerDistribution);

      console.log('\nšŸ“ CHUNK DETAILS:');
      metadata.chunks.forEach(printChunk);

      console.log('\nāœ… SUCCESS: Conversation detection working correctly!');
    } else {
      console.log('āŒ Error: No content in response');
      process.exitCode = 1; // surface the failure to the caller/CI
    }

    // --- Content analysis --------------------------------------------------
    console.log('\nšŸ” Testing content analysis...');
    const analysisResult = await server.handleAnalyzeContentForAI({
      content: srtContent,
    });

    if (analysisResult.content && analysisResult.content[0]) {
      const analysis = JSON.parse(analysisResult.content[0].text);

      console.log('\nšŸ“Š CONTENT ANALYSIS:');
      console.log(`Total subtitles: ${analysis.totalSubtitles}`);
      console.log(`Questions: ${analysis.totalQuestions}`);
      console.log(`Exclamations: ${analysis.totalExclamations}`);
      console.log(`Narration: ${analysis.totalNarration}`);
      console.log(`Dialogue: ${analysis.totalDialogue}`);
      console.log(`Speaker changes: ${analysis.speakerChanges}`);
      console.log(`Average subtitle length: ${analysis.averageSubtitleLength.toFixed(1)} characters`);
      console.log(`Average gap: ${analysis.averageGap.toFixed(0)}ms`);
      console.log('\nāœ… SUCCESS: Content analysis working correctly!');
    } else {
      console.log('āŒ Error: No analysis content in response');
      process.exitCode = 1; // surface the failure to the caller/CI
    }
  } catch (error) {
    console.error('āŒ Test failed:', error.message);
    console.error('Stack trace:', error.stack);
    process.exitCode = 1; // original swallowed the error and exited 0
  }
}

// Run the test.
testSimpleMetadata().catch(console.error);

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/omd0/srt-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.