video-production-tools.js (6.24 kB)
// Video Production Tools for Complete Pipeline
// Handles ElevenLabs voice generation and video assembly

import { ProductionOrchestrator } from '../integrations/production-orchestrator.js';

const orchestrator = new ProductionOrchestrator();

export function getVideoProductionToolDefinitions() {
  return [
    {
      name: "generate_scene_audio",
      description: "Generate audio for all dialogue scenes using ElevenLabs voice synthesis. Creates synchronized audio files for each scene.",
      inputSchema: {
        type: "object",
        properties: {
          character_name: { type: "string", description: "Name of the character" },
          breakdown_path: { type: "string", description: "Path to the scene breakdown JSON file" },
          voice_id: { type: "string", description: "ElevenLabs voice ID for the character" }
        },
        required: ["character_name", "breakdown_path", "voice_id"]
      }
    },
    {
      name: "create_video_production_manifest",
      description: "Create final video production manifest combining scene images and audio files. Generates instructions for video rendering with proper timing and transitions.",
      inputSchema: {
        type: "object",
        properties: {
          character_name: { type: "string", description: "Name of the character" },
          images_path: { type: "string", description: "Path to the scene images JSON file" },
          audio_path: { type: "string", description: "Path to the audio files JSON (optional)" }
        },
        required: ["character_name", "images_path"]
      }
    }
  ];
}

export function getVideoProductionToolHandlers() {
  return {
    generate_scene_audio: {
      handler: async (args) => {
        try {
          const result = await orchestrator.generateScriptAudio(
            args.breakdown_path,
            args.voice_id,
            args.character_name
          );

          if (result.success) {
            return {
              success: true,
              audio_files: result.audio_files,
              audio_metadata_path: result.audio_metadata_path,
              total_audio_files: result.total_audio_files,
              message: `✅ Generated audio for ${result.total_audio_files} scenes! Each dialogue line now has synchronized voice audio ready for lip sync video creation.`,
              next_steps: [
                "Use create_video_production_manifest to combine with scene images",
                "Export video using FFmpeg or video editing software"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              integration_note: result.integration_needed,
              message: `❌ Audio generation failed: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Scene audio generation failed: ${error.message}`
          };
        }
      }
    },

    create_video_production_manifest: {
      handler: async (args) => {
        try {
          // Load scene images
          const fs = await import('fs/promises');
          const sceneImages = JSON.parse(await fs.readFile(args.images_path, 'utf-8'));

          // Load audio files if provided
          let audioFiles = [];
          if (args.audio_path) {
            try {
              audioFiles = JSON.parse(await fs.readFile(args.audio_path, 'utf-8'));
            } catch (error) {
              console.log('No audio files found, creating silent video manifest');
            }
          }

          const result = await orchestrator.createVideoManifest(
            args.character_name,
            sceneImages,
            audioFiles
          );

          if (result.success) {
            return {
              success: true,
              manifest_path: result.manifest_path,
              manifest: result.manifest,
              total_duration: result.total_duration,
              total_scenes: result.total_scenes,
              ffmpeg_command: result.ffmpeg_command,
              message: `✅ Video production manifest created! ${result.total_duration}s duration with ${result.total_scenes} scenes. Ready for final video rendering.`,
              next_steps: [
                "Use FFmpeg with the generated command",
                "Import manifest into video editing software",
                "Create final lip-synced video production"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              message: `❌ Video manifest creation failed: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Video production manifest failed: ${error.message}`
          };
        }
      }
    }
  };
}
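For context, these two exports follow the common MCP pattern of pairing a tool-definition list with a name-keyed handler map. The sketch below shows one way they could be registered with a stdio MCP server using @modelcontextprotocol/sdk; the server name, version, and the import path for this module are assumptions for illustration, not taken from this repository.

// Minimal registration sketch (assumed wiring, not this repo's actual entry point).
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
  ListToolsRequestSchema,
  CallToolRequestSchema
} from "@modelcontextprotocol/sdk/types.js";
import {
  getVideoProductionToolDefinitions,
  getVideoProductionToolHandlers
} from "./tools/video-production-tools.js"; // path is an assumption

const server = new Server(
  { name: "mcp-puppet-pipeline", version: "0.1.0" }, // name/version assumed
  { capabilities: { tools: {} } }
);

const handlers = getVideoProductionToolHandlers();

// Advertise the tool definitions to MCP clients.
server.setRequestHandler(ListToolsRequestSchema, async () => ({
  tools: getVideoProductionToolDefinitions()
}));

// Dispatch each tool call to the matching handler and return its result as text.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;
  const tool = handlers[name];
  if (!tool) {
    throw new Error(`Unknown tool: ${name}`);
  }
  const result = await tool.handler(args ?? {});
  return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] };
});

await server.connect(new StdioServerTransport());

Keeping the handlers in a name-keyed map mirrors the tool names in the definitions, so a dispatcher like the one above can route calls without a switch statement; each handler already returns a plain result object, which the sketch serializes to JSON text for the client.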


MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bermingham85/mcp-puppet-pipeline'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.