production-tools.js
// Complete Production Pipeline Tools for MCP Server
// Handles image-to-puppet conversion, script generation, and video production

import { ProductionOrchestrator } from '../integrations/production-orchestrator.js';

const orchestrator = new ProductionOrchestrator();

export function getProductionToolDefinitions() {
  return [
    {
      name: "process_character_image",
      description: "Upload and process a character image (like a giraffe) to convert it into puppet form with full analysis and Notion storage. Creates blueprint images ready for production.",
      inputSchema: {
        type: "object",
        properties: {
          image_path: {
            type: "string",
            description: "Path to the character image file to process"
          },
          character_name: {
            type: "string",
            description: "Optional name for the character (will auto-detect if not provided)"
          }
        },
        required: ["image_path"]
      }
    },
    {
      name: "generate_production_script",
      description: "Generate a complete puppet show script using OpenAI agents conversation. Creates dialogue, emotions, and scene descriptions.",
      inputSchema: {
        type: "object",
        properties: {
          character_name: {
            type: "string",
            description: "Name of the character for the script"
          },
          scenario: {
            type: "string",
            description: "The scenario or storyline for the puppet show"
          },
          conversation_context: {
            type: "string",
            description: "Optional additional context from previous conversations"
          }
        },
        required: ["character_name", "scenario"]
      }
    },
    {
      name: "breakdown_script_to_scenes",
      description: "Break down a script into individual scene prompts with camera angles and emotions. Each scene becomes a visual prompt for image generation.",
      inputSchema: {
        type: "object",
        properties: {
          script_path: {
            type: "string",
            description: "Path to the script file to break down"
          },
          character_name: {
            type: "string",
            description: "Name of the main character"
          }
        },
        required: ["script_path", "character_name"]
      }
    },
    {
      name: "generate_scene_images",
      description: "Generate image stills for each scene from the script breakdown. Creates all visual assets needed for video production.",
      inputSchema: {
        type: "object",
        properties: {
          breakdown_path: {
            type: "string",
            description: "Path to the scene breakdown JSON file"
          },
          character_name: {
            type: "string",
            description: "Name of the character"
          }
        },
        required: ["breakdown_path", "character_name"]
      }
    },
    {
      name: "create_complete_production",
      description: "Run the complete automated production pipeline: process character image → generate script → breakdown scenes → create images. One command for full production.",
      inputSchema: {
        type: "object",
        properties: {
          image_path: {
            type: "string",
            description: "Path to the character image file (e.g., giraffe photo)"
          },
          scenario: {
            type: "string",
            description: "The storyline/scenario for the puppet show"
          },
          character_name: {
            type: "string",
            description: "Optional character name (will auto-detect if not provided)"
          }
        },
        required: ["image_path", "scenario"]
      }
    },
    {
      name: "generate_character_voice_profile",
      description: "Generate voice profile and description for ElevenLabs integration. Creates voice characteristics based on character traits.",
      inputSchema: {
        type: "object",
        properties: {
          character_name: {
            type: "string",
            description: "Name of the character"
          },
          voice_description: {
            type: "string",
            description: "Description of desired voice characteristics"
          },
          sample_text: {
            type: "string",
            description: "Optional sample text for voice testing"
          }
        },
        required: ["character_name", "voice_description"]
      }
    }
  ];
}

export function getProductionToolHandlers() {
  return {
    process_character_image: {
      handler: async (args) => {
        try {
          const result = await orchestrator.processCharacterImage(
            args.image_path,
            args.character_name
          );
          if (result.success) {
            return {
              success: true,
              character_name: result.character_name,
              puppet_traits: result.puppet_traits,
              blueprint_images: result.blueprint_images,
              voice_profile: result.voice_profile,
              notion_page_id: result.notion_page_id,
              message: `✅ Character '${result.character_name}' processed successfully! Blueprint images created and stored in Notion. Ready for voice generation and scripting.`,
              next_steps: [
                "Generate ElevenLabs voice using the voice_profile",
                "Create script using generate_production_script",
                "Use create_complete_production for full automation"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              message: `❌ Failed to process character image: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Character processing failed: ${error.message}`
          };
        }
      }
    },
    generate_production_script: {
      handler: async (args) => {
        try {
          const result = await orchestrator.generateScript(
            args.character_name,
            args.scenario,
            args.conversation_context
          );
          if (result.success) {
            return {
              success: true,
              script: result.script,
              script_path: result.script_path,
              scenes_count: result.scenes_count,
              message: `✅ Script generated for '${args.character_name}' with ${result.scenes_count} scenes! Saved to ${result.script_path}`,
              next_steps: [
                "Use breakdown_script_to_scenes to create individual scene prompts",
                "Use generate_scene_images to create visual assets"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              message: `❌ Script generation failed: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Script generation failed: ${error.message}`
          };
        }
      }
    },
    breakdown_script_to_scenes: {
      handler: async (args) => {
        try {
          const result = await orchestrator.breakdownScript(
            args.script_path,
            args.character_name
          );
          if (result.success) {
            return {
              success: true,
              shots: result.shots,
              breakdown_path: result.breakdown_path,
              total_shots: result.total_shots,
              message: `✅ Script broken down into ${result.total_shots} individual shots! Each shot has detailed visual prompts, camera angles, and emotions.`,
              next_steps: [
                "Use generate_scene_images to create image stills for each shot",
                "Images will be ready for lip sync and video assembly"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              message: `❌ Script breakdown failed: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Script breakdown failed: ${error.message}`
          };
        }
      }
    },
    generate_scene_images: {
      handler: async (args) => {
        try {
          const result = await orchestrator.generateSceneImages(
            args.breakdown_path,
            args.character_name
          );
          if (result.success) {
            return {
              success: true,
              images: result.images,
              images_path: result.images_path,
              total_images: result.images.length,
              message: `✅ Generated ${result.images.length} scene images! Each image captures specific emotions, camera angles, and character actions. Ready for lip sync video creation.`,
              next_steps: [
                "Connect ElevenLabs for voice generation",
                "Use lip sync tools to create video clips from images",
                "Assemble clips into final continuous video"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              message: `❌ Scene image generation failed: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Scene image generation failed: ${error.message}`
          };
        }
      }
    },
    create_complete_production: {
      handler: async (args) => {
        try {
          const result = await orchestrator.createCompleteProduction(
            args.image_path,
            args.scenario,
            args.character_name
          );
          if (result.success) {
            const results = result.production_results;
            return {
              success: true,
              character_name: result.character_name,
              character_processing: results.character_processing,
              script_generation: results.script_generation,
              scene_breakdown: results.scene_breakdown,
              image_generation: results.image_generation,
              voice_generation: results.voice_generation,
              ready_for_video_assembly: results.ready_for_video_assembly,
              message: `🎉 COMPLETE PRODUCTION FINISHED! Character '${result.character_name}' is ready for final video assembly with ${results.image_generation.images.length} scene images generated.`,
              next_steps: result.next_steps
            };
          } else {
            return {
              success: false,
              error: result.error,
              partial_results: result.partial_results,
              message: `❌ Production pipeline failed: ${result.error}. Some steps may have completed successfully.`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Complete production failed: ${error.message}`
          };
        }
      }
    },
    generate_character_voice_profile: {
      handler: async (args) => {
        try {
          const result = await orchestrator.generateCharacterVoice(
            args.character_name,
            args.voice_description,
            args.sample_text
          );
          if (result.success) {
            return {
              success: true,
              voice_id: result.voice_id,
              voice_description: result.voice_description,
              sample_text: result.sample_text,
              message: `✅ Voice profile created for '${args.character_name}'! Use this profile with ElevenLabs to generate character voice.`,
              integration_note: result.integration_needed,
              next_steps: [
                "Set up ElevenLabs API key for voice generation",
                "Generate voice samples using the voice profile",
                "Use voice in lip sync video creation"
              ]
            };
          } else {
            return {
              success: false,
              error: result.error,
              message: `❌ Voice profile generation failed: ${result.error}`
            };
          }
        } catch (error) {
          return {
            success: false,
            error: error.message,
            message: `❌ Voice profile generation failed: ${error.message}`
          };
        }
      }
    }
  };
}
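
This module only exports the tool definitions and handlers; the actual MCP server wiring lives elsewhere in the repository. As a rough illustration only (not this project's server code), the exports could be registered with the standard @modelcontextprotocol/sdk request handlers along these lines. The import path, server name, and version below are assumptions, and each handler's plain-object result is wrapped in an MCP text content block:

// Sketch only: assumes the standard @modelcontextprotocol/sdk server setup,
// which is not part of this file. Import path and server metadata are guesses.
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { ListToolsRequestSchema, CallToolRequestSchema } from '@modelcontextprotocol/sdk/types.js';
import { getProductionToolDefinitions, getProductionToolHandlers } from './tools/production-tools.js';

const server = new Server(
  { name: 'mcp-puppet-pipeline', version: '0.1.0' },
  { capabilities: { tools: {} } }
);

const handlers = getProductionToolHandlers();

// Advertise the six production tools.
server.setRequestHandler(ListToolsRequestSchema, async () => ({
  tools: getProductionToolDefinitions(),
}));

// Route each tool call to its handler and return the result as text content.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const tool = handlers[request.params.name];
  if (!tool) throw new Error(`Unknown tool: ${request.params.name}`);
  const result = await tool.handler(request.params.arguments ?? {});
  return { content: [{ type: 'text', text: JSON.stringify(result, null, 2) }] };
});

await server.connect(new StdioServerTransport());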

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bermingham85/mcp-puppet-pipeline'
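
The same endpoint can also be queried from Node.js. A minimal sketch, assuming Node 18+ (global fetch) and making no assumptions about the response shape beyond it being JSON:

// Fetch this server's directory entry and print the JSON response.
const res = await fetch('https://glama.ai/api/mcp/v1/servers/bermingham85/mcp-puppet-pipeline');
if (!res.ok) throw new Error(`Request failed with status ${res.status}`);
console.log(await res.json());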

If you have feedback or need assistance with the MCP directory API, please join our Discord server.