Skip to main content
Glama
handlers.ts (20.6 kB)
import { MCPRequest, MCPResponse, LocalLLMInputSchema, CodeExecuteInputSchema, InitializeConversationSchema, SendMessageSchema, PlanningSessionSchema, TaskAssignmentSchema, GetConversationStatusSchema, TaskUpdateSchema } from './validation.js'; import { executeOllamaLLM } from './tools/ollama.js'; import { executeLMStudioLLM } from './tools/lmstudio.js'; import { executeCode } from './tools/execute.js'; import { getCharacterToolDefinitions, getCharacterToolHandlers } from './tools/character-tools.js'; import { runSingleAgentPipeline, runHybridPuppetPipeline, SceneGenerationRequest, VoiceVideoRequest } from './single-agent-pipeline.js'; import { gremlossAutomation } from './gremlos-automation.js'; export async function handleMCPRequest(request: MCPRequest): Promise<MCPResponse | null> { try { // Handle notifications (no response needed) if (request.method.startsWith('notifications/')) { console.log(`Received notification: ${request.method}`); return null; } switch (request.method) { case 'initialize': return { jsonrpc: '2.0', id: request.id ?? 0, result: { protocolVersion: '2025-06-18', capabilities: { tools: {} }, serverInfo: { name: 'puppet-production-server', version: '1.0.0' } } }; case 'tools/list': return { jsonrpc: '2.0', id: request.id ?? 
0, result: { tools: [ { name: 'puppet_production_pipeline', description: 'Single-agent puppet production pipeline: reference image analysis → character identifiers → cartesian shot generation (angles × emotions × mouth states) → OpenAI image generation → automated QC validation → delivery', inputSchema: { type: 'object', properties: { reference_image_path: { type: 'string', description: 'Path to reference image file' }, character_name: { type: 'string', description: 'Name for the puppet character' }, proof_of_concept: { type: 'boolean', description: 'Run small batch (6 images: front/3q-left × neutral/happy/angry × closed mouth)' }, angles: { type: 'array', items: { type: 'string', enum: ['front', 'left', 'right', 'back', '3q-left', '3q-right'] }, description: 'Camera angles to generate (default: all angles)' }, emotions: { type: 'array', items: { type: 'string', enum: ['neutral', 'happy', 'sad', 'angry', 'surprised', 'disgust', 'fear', 'smirk'] }, description: 'Emotions to generate (default: all emotions)' }, mouth_states: { type: 'array', items: { type: 'string', enum: ['closed', 'open-small', 'open-wide', 'tongue-out', 'teeth-showing'] }, description: 'Mouth states to generate (default: all states)' } }, required: ['reference_image_path', 'character_name'] } }, { name: 'puppet_pipeline_status', description: 'Get pipeline constants and configuration details', inputSchema: { type: 'object', properties: {} } }, { name: 'hybrid_puppet_pipeline', description: 'Hybrid puppet production: OpenAI creates core puppet → Affogato creates character for scene consistency → ElevenLabs adds voice for videos', inputSchema: { type: 'object', properties: { reference_image_path: { type: 'string', description: 'Path to reference image file' }, character_name: { type: 'string', description: 'Name for the puppet character' }, proof_of_concept: { type: 'boolean', description: 'Run small batch for testing' }, create_affogato_character: { type: 'boolean', description: 'Create Affogato 
character from best puppet for scene consistency' }, scene_generations: { type: 'array', items: { type: 'object', properties: { scene_prompt: { type: 'string', description: 'Scene description for character generation' }, output_path: { type: 'string', description: 'Where to save the scene image' }, style: { type: 'string', description: 'Optional style for the scene' }, quality: { type: 'string', enum: ['Plus', 'Regular'], description: 'Image quality' } }, required: ['scene_prompt', 'output_path'] }, description: 'Scene generation requests using character consistency' }, voice_videos: { type: 'array', items: { type: 'object', properties: { image_path: { type: 'string', description: 'Path to image for video creation' }, script: { type: 'string', description: 'Voice script text' }, voice_id: { type: 'string', description: 'ElevenLabs voice ID' }, output_path: { type: 'string', description: 'Where to save the video' }, duration: { type: 'number', description: 'Video duration in seconds' } }, required: ['image_path', 'script', 'voice_id', 'output_path'] }, description: 'Voice video creation requests' } }, required: ['reference_image_path', 'character_name'] } }, { name: 'create_scene_image', description: 'Generate scene image with existing Affogato character for consistency', inputSchema: { type: 'object', properties: { character_id: { type: 'string', description: 'Affogato character ID' }, scene_prompt: { type: 'string', description: 'Scene description' }, output_path: { type: 'string', description: 'Where to save the image' }, style: { type: 'string', description: 'Optional style' }, quality: { type: 'string', enum: ['Plus', 'Regular'], description: 'Image quality' } }, required: ['character_id', 'scene_prompt', 'output_path'] } }, { name: 'create_voice_video', description: 'Create narrated video from image using ElevenLabs voice', inputSchema: { type: 'object', properties: { image_path: { type: 'string', description: 'Path to source image' }, script: { type: 
'string', description: 'Voice script text' }, voice_id: { type: 'string', description: 'ElevenLabs voice ID' }, output_path: { type: 'string', description: 'Where to save the video' }, duration: { type: 'number', description: 'Video duration in seconds' } }, required: ['image_path', 'script', 'voice_id', 'output_path'] } }, { name: 'gremlos_weekly_pipeline', description: 'GREMLOS WORLD WEEKLY AUTOMATION: Trending analysis → viral script generation → character-consistent content → optimized video production for TikTok/Instagram/YouTube Shorts', inputSchema: { type: 'object', properties: { target_week: { type: 'string', description: 'Target week for content (optional, defaults to current week)' }, character_archetypes: { type: 'array', items: { type: 'string', enum: ['The Critic', 'The Gossip', 'The Mentor', 'The Chaos Agent', 'The Interviewer'] }, description: 'Gremlos character archetypes to use for content', default: ['The Critic', 'The Gossip', 'The Mentor', 'The Chaos Agent', 'The Interviewer'] }, content_volume: { type: 'number', description: 'Number of viral videos to create for the week', default: 7, minimum: 1, maximum: 21 }, platforms: { type: 'array', items: { type: 'string', enum: ['tiktok', 'instagram', 'youtube_shorts'] }, description: 'Target platforms for video optimization', default: ['tiktok', 'instagram', 'youtube_shorts'] } }, required: [] } } ], _pipeline_info: { description: "Single-agent pipeline with fixed constants for consistent puppet production", constants: { angles: ['front', 'left', 'right', 'back', '3q-left', '3q-right'], emotions: ['neutral', 'happy', 'sad', 'angry', 'surprised', 'disgust', 'fear', 'smirk'], mouth_states: ['closed', 'open-small', 'open-wide', 'tongue-out', 'teeth-showing'], lighting: 'soft even studio', background: 'plain light gray', output_size: '1024x1024' }, qc_thresholds: { palette_lock: 0.95, proportions_lock: 0.97, pass_rate_target: 0.80 } } } }; case 'tools/call': if (!request.params || !request.params.name) { 
throw new Error('Tool name is required'); } const toolName = request.params.name; const args = request.params.arguments || {}; // Handle single-agent pipeline tools if (toolName === 'puppet_production_pipeline') { try { const result = await runSingleAgentPipeline(args); return { jsonrpc: '2.0', id: request.id ?? 0, result: { success: true, message: `Pipeline completed with ${(result.pass_rate * 100).toFixed(1)}% pass rate`, batch_id: result.batch_id, pass_rate: result.pass_rate, total_images: result.items.length, passed: result.items.filter(i => i.status === 'pass').length, failed: result.items.filter(i => i.status === 'fail').length, retry_needed: result.items.filter(i => i.status === 'auto-retry').length, qc_report: result } }; } catch (error: any) { return { jsonrpc: '2.0', id: request.id ?? 0, error: { code: -32603, message: `Pipeline failed: ${error.message}`, data: error.stack } }; } } if (toolName === 'puppet_pipeline_status') { return { jsonrpc: '2.0', id: request.id ?? 0, result: { pipeline_ready: true, openai_available: !!process.env.OPENAI_API_KEY, affogato_available: !!process.env.AFFOGATO_API_KEY, elevenlabs_available: !!process.env.ELEVENLABS_API_KEY, constants: { angles: ['front', 'left', 'right', 'back', '3q-left', '3q-right'], emotions: ['neutral', 'happy', 'sad', 'angry', 'surprised', 'disgust', 'fear', 'smirk'], mouth_states: ['closed', 'open-small', 'open-wide', 'tongue-out', 'teeth-showing'] }, proof_of_concept: { description: "Small batch: 2 angles × 3 emotions × 1 mouth state = 6 images", angles: ['front', '3q-left'], emotions: ['neutral', 'happy', 'angry'], mouth_states: ['closed'] }, hybrid_workflow: { description: "OPTIMIZED: Affogato FaceLock for character consistency → OpenAI analysis → ElevenLabs voice videos" }, gremlos_automation: { description: "Weekly viral content automation for Gremlos World: Trending topics → Script generation → Character consistency → Video production", features: ["Real-time trend analysis", 
"Character-consistent content", "Platform optimization", "Automated scheduling"] } } }; } // Handle hybrid puppet pipeline if (toolName === 'hybrid_puppet_pipeline') { try { const result = await runHybridPuppetPipeline(args); return { jsonrpc: '2.0', id: request.id ?? 0, result: { success: true, message: `Hybrid pipeline completed with ${(result.puppet_qc_report.pass_rate * 100).toFixed(1)}% puppet pass rate`, puppet_report: result.puppet_qc_report, affogato_character: result.affogato_character, scene_results: result.scene_results, video_results: result.video_results, phases_completed: { puppet_creation: true, character_creation: !!result.affogato_character, scene_generation: (result.scene_results?.length || 0) > 0, voice_videos: (result.video_results?.length || 0) > 0 } } }; } catch (error: any) { return { jsonrpc: '2.0', id: request.id ?? 0, error: { code: -32603, message: `Hybrid pipeline failed: ${error.message}`, data: error.stack } }; } } // Handle scene image generation if (toolName === 'create_scene_image') { try { const pipeline = new (await import('./single-agent-pipeline.js')).HybridPuppetPipeline(process.env.OPENAI_API_KEY!); const character = { character_id: args.character_id, asset_id: '', best_puppet_image: '', created_at: '' }; const sceneRequest = { character_id: args.character_id, ...args }; await pipeline.generateSceneImage(character, sceneRequest); return { jsonrpc: '2.0', id: request.id ?? 0, result: { success: true, message: `Scene image generated: ${args.scene_prompt}`, output_path: args.output_path } }; } catch (error: any) { return { jsonrpc: '2.0', id: request.id ?? 
0, error: { code: -32603, message: `Scene generation failed: ${error.message}`, data: error.stack } }; } } // Handle voice video creation if (toolName === 'create_voice_video') { try { const pipeline = new (await import('./single-agent-pipeline.js')).HybridPuppetPipeline(process.env.OPENAI_API_KEY!); await pipeline.createVoiceVideo(args); return { jsonrpc: '2.0', id: request.id ?? 0, result: { success: true, message: `Voice video created: ${args.script.substring(0, 50)}...`, output_path: args.output_path } }; } catch (error: any) { return { jsonrpc: '2.0', id: request.id ?? 0, error: { code: -32603, message: `Voice video failed: ${error.message}`, data: error.stack } }; } } // Legacy tool handlers const characterHandlers = getCharacterToolHandlers(); switch (toolName) { case 'local_llm.generate': const llmInput = LocalLLMInputSchema.parse(args); const llmResult = llmInput.provider === 'ollama' ? await executeOllamaLLM(llmInput) : await executeLMStudioLLM(llmInput); return { jsonrpc: '2.0', id: request.id ?? 0, result: llmResult }; case 'code_node.exec_local': const execInput = CodeExecuteInputSchema.parse(args); const execResult = await executeCode(execInput); return { jsonrpc: '2.0', id: request.id ?? 0, result: execResult }; default: // Check if it's a character production tool if (characterHandlers[toolName]) { const characterResult = await characterHandlers[toolName](args); return { jsonrpc: '2.0', id: request.id ?? 0, result: characterResult }; } } // Handle Gremlos World weekly automation if (toolName === 'gremlos_weekly_pipeline') { try { const result = await gremlossAutomation.executeWeeklyPipeline(args); return { jsonrpc: '2.0', id: request.id ?? 0, result: { success: true, message: result.schedulingReady ? `Gremlos World weekly pipeline completed! 
Generated ${result.viralVideoProduction.length} viral videos optimized for ${result.viralVideoProduction[0]?.platform_variants?.length || 0} platforms.` : `Gremlos World pipeline in progress...`, trends_analyzed: result.trendsAnalysis.viral_opportunities.length, scripts_generated: result.scriptGeneration.length, videos_created: result.viralVideoProduction.length, characters_used: result.characterConsistentContent.length, platforms_optimized: args.platforms || ['tiktok', 'instagram', 'youtube_shorts'], ready_for_scheduling: result.schedulingReady, weekly_summary: { trending_topics: result.trendsAnalysis.viral_opportunities.slice(0, 3), character_archetypes: result.scriptGeneration.map(s => s.character_archetype), predicted_engagement: result.trendsAnalysis.predicted_engagement }, pipeline_data: result } }; } catch (error: any) { return { jsonrpc: '2.0', id: request.id ?? 0, error: { code: -32603, message: `Gremlos World weekly automation failed: ${error.message}`, data: error.stack } }; } } throw new Error(`Unknown tool: ${toolName}`); default: throw new Error(`Unknown method: ${request.method}`); } } catch (error: any) { console.error('Handler error:', error); return { jsonrpc: '2.0', id: request.id ?? 0, error: { code: -32603, message: error.message, data: error.stack } }; } }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bermingham85/mcp-puppet-pipeline'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.