generate_video_from_video
Transform existing videos using AI for restyling or motion transfer. Apply new visual styles or animate characters based on text prompts.
Instructions
Transform an existing video using AI. Supports restyling (Lucy models) and motion transfer (Kling motion control). Use upload_file first if you have a local video.
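For orientation, the two supported modes take different argument shapes. Below are two hypothetical payloads (the URLs are placeholders; parameter names follow the input schema in the next section): one restyle call and one motion-transfer call.

```python
# Hypothetical example payloads; URLs are placeholders.

# 1. Restyle an existing clip with a Lucy model.
restyle_args = {
    "video_url": "https://example.com/clip.mp4",  # use upload_file first for local videos
    "prompt": "transform into anime style",
    "model": "decart/lucy-edit/dev",
    "strength": 0.75,  # 0 = keep original, 1 = full transformation
}

# 2. Transfer motion from a reference video onto a character image (Kling motion control).
motion_transfer_args = {
    "video_url": "https://example.com/dance-reference.mp4",  # motion source
    "image_url": "https://example.com/character.png",        # character to animate
    "prompt": "a woman dancing gracefully",
    "model": "fal-ai/kling-video/v2.6/standard/motion-control",
    "character_orientation": "video",  # follow the reference video's orientation (max 30s)
}
```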
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| video_url | Yes | URL of the source video to transform (use upload_file for local videos) | |
| prompt | Yes | Text description of the transformation (e.g., 'transform into anime style', 'a woman dancing gracefully') | |
| model | No | Video-to-video model. Options: decart/lucy-edit/dev (restyle), decart/lucy-edit/pro, decart/lucy-restyle, fal-ai/kling-video/v2.6/standard/motion-control (motion transfer), fal-ai/kling-video/v2.6/pro/motion-control | decart/lucy-edit/dev |
| duration | No | Duration of the generated video in seconds (5 or 10) | 5 |
| aspect_ratio | No | Aspect ratio of the generated video (16:9, 9:16, or 1:1) | 16:9 |
| cfg_scale | No | Classifier Free Guidance scale: how closely to follow the prompt (0.0-1.0) | 0.5 |
| image_url | No | [Kling motion control] Reference image URL. The character in this image will be animated using motion from video_url. | |
| character_orientation | No | [Kling motion control] 'video': orientation matches reference video (max 30s). 'image': orientation matches reference image (max 10s). | video |
| keep_original_sound | No | [Kling motion control] Whether to keep the original sound from the reference video. | true |
| tail_image_url | No | [Kling Pro] URL of image for the end of the video (for transitions). | |
| generate_audio | No | [Kling v2.6 Pro] Generate native audio for the video (supports Chinese/English). | true |
| negative_prompt | No | What to avoid in the output | blur, distort, and low quality |
| strength | No | [Lucy models] How much to transform (0 = keep original, 1 = full transformation) | 0.75 |
| num_frames | No | [Lucy models] Number of frames to process | |
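Because the table mixes Lucy-only and Kling-only parameters, it is easy to pass an argument the chosen model ignores. A client-side check against the tool's inputSchema (reproduced under Implementation Reference below) catches malformed calls before any API cost is incurred. This is a minimal sketch using the third-party jsonschema package, assuming the schema dict is bound to `input_schema`; it is not how the server itself validates.

```python
# Minimal validation sketch; assumes `input_schema` holds the inputSchema dict
# shown under Implementation Reference and that `jsonschema` is installed.
from jsonschema import ValidationError, validate


def check_arguments(args: dict, input_schema: dict) -> None:
    """Raise ValueError if args do not satisfy the tool's input schema."""
    try:
        validate(instance=args, schema=input_schema)
    except ValidationError as exc:
        raise ValueError(f"Invalid generate_video_from_video call: {exc.message}") from exc


# Example: omitting the required 'prompt' field raises ValueError.
# check_arguments({"video_url": "https://example.com/clip.mp4"}, input_schema)
```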
Implementation Reference
- Core handler that processes the tool arguments, resolves the AI model, prepares parameters for the fal.ai API, executes the video-to-video generation with timeout handling via queue_strategy, and returns the resulting video URL or a detailed error message:

```python
# Excerpt from src/fal_mcp_server/handlers/video_handlers.py; module-level
# imports (asyncio, Dict/Any/List, TextContent, ModelRegistry, QueueStrategy)
# and `logger` are defined at the top of the file.
async def handle_generate_video_from_video(
    arguments: Dict[str, Any],
    registry: ModelRegistry,
    queue_strategy: QueueStrategy,
) -> List[TextContent]:
    """Handle the generate_video_from_video tool for video-to-video transformations."""
    model_input = arguments.get("model", "decart/lucy-edit/dev")
    try:
        model_id = await registry.resolve_model_id(model_input)
    except ValueError as e:
        return [
            TextContent(
                type="text",
                text=f"❌ {e}. Use list_models to see available options.",
            )
        ]

    # Build arguments based on model type
    fal_args: Dict[str, Any] = {
        "video_url": arguments["video_url"],
        "prompt": arguments["prompt"],
    }

    # Add optional parameters common to most models
    if "negative_prompt" in arguments:
        fal_args["negative_prompt"] = arguments["negative_prompt"]
    if "strength" in arguments:
        fal_args["strength"] = arguments["strength"]
    if "num_frames" in arguments:
        fal_args["num_frames"] = arguments["num_frames"]

    # Common video generation parameters
    if "duration" in arguments:
        fal_args["duration"] = arguments["duration"]
    if "aspect_ratio" in arguments:
        fal_args["aspect_ratio"] = arguments["aspect_ratio"]
    if "cfg_scale" in arguments:
        fal_args["cfg_scale"] = arguments["cfg_scale"]

    # Kling motion control specific parameters
    if "image_url" in arguments:
        fal_args["image_url"] = arguments["image_url"]
    if "character_orientation" in arguments:
        fal_args["character_orientation"] = arguments["character_orientation"]
    if "keep_original_sound" in arguments:
        fal_args["keep_original_sound"] = arguments["keep_original_sound"]

    # Kling Pro parameters (for transition videos and audio)
    if "tail_image_url" in arguments:
        fal_args["tail_image_url"] = arguments["tail_image_url"]
    if "generate_audio" in arguments:
        fal_args["generate_audio"] = arguments["generate_audio"]

    # Use queue strategy with extended timeout for video processing
    logger.info(
        "Starting video-to-video transformation with %s from %s",
        model_id,
        (
            arguments["video_url"][:50] + "..."
            if len(arguments["video_url"]) > 50
            else arguments["video_url"]
        ),
    )
    try:
        # Video-to-video can take longer, use 300s timeout
        video_result = await asyncio.wait_for(
            queue_strategy.execute(model_id, fal_args, timeout=300),
            timeout=305,  # Slightly longer than internal timeout
        )
    except asyncio.TimeoutError:
        logger.error(
            "Video-to-video transformation timed out after 300s. Model: %s, Video: %s",
            model_id,
            (
                arguments["video_url"][:50] + "..."
                if len(arguments["video_url"]) > 50
                else arguments["video_url"]
            ),
        )
        return [
            TextContent(
                type="text",
                text=f"❌ Video transformation timed out after 300 seconds with {model_id}. Video processing can take several minutes for longer videos.",
            )
        ]
    except Exception as e:
        logger.exception(
            "Video-to-video transformation failed. Model: %s, Video: %s",
            model_id,
            (
                arguments["video_url"][:50] + "..."
                if len(arguments["video_url"]) > 50
                else arguments["video_url"]
            ),
        )
        return [
            TextContent(
                type="text",
                text=f"❌ Video transformation failed: {e}",
            )
        ]

    if video_result is None:
        logger.error(
            "Video-to-video transformation returned None. Model: %s, Video: %s",
            model_id,
            (
                arguments["video_url"][:50] + "..."
                if len(arguments["video_url"]) > 50
                else arguments["video_url"]
            ),
        )
        return [
            TextContent(
                type="text",
                text=f"❌ Video transformation failed or timed out with {model_id}",
            )
        ]

    # Check for error in response
    if "error" in video_result:
        error_msg = video_result.get("error", "Unknown error")
        logger.error(
            "Video-to-video transformation failed for %s: %s",
            model_id,
            error_msg,
        )
        return [
            TextContent(
                type="text",
                text=f"❌ Video transformation failed: {error_msg}",
            )
        ]

    # Extract video URL from result (handle different response formats)
    video_dict = video_result.get("video", {})
    if isinstance(video_dict, dict):
        video_url = video_dict.get("url")
    else:
        video_url = video_result.get("url")

    if video_url:
        source_preview = (
            arguments["video_url"][:50] + "..."
            if len(arguments["video_url"]) > 50
            else arguments["video_url"]
        )
        return [
            TextContent(
                type="text",
                text=f"🎬 Video transformed with {model_id}:\n\n**Source**: {source_preview}\n**Result**: {video_url}",
            )
        ]

    logger.warning(
        "Video transformation completed but no video URL in response. Model: %s, Video: %s, Response keys: %s",
        model_id,
        (
            arguments["video_url"][:50] + "..."
            if len(arguments["video_url"]) > 50
            else arguments["video_url"]
        ),
        list(video_result.keys()) if video_result else "None",
    )
    return [
        TextContent(
            type="text",
            text="❌ Video transformation completed but no video URL was returned. Please try again.",
        )
    ]
```
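Because the handler depends only on the registry and queue-strategy interfaces, its happy path can be exercised without network access. The sketch below uses hypothetical FakeRegistry and FakeQueue stand-ins (not part of the project) that mimic the {"video": {"url": ...}} response shape the handler parses.

```python
# Sketch: driving the handler with hypothetical fakes; no real API calls.
import asyncio
from typing import Any, Dict


class FakeRegistry:
    async def resolve_model_id(self, model_input: str) -> str:
        return model_input  # accept any model id unchanged


class FakeQueue:
    async def execute(self, model_id: str, args: Dict[str, Any], timeout: int = 300):
        # Mimic the {"video": {"url": ...}} response shape the handler expects.
        return {"video": {"url": "https://example.com/transformed.mp4"}}


async def demo() -> None:
    result = await handle_generate_video_from_video(
        {"video_url": "https://example.com/clip.mp4", "prompt": "transform into anime style"},
        registry=FakeRegistry(),
        queue_strategy=FakeQueue(),
    )
    print(result[0].text)  # 🎬 Video transformed with decart/lucy-edit/dev: ...


asyncio.run(demo())
```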
- Tool definition with the name, description, and inputSchema used for input validation (required: video_url, prompt; optional: model, duration, strength, and the Kling-specific parameters such as image_url):

```python
Tool(
    name="generate_video_from_video",
    description="Transform an existing video using AI. Supports restyling (Lucy models) and motion transfer (Kling motion control). Use upload_file first if you have a local video.",
    inputSchema={
        "type": "object",
        "properties": {
            "video_url": {
                "type": "string",
                "description": "URL of the source video to transform (use upload_file for local videos)",
            },
            "prompt": {
                "type": "string",
                "description": "Text description of the transformation (e.g., 'transform into anime style', 'a woman dancing gracefully')",
            },
            "model": {
                "type": "string",
                "default": "decart/lucy-edit/dev",
                "description": "Video-to-video model. Options: decart/lucy-edit/dev (restyle), decart/lucy-edit/pro, decart/lucy-restyle, fal-ai/kling-video/v2.6/standard/motion-control (motion transfer), fal-ai/kling-video/v2.6/pro/motion-control",
            },
            "duration": {
                "type": "integer",
                "enum": [5, 10],
                "default": 5,
                "description": "Duration of generated video in seconds",
            },
            "aspect_ratio": {
                "type": "string",
                "enum": ["16:9", "9:16", "1:1"],
                "default": "16:9",
                "description": "Aspect ratio of the generated video",
            },
            "cfg_scale": {
                "type": "number",
                "default": 0.5,
                "description": "Classifier Free Guidance scale - how closely to follow the prompt (0.0-1.0)",
            },
            "image_url": {
                "type": "string",
                "description": "[Kling motion control] Reference image URL. The character in this image will be animated using motion from video_url.",
            },
            "character_orientation": {
                "type": "string",
                "enum": ["image", "video"],
                "default": "video",
                "description": "[Kling motion control] 'video': orientation matches reference video (max 30s). 'image': orientation matches reference image (max 10s).",
            },
            "keep_original_sound": {
                "type": "boolean",
                "default": True,
                "description": "[Kling motion control] Whether to keep original sound from reference video.",
            },
            "tail_image_url": {
                "type": "string",
                "description": "[Kling Pro] URL of image for the end of the video (for transitions).",
            },
            "generate_audio": {
                "type": "boolean",
                "default": True,
                "description": "[Kling v2.6 Pro] Generate native audio for video (supports Chinese/English).",
            },
            "negative_prompt": {
                "type": "string",
                "description": "What to avoid in the output (default: 'blur, distort, and low quality')",
            },
            "strength": {
                "type": "number",
                "minimum": 0,
                "maximum": 1,
                "default": 0.75,
                "description": "[Lucy models] How much to transform (0=keep original, 1=full transformation)",
            },
            "num_frames": {
                "type": "integer",
                "description": "[Lucy models] Number of frames to process",
            },
        },
        "required": ["video_url", "prompt"],
    },
),
```
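One consequence of declaring defaults in the schema is that clients can read them programmatically instead of hard-coding them. A small sketch, assuming the Tool object above is bound to `tool`:

```python
# Sketch, assuming the Tool definition above is bound to `tool`.
defaults = {
    name: spec["default"]
    for name, spec in tool.inputSchema["properties"].items()
    if "default" in spec
}
# e.g. {'model': 'decart/lucy-edit/dev', 'duration': 5, 'aspect_ratio': '16:9',
#       'cfg_scale': 0.5, 'character_orientation': 'video', ...}
```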
- src/fal_mcp_server/server.py:61-85 (registration): maps the name "generate_video_from_video" to its handler in the TOOL_HANDLERS dictionary, which the @server.call_tool() handler uses to route tool calls to the correct implementation:

```python
TOOL_HANDLERS = {
    # Utility tools (no queue needed)
    "list_models": handle_list_models,
    "recommend_model": handle_recommend_model,
    "get_pricing": handle_get_pricing,
    "get_usage": handle_get_usage,
    "upload_file": handle_upload_file,
    # Image generation tools
    "generate_image": handle_generate_image,
    "generate_image_structured": handle_generate_image_structured,
    "generate_image_from_image": handle_generate_image_from_image,
    # Image editing tools
    "remove_background": handle_remove_background,
    "upscale_image": handle_upscale_image,
    "edit_image": handle_edit_image,
    "inpaint_image": handle_inpaint_image,
    "resize_image": handle_resize_image,
    "compose_images": handle_compose_images,
    # Video tools
    "generate_video": handle_generate_video,
    "generate_video_from_image": handle_generate_video_from_image,
    "generate_video_from_video": handle_generate_video_from_video,
    # Audio tools
    "generate_music": handle_generate_music,
}
```
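With this mapping, dispatch reduces to a dictionary lookup. The actual @server.call_tool() body is not reproduced in this reference; the sketch below shows a plausible shape, with `registry` and `queue_strategy` assumed to be constructed elsewhere in server.py (the "no queue needed" comment above suggests the utility handlers may take a narrower signature).

```python
# Plausible dispatch sketch (not the verbatim server code); `registry` and
# `queue_strategy` are assumed module-level objects, and all handlers are
# assumed to share the (arguments, registry, queue_strategy) signature.
@server.call_tool()
async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
    handler = TOOL_HANDLERS.get(name)
    if handler is None:
        return [TextContent(type="text", text=f"❌ Unknown tool: {name}")]
    return await handler(arguments, registry, queue_strategy)
```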
- src/fal_mcp_server/handlers/video_handlers.py:33-209 (handler module): the sibling handlers handle_generate_video and handle_generate_video_from_image live in the same module and share the queue-strategy execution, timeout, and video-URL extraction pattern used by handle_generate_video_from_video. Excerpt (the opening of handle_generate_video is elided in the source extract):

```python
    # ... opening of handle_generate_video elided ...
    fal_args: Dict[str, Any] = {
        "prompt": arguments["prompt"],
    }

    # image_url is optional - only needed for image-to-video models
    if "image_url" in arguments:
        fal_args["image_url"] = arguments["image_url"]
    if "duration" in arguments:
        fal_args["duration"] = arguments["duration"]
    if "aspect_ratio" in arguments:
        fal_args["aspect_ratio"] = arguments["aspect_ratio"]
    if "negative_prompt" in arguments:
        fal_args["negative_prompt"] = arguments["negative_prompt"]
    if "cfg_scale" in arguments:
        fal_args["cfg_scale"] = arguments["cfg_scale"]

    # Use queue strategy with timeout protection for long-running video generation
    logger.info("Starting video generation with %s", model_id)
    try:
        video_result = await asyncio.wait_for(
            queue_strategy.execute(model_id, fal_args, timeout=180),
            timeout=185,  # Slightly longer than internal timeout
        )
    except asyncio.TimeoutError:
        return [
            TextContent(
                type="text",
                text=f"❌ Video generation timed out after 180 seconds with {model_id}",
            )
        ]

    if video_result is None:
        return [
            TextContent(
                type="text",
                text=f"❌ Video generation failed or timed out with {model_id}",
            )
        ]

    # Check for error in response
    if "error" in video_result:
        error_msg = video_result.get("error", "Unknown error")
        return [
            TextContent(
                type="text",
                text=f"❌ Video generation failed: {error_msg}",
            )
        ]

    # Extract video URL from result
    video_dict = video_result.get("video", {})
    if isinstance(video_dict, dict):
        video_url = video_dict.get("url")
    else:
        video_url = video_result.get("url")

    if video_url:
        return [
            TextContent(
                type="text",
                text=f"🎬 Video generated with {model_id}: {video_url}",
            )
        ]

    return [
        TextContent(
            type="text",
            text="❌ Video generation completed but no video URL was returned. Please try again.",
        )
    ]


async def handle_generate_video_from_image(
    arguments: Dict[str, Any],
    registry: ModelRegistry,
    queue_strategy: QueueStrategy,
) -> List[TextContent]:
    """Handle the generate_video_from_image tool."""
    model_input = arguments.get("model", "fal-ai/wan-i2v")
    try:
        model_id = await registry.resolve_model_id(model_input)
    except ValueError as e:
        return [
            TextContent(
                type="text",
                text=f"❌ {e}. Use list_models to see available options.",
            )
        ]

    # Both image_url and prompt are required for this tool
    fal_args: Dict[str, Any] = {
        "image_url": arguments["image_url"],
        "prompt": arguments["prompt"],
    }

    if "duration" in arguments:
        fal_args["duration"] = arguments["duration"]
    if "aspect_ratio" in arguments:
        fal_args["aspect_ratio"] = arguments["aspect_ratio"]
    if "negative_prompt" in arguments:
        fal_args["negative_prompt"] = arguments["negative_prompt"]
    if "cfg_scale" in arguments:
        fal_args["cfg_scale"] = arguments["cfg_scale"]

    # Use queue strategy with timeout protection
    logger.info(
        "Starting image-to-video generation with %s from %s",
        model_id,
        (
            arguments["image_url"][:50] + "..."
            if len(arguments["image_url"]) > 50
            else arguments["image_url"]
        ),
    )
    try:
        video_result = await asyncio.wait_for(
            queue_strategy.execute(model_id, fal_args, timeout=180),
            timeout=185,  # Slightly longer than internal timeout
        )
    except asyncio.TimeoutError:
        logger.error(
            "Image-to-video generation timed out after 180s. Model: %s, Image: %s",
            model_id,
            (
                arguments["image_url"][:50] + "..."
                if len(arguments["image_url"]) > 50
                else arguments["image_url"]
            ),
        )
        return [
            TextContent(
                type="text",
                text=f"❌ Video generation timed out after 180 seconds with {model_id}",
            )
        ]

    if video_result is None:
        return [
            TextContent(
                type="text",
                text=f"❌ Video generation failed or timed out with {model_id}",
            )
        ]

    # Check for error in response
    if "error" in video_result:
        error_msg = video_result.get("error", "Unknown error")
        return [
            TextContent(
                type="text",
                text=f"❌ Video generation failed: {error_msg}",
            )
        ]

    # Extract video URL from result
    video_dict = video_result.get("video", {})
    if isinstance(video_dict, dict):
        video_url = video_dict.get("url")
    else:
        video_url = video_result.get("url")

    if video_url:
        return [
            TextContent(
                type="text",
                text=f"🎬 Video generated with {model_id}: {video_url}",
            )
        ]

    return [
        TextContent(
            type="text",
            text="❌ Video generation completed but no video URL was returned. Please try again.",
        )
    ]


async def handle_generate_video_from_video(
    # ... continues with the handler shown in full above ...
```
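Note that the response-parsing tail (the None check, the "error" key check, and the video-URL extraction) is repeated nearly verbatim across all three handlers. If that contract with the fal.ai response shape ever changes, a shared helper would keep the handlers in sync; the function below is a hypothetical refactor sketch, not code that exists in the source.

```python
# Hypothetical helper (not in the source): the video-URL extraction duplicated
# across the three video handlers above.
from typing import Any, Dict, Optional


def extract_video_url(result: Dict[str, Any]) -> Optional[str]:
    """Return the output URL from either {"video": {"url": ...}} or {"url": ...}."""
    video = result.get("video", {})
    if isinstance(video, dict):
        return video.get("url")
    return result.get("url")
```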