compose_images
Overlay images such as watermarks or logos onto a base image, with precise positioning, scaling, and opacity controls for custom compositions.
Instructions
Overlay one image on top of another (e.g., add watermark, logo). Uses PIL for precise positioning - no AI needed.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| base_image_url | Yes | URL of the background/base image | |
| overlay_image_url | Yes | URL of the image to overlay (e.g., logo, watermark). Use upload_file for local images. | |
| position | No | Where to place the overlay: top-left, top-right, bottom-left, bottom-right, center, or custom. Use 'custom' with x and y for exact positioning. | bottom-right |
| x | No | Custom X position in pixels (required if position='custom') | |
| y | No | Custom Y position in pixels (required if position='custom') | |
| scale | No | Scale of the overlay relative to the base image width (0.01-1.0). 0.15 = 15% of the base width. | 0.15 |
| padding | No | Padding from the edges in pixels (for preset positions) | 20 |
| opacity | No | Overlay opacity (0.0 = fully transparent, 1.0 = fully opaque) | 1.0 |
| output_format | No | Output image format: png, jpeg, or webp | png |
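
For illustration, here is a hedged example of an arguments payload that places a semi-transparent logo in the bottom-right corner; the URLs are placeholders, not real endpoints:

```python
# Hypothetical compose_images arguments: semi-transparent logo, bottom-right,
# sized to 15% of the base image's width. URLs are placeholders.
arguments = {
    "base_image_url": "https://example.com/photo.png",
    "overlay_image_url": "https://example.com/logo.png",
    "position": "bottom-right",
    "scale": 0.15,
    "opacity": 0.6,
    "output_format": "png",
}
```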
Implementation Reference
- Main handler function that downloads both images, resizes and positions the overlay with PIL, applies opacity if needed, composites the layers, uploads the result to Fal storage, and returns the URL (the worked example after this list walks through the sizing and placement math).

```python
async def handle_compose_images(
    arguments: Dict[str, Any],
    registry: ModelRegistry,
    queue_strategy: QueueStrategy,
) -> List[TextContent]:
    """
    Handle the compose_images tool for overlaying images.

    Uses PIL for compositing and uploads result to Fal storage.
    """
    base_url = arguments["base_image_url"]
    overlay_url = arguments["overlay_image_url"]
    position = arguments.get("position", "bottom-right")
    scale = arguments.get("scale", 0.15)
    padding = arguments.get("padding", 20)
    opacity = arguments.get("opacity", 1.0)
    output_format = arguments.get("output_format", "png")

    # Validate custom position BEFORE any processing
    if position == "custom":
        if arguments.get("x") is None or arguments.get("y") is None:
            return [
                TextContent(
                    type="text",
                    text="❌ Custom position requires both 'x' and 'y' parameters.",
                )
            ]

    logger.info(
        "Composing images: overlay at %s with scale=%.2f, opacity=%.2f",
        position,
        scale,
        opacity,
    )

    tmp_path: str | None = None
    try:
        # Download both images with timeout
        async with httpx.AsyncClient(timeout=30.0) as client:
            base_response = await client.get(base_url)
            base_response.raise_for_status()
            overlay_response = await client.get(overlay_url)
            overlay_response.raise_for_status()

        # Open images with PIL
        base_img = Image.open(BytesIO(base_response.content)).convert("RGBA")
        overlay_img = Image.open(BytesIO(overlay_response.content)).convert("RGBA")

        # Scale overlay relative to base width
        overlay_width = int(base_img.width * scale)
        overlay_ratio = overlay_width / overlay_img.width
        overlay_height = int(overlay_img.height * overlay_ratio)
        overlay_img = overlay_img.resize(
            (overlay_width, overlay_height), Image.Resampling.LANCZOS
        )

        # Calculate position
        x, y = _calculate_overlay_position(
            base_img.size,
            (overlay_width, overlay_height),
            position,
            padding,
            arguments.get("x"),
            arguments.get("y"),
        )

        # Apply opacity if not fully opaque
        if opacity < 1.0:
            overlay_img = _apply_opacity(overlay_img, opacity)

        # Composite the images
        # Create a copy to avoid modifying the original
        result_img = base_img.copy()
        result_img.paste(overlay_img, (x, y), overlay_img)

        # Convert to RGB if saving as JPEG
        if output_format.lower() == "jpeg":
            result_img = result_img.convert("RGB")

        # Save to temporary file
        with tempfile.NamedTemporaryFile(
            suffix=f".{output_format}", delete=False
        ) as tmp:
            result_img.save(tmp.name, format=output_format.upper())
            tmp_path = tmp.name

        # Upload to Fal storage
        logger.info("Uploading composed image to Fal storage")
        result_url = await fal_client.upload_file_async(Path(tmp_path))

        response = "🖼️ Images composed successfully!\n\n"
        response += f"**Position**: {position}"
        if position == "custom":
            response += f" ({x}, {y})"
        response += "\n"
        response += f"**Overlay scale**: {scale:.0%} of base width\n"
        if opacity < 1.0:
            response += f"**Opacity**: {opacity:.0%}\n"
        response += f"\n**Result**: {result_url}"

        return [TextContent(type="text", text=response)]

    except httpx.HTTPError as e:
        logger.exception("Failed to download images: %s", e)
        return [
            TextContent(
                type="text",
                text=f"❌ Failed to download images: {e}",
            )
        ]
    except Exception as e:
        logger.exception("Image composition failed: %s", e)
        return [
            TextContent(
                type="text",
                text=f"❌ Image composition failed: {e}",
            )
        ]
    finally:
        # Always clean up temp file
        if tmp_path and os.path.exists(tmp_path):
            try:
                os.unlink(tmp_path)
            except OSError as cleanup_error:
                logger.warning(
                    "Failed to clean up temp file %s: %s", tmp_path, cleanup_error
                )
```
- Helper functions used by the handler: `_calculate_overlay_position` computes the overlay placement from a preset or custom coordinates, and `_apply_opacity` scales the alpha channel for transparency.

```python
def _calculate_overlay_position(
    base_size: Tuple[int, int],
    overlay_size: Tuple[int, int],
    position: str,
    padding: int,
    custom_x: int | None,
    custom_y: int | None,
) -> Tuple[int, int]:
    """Calculate the x, y position for the overlay based on position preset."""
    base_w, base_h = base_size
    overlay_w, overlay_h = overlay_size

    positions = {
        "top-left": (padding, padding),
        "top-right": (base_w - overlay_w - padding, padding),
        "bottom-left": (padding, base_h - overlay_h - padding),
        "bottom-right": (base_w - overlay_w - padding, base_h - overlay_h - padding),
        "center": ((base_w - overlay_w) // 2, (base_h - overlay_h) // 2),
        "custom": (custom_x or 0, custom_y or 0),
    }

    return positions.get(position, positions["bottom-right"])


def _apply_opacity(image: Image.Image, opacity: float) -> Image.Image:
    """Apply opacity to an RGBA image."""
    # Split into channels
    r, g, b, a = image.split()
    # Apply opacity to alpha channel
    a = a.point(lambda x: int(x * opacity))
    # Merge back
    return Image.merge("RGBA", (r, g, b, a))
```
- Tool schema defining the input parameters, descriptions, defaults, and validation for the compose_images tool.

```python
Tool(
    name="compose_images",
    description="Overlay one image on top of another (e.g., add watermark, logo). Uses PIL for precise positioning - no AI needed.",
    inputSchema={
        "type": "object",
        "properties": {
            "base_image_url": {
                "type": "string",
                "description": "URL of the background/base image",
            },
            "overlay_image_url": {
                "type": "string",
                "description": "URL of the image to overlay (e.g., logo, watermark). Use upload_file for local images.",
            },
            "position": {
                "type": "string",
                "enum": [
                    "top-left",
                    "top-right",
                    "bottom-left",
                    "bottom-right",
                    "center",
                    "custom",
                ],
                "default": "bottom-right",
                "description": "Where to place the overlay. Use 'custom' with x,y for exact positioning.",
            },
            "x": {
                "type": "integer",
                "description": "Custom X position in pixels (required if position='custom')",
            },
            "y": {
                "type": "integer",
                "description": "Custom Y position in pixels (required if position='custom')",
            },
            "scale": {
                "type": "number",
                "default": 0.15,
                "minimum": 0.01,
                "maximum": 1.0,
                "description": "Scale overlay relative to base image width (0.01-1.0). Default 0.15 = 15% of base width.",
            },
            "padding": {
                "type": "integer",
                "default": 20,
                "minimum": 0,
                "description": "Padding from edges in pixels (for preset positions)",
            },
            "opacity": {
                "type": "number",
                "default": 1.0,
                "minimum": 0.0,
                "maximum": 1.0,
                "description": "Overlay opacity (0.0=transparent, 1.0=opaque)",
            },
            "output_format": {
                "type": "string",
                "enum": ["png", "jpeg", "webp"],
                "default": "png",
                "description": "Output image format",
            },
        },
        "required": ["base_image_url", "overlay_image_url"],
    },
),
```
- src/fal_mcp_server/server.py:61-85 (registration): `TOOL_HANDLERS` dictionary that maps the 'compose_images' tool name to its handler function for execution routing (a dispatch sketch follows this list).

```python
TOOL_HANDLERS = {
    # Utility tools (no queue needed)
    "list_models": handle_list_models,
    "recommend_model": handle_recommend_model,
    "get_pricing": handle_get_pricing,
    "get_usage": handle_get_usage,
    "upload_file": handle_upload_file,
    # Image generation tools
    "generate_image": handle_generate_image,
    "generate_image_structured": handle_generate_image_structured,
    "generate_image_from_image": handle_generate_image_from_image,
    # Image editing tools
    "remove_background": handle_remove_background,
    "upscale_image": handle_upscale_image,
    "edit_image": handle_edit_image,
    "inpaint_image": handle_inpaint_image,
    "resize_image": handle_resize_image,
    "compose_images": handle_compose_images,
    # Video tools
    "generate_video": handle_generate_video,
    "generate_video_from_image": handle_generate_video_from_image,
    "generate_video_from_video": handle_generate_video_from_video,
    # Audio tools
    "generate_music": handle_generate_music,
}
```
- src/fal_mcp_server/handlers/__init__.py:10-11 (registration): Import of the handle_compose_images function into the handlers package for re-export.

```python
from fal_mcp_server.handlers.image_editing_handlers import (
    handle_compose_images,
```
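
To make the handler's sizing and placement concrete, here is a worked example of the math performed by the handler and `_calculate_overlay_position`; all dimensions are hypothetical:

```python
# Worked example of the overlay placement math (all dimensions hypothetical).
base_size = (2000, 1200)        # base image: width x height in pixels
overlay_orig = (600, 300)       # overlay image before resizing
scale = 0.15                    # default: 15% of the base width
padding = 20                    # default edge padding for preset positions

# Scaling: overlay width becomes 15% of the base width; height keeps the aspect ratio.
overlay_w = int(base_size[0] * scale)      # 300
ratio = overlay_w / overlay_orig[0]        # 0.5
overlay_h = int(overlay_orig[1] * ratio)   # 150

# "bottom-right" preset: offset from the bottom-right corner by the padding.
x = base_size[0] - overlay_w - padding     # 2000 - 300 - 20 = 1680
y = base_size[1] - overlay_h - padding     # 1200 - 150 - 20 = 1030
print((x, y))                              # (1680, 1030)
```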
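
For context on how the registration entry is used, below is a hedged sketch of how a tool call might be routed through `TOOL_HANDLERS`; the `dispatch` helper and its error handling are assumptions for illustration, not code from server.py:

```python
# Hypothetical dispatch helper (a sketch, not code from server.py): it looks up
# the handler registered under a tool name in TOOL_HANDLERS and awaits it with
# the same (arguments, registry, queue_strategy) signature shown above.
async def dispatch(name, arguments, registry, queue_strategy):
    handler = TOOL_HANDLERS.get(name)
    if handler is None:
        raise ValueError(f"Unknown tool: {name}")
    return await handler(arguments, registry, queue_strategy)
```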