"""Game Assets Generator MCP Server."""
import os
import sys
from pathlib import Path
# Load .env file FIRST before any other imports that might read env vars
try:
from dotenv import load_dotenv
# Find .env file relative to this file's directory
_env_path = Path(__file__).parent.parent / ".env"
if _env_path.exists():
load_dotenv(_env_path)
print(f"[MCP] Loaded .env from {_env_path}", file=sys.stderr)
else:
print(f"[MCP] No .env file found at {_env_path}", file=sys.stderr)
except ImportError:
print("[MCP] python-dotenv not installed, using system env vars only", file=sys.stderr)
import json
import base64
import asyncio
from typing import Optional, List
from datetime import datetime
from PIL import Image
from mcp.server.fastmcp import FastMCP
from .backends import MockBackend, ComfyUIBackend, BaseBackend
from .presets import get_preset, list_presets, PRESETS
from .utils import (
ensure_directory,
generate_filename,
image_to_base64,
resize_image,
create_spritesheet,
split_spritesheet,
remove_background,
add_outline,
validate_path,
hash_image,
create_depth_map,
create_canny_edge
)
# Initialize MCP server
mcp = FastMCP(
    "ComfyAI-MCP-GameAssets",
    instructions="Generate 2D game assets using AI, integrated with ComfyUI"
)

# Configuration from environment (now .env is already loaded)
BACKEND_TYPE = os.getenv("BACKEND_TYPE", "mock")  # "mock" or "comfyui" (see get_backend below)
COMFYUI_HOST = os.getenv("COMFYUI_HOST", "127.0.0.1")
COMFYUI_PORT = int(os.getenv("COMFYUI_PORT", "8188"))
OUTPUT_DIR = Path(os.getenv("OUTPUT_DIR", "./output"))  # root directory for all saved assets
UNITY_ASSETS_DIR = os.getenv("UNITY_ASSETS_DIR", "")  # used by export_to_unity; empty disables export
ALLOW_WRITE_ROOTS = os.getenv("ALLOW_WRITE_ROOTS", "").split(";")  # semicolon-separated allowlist; [""] when unset

# Startup diagnostics go to stderr (stdout may carry the MCP stdio transport)
print(f"[MCP] BACKEND_TYPE = {BACKEND_TYPE}", file=sys.stderr)
print(f"[MCP] COMFYUI_HOST = {COMFYUI_HOST}:{COMFYUI_PORT}", file=sys.stderr)
# Initialize backend
def get_backend() -> BaseBackend:
    """Return the image-generation backend selected by BACKEND_TYPE.

    "comfyui" selects the real ComfyUI backend; anything else falls back
    to the mock backend (useful for development without a GPU server).
    """
    if BACKEND_TYPE != "comfyui":
        print("[MCP] Using Mock backend", file=sys.stderr)
        return MockBackend()
    print("[MCP] Using ComfyUI backend", file=sys.stderr)
    return ComfyUIBackend(host=COMFYUI_HOST, port=COMFYUI_PORT)


backend = get_backend()
@mcp.tool()
async def ping() -> str:
    """Check if the MCP server and backend are healthy.
    Returns:
        Status message indicating server and backend health
    """
    healthy = await backend.health_check()
    status = {
        "status": "ok",
        "backend": backend.get_name(),
        "backend_type": BACKEND_TYPE,
        "backend_healthy": healthy,
        "timestamp": datetime.now().isoformat()
    }
    return json.dumps(status, indent=2)
@mcp.tool()
async def list_available_presets() -> str:
    """List all available style presets for asset generation.
    Returns:
        JSON object containing all presets with their configurations
    """
    presets = list_presets()
    return json.dumps(presets, indent=2)
@mcp.tool()
async def generate_sprite(
    prompt: str,
    preset: str = "default",
    width: Optional[int] = None,
    height: Optional[int] = None,
    negative_prompt: Optional[str] = None,
    seed: Optional[int] = None,
    steps: Optional[int] = None,
    save_to_file: bool = False,
    filename: Optional[str] = None
) -> str:
    """Generate a single game sprite from a text description.
    Args:
        prompt: Description of the sprite to generate (e.g., "a blue slime monster")
        preset: Style preset to use (default, pixel_16, pixel_32, flat_ui, handpainted, etc.)
        width: Override width in pixels
        height: Override height in pixels
        negative_prompt: Additional things to avoid (appended to preset's negative prompt)
        seed: Random seed for reproducibility
        steps: Number of generation steps (more = higher quality but slower)
        save_to_file: Whether to save the image to disk
        filename: Custom filename (auto-generated if not provided)
    Returns:
        JSON with base64 image data and metadata
    """
    cfg = get_preset(preset)

    # Compose the positive/negative prompts from the preset template.
    full_prompt = f"{cfg.prompt_prefix}{prompt}{cfg.prompt_suffix}"
    full_negative = cfg.negative_prompt
    if negative_prompt:
        full_negative = f"{full_negative}, {negative_prompt}"

    # Caller overrides win; otherwise fall back to the preset defaults.
    img_width = width or cfg.default_width
    img_height = height or cfg.default_height
    gen_steps = steps or cfg.steps

    # When the requested size is below the preset's native resolution, render
    # at native size and downscale afterwards.
    needs_downscale = (img_width < cfg.default_width) or (img_height < cfg.default_height)
    if needs_downscale:
        render_width, render_height = cfg.default_width, cfg.default_height
    else:
        render_width, render_height = img_width, img_height

    image_bytes = await backend.generate_image(
        prompt=full_prompt,
        negative_prompt=full_negative,
        width=render_width,
        height=render_height,
        seed=seed,
        steps=gen_steps,
        cfg_scale=cfg.cfg_scale,
        sampler=cfg.sampler,
        scheduler=cfg.scheduler
    )

    if needs_downscale:
        # Pixel presets keep hard edges (nearest); others get smooth Lanczos.
        filt = Image.Resampling.NEAREST if preset.startswith("pixel") else Image.Resampling.LANCZOS
        image_bytes = resize_image(image_bytes, img_width, img_height, resample=filt)

    result = {
        "success": True,
        "backend": backend.get_name(),
        "backend_type": BACKEND_TYPE,
        "image_base64": image_to_base64(image_bytes),
        "width": img_width,
        "height": img_height,
        "preset": preset,
        "prompt": full_prompt,
        "hash": hash_image(image_bytes)
    }

    if save_to_file:
        sprite_dir = ensure_directory(OUTPUT_DIR / "sprites")
        target = sprite_dir / (filename or generate_filename(prefix="sprite", suffix=preset))
        target.write_bytes(image_bytes)
        result["file_path"] = str(target)

    return json.dumps(result, indent=2)
@mcp.tool()
async def generate_icons(
    prompts: List[str],
    preset: str = "icon",
    size: int = 64,
    seed: Optional[int] = None,
    create_atlas: bool = False,
    save_to_file: bool = False
) -> str:
    """Generate multiple game icons from a list of descriptions.
    Args:
        prompts: List of icon descriptions (e.g., ["sword", "shield", "potion"])
        preset: Style preset to use (default: icon). Options: icon, icon_item, flat_ui
        size: Icon size in pixels (square)
        seed: Base seed for reproducibility (each icon gets seed+index)
        create_atlas: Whether to combine icons into a single spritesheet
        save_to_file: Whether to save images to disk
    Returns:
        JSON with base64 images for each icon and optional atlas
    """
    if not prompts:
        return json.dumps({"success": False, "error": "No prompts provided"}, indent=2)
    preset_config = get_preset(preset)

    # Loop invariants hoisted: the resampling filter and output directory are
    # identical for every icon, so compute them once.
    resample = Image.Resampling.NEAREST if preset.startswith("pixel") else Image.Resampling.LANCZOS
    output_dir = ensure_directory(OUTPUT_DIR / "icons") if save_to_file else None

    icons = []
    raw_images = []  # keep raw bytes so building the atlas needs no base64 round-trip
    for i, prompt in enumerate(prompts):
        full_prompt = f"{preset_config.prompt_prefix}{prompt}{preset_config.prompt_suffix}"
        # Derive a deterministic per-icon seed from the base seed.
        gen_seed = (seed + i) if seed is not None else None
        image_bytes = await backend.generate_image(
            prompt=full_prompt,
            negative_prompt=preset_config.negative_prompt,
            width=preset_config.default_width,
            height=preset_config.default_height,
            seed=gen_seed,
            steps=preset_config.steps,
            cfg_scale=preset_config.cfg_scale,
            sampler=preset_config.sampler,
            scheduler=preset_config.scheduler
        )
        image_bytes = resize_image(image_bytes, size, size, resample=resample)
        raw_images.append(image_bytes)
        icon_data = {
            "index": i,
            "prompt": prompt,
            "image_base64": image_to_base64(image_bytes),
            "size": size
        }
        if save_to_file:
            fname = generate_filename(prefix=f"icon_{i}", suffix=prompt[:20].replace(" ", "_"))
            file_path = output_dir / fname
            file_path.write_bytes(image_bytes)
            icon_data["file_path"] = str(file_path)
        icons.append(icon_data)

    result = {
        "success": True,
        "count": len(icons),
        "icons": icons
    }

    # Create atlas if requested
    if create_atlas and icons:
        atlas_bytes = create_spritesheet(raw_images, columns=min(4, len(icons)))
        result["atlas_base64"] = image_to_base64(atlas_bytes)
        if save_to_file:
            atlas_dir = ensure_directory(OUTPUT_DIR / "atlases")
            atlas_path = atlas_dir / generate_filename(prefix="icon_atlas")
            atlas_path.write_bytes(atlas_bytes)
            result["atlas_file_path"] = str(atlas_path)
    return json.dumps(result, indent=2)
@mcp.tool()
async def generate_tileset(
    theme: str,
    tile_types: List[str],
    preset: str = "tileset",
    tile_size: int = 32,
    seed: Optional[int] = None,
    save_to_file: bool = False
) -> str:
    """Generate a set of tileable game tiles.
    Args:
        theme: Overall theme for the tileset (e.g., "forest", "dungeon", "sci-fi")
        tile_types: List of tile types to generate (e.g., ["ground", "wall", "water"])
        preset: Style preset to use (default: tileset). Options: tileset, topdown_tile
        tile_size: Size of each tile in pixels (square)
        seed: Base seed for reproducibility (each tile gets seed+index)
        save_to_file: Whether to save images to disk
    Returns:
        JSON with base64 images for each tile type
    """
    if not tile_types:
        return json.dumps({"success": False, "error": "No tile_types provided"}, indent=2)
    preset_config = get_preset(preset)

    # Loop invariants hoisted: render size, downscale decision, resampling
    # filter and output directory are the same for every tile.
    preset_min = min(preset_config.default_width, preset_config.default_height)
    should_downscale = tile_size < preset_min
    # Render at the preset's native size when the target is smaller, then downscale.
    render_size = preset_min if should_downscale else tile_size
    resample = Image.Resampling.NEAREST if preset.startswith("pixel") else Image.Resampling.LANCZOS
    output_dir = ensure_directory(OUTPUT_DIR / "tiles" / theme) if save_to_file else None

    tiles = []
    for i, tile_type in enumerate(tile_types):
        prompt = f"{theme} {tile_type} tile"
        full_prompt = f"{preset_config.prompt_prefix}{prompt}{preset_config.prompt_suffix}"
        # Deterministic per-tile seed derived from the base seed.
        gen_seed = (seed + i) if seed is not None else None
        image_bytes = await backend.generate_image(
            prompt=full_prompt,
            negative_prompt=preset_config.negative_prompt,
            width=render_size,
            height=render_size,
            seed=gen_seed,
            steps=preset_config.steps,
            cfg_scale=preset_config.cfg_scale,
            sampler=preset_config.sampler,
            scheduler=preset_config.scheduler
        )
        if should_downscale:
            image_bytes = resize_image(image_bytes, tile_size, tile_size, resample=resample)
        tile_data = {
            "index": i,
            "type": tile_type,
            "theme": theme,
            "image_base64": image_to_base64(image_bytes),
            "size": tile_size
        }
        if save_to_file:
            fname = generate_filename(prefix=f"tile_{tile_type}")
            file_path = output_dir / fname
            file_path.write_bytes(image_bytes)
            tile_data["file_path"] = str(file_path)
        tiles.append(tile_data)
    return json.dumps({
        "success": True,
        "theme": theme,
        "tile_size": tile_size,
        "count": len(tiles),
        "tiles": tiles
    }, indent=2)
@mcp.tool()
async def generate_character(
    description: str,
    poses: Optional[List[str]] = None,  # annotation fixed: default is None, not a list
    preset: str = "character",
    width: int = 256,
    height: int = 512,
    seed: Optional[int] = None,
    steps: Optional[int] = None,
    save_to_file: bool = False
) -> str:
    """Generate character sprites with optional multiple poses.
    Args:
        description: Character description (e.g., "a knight in silver armor")
        poses: List of poses to generate (e.g., ["idle", "walking", "attacking"]); defaults to ["idle"]
        preset: Style preset to use (default: character). Options: character, character_portrait, pixel_character
        width: Sprite width in pixels
        height: Sprite height in pixels
        seed: Base seed for reproducibility (each pose gets seed+index)
        steps: Override generation steps (higher = better quality but slower)
        save_to_file: Whether to save images to disk
    Returns:
        JSON with base64 images for each pose
    """
    if poses is None:
        poses = ["idle"]
    preset_config = get_preset(preset)
    gen_steps = steps or preset_config.steps

    # Render at the preset's native resolution when the target is smaller,
    # then downscale afterwards.
    should_downscale = (width < preset_config.default_width) or (height < preset_config.default_height)
    render_width = preset_config.default_width if should_downscale else width
    render_height = preset_config.default_height if should_downscale else height
    # Loop invariants hoisted: the filter and output directory never change per pose.
    resample = Image.Resampling.NEAREST if preset.startswith("pixel") else Image.Resampling.LANCZOS
    output_dir = ensure_directory(OUTPUT_DIR / "characters") if save_to_file else None

    characters = []
    for i, pose in enumerate(poses):
        prompt = f"{description}, {pose} pose"
        full_prompt = f"{preset_config.prompt_prefix}{prompt}{preset_config.prompt_suffix}"
        # Deterministic per-pose seed derived from the base seed.
        gen_seed = (seed + i) if seed is not None else None
        image_bytes = await backend.generate_image(
            prompt=full_prompt,
            negative_prompt=preset_config.negative_prompt,
            width=render_width,
            height=render_height,
            seed=gen_seed,
            steps=gen_steps,
            cfg_scale=preset_config.cfg_scale,
            sampler=preset_config.sampler,
            scheduler=preset_config.scheduler
        )
        if should_downscale:
            image_bytes = resize_image(image_bytes, width, height, resample=resample)
        char_data = {
            "index": i,
            "pose": pose,
            "image_base64": image_to_base64(image_bytes),
            "width": width,
            "height": height
        }
        if save_to_file:
            fname = generate_filename(prefix=f"char_{pose}")
            file_path = output_dir / fname
            file_path.write_bytes(image_bytes)
            char_data["file_path"] = str(file_path)
        characters.append(char_data)
    return json.dumps({
        "success": True,
        "description": description,
        "count": len(characters),
        "characters": characters
    }, indent=2)
@mcp.tool()
async def generate_character_animations(
    reference_image_base64: str,
    description: str,
    poses: List[str],
    denoise: float = 0.35,
    seed: Optional[int] = None,
    preset: str = "character",
    pose_denoise_boost: float = 0.25,
    save_to_file: bool = False
) -> str:
    """Generate multiple character poses with consistent identity using img2img.
    This tool takes a reference character image and generates variations with different
    poses while maintaining the same character identity (outfit, colors, style).
    Args:
        reference_image_base64: Base64 encoded reference character image (front/idle view)
        description: Character description (e.g., "a knight in silver armor")
        poses: List of poses to generate (e.g., ["walking", "attacking", "jumping"])
        denoise: How much to change from reference (0.2=very similar, 0.5=more different). Default 0.35
        seed: Random seed for reproducibility (same seed = same variations)
        preset: Style preset to use (default: character)
        pose_denoise_boost: Additional denoise for action poses (default: 0.25)
        save_to_file: Whether to save images to disk
    Returns:
        JSON with base64 images for each pose, all maintaining character identity
    Example workflow:
        1. First generate a base character with generate_character(description, poses=["idle"])
        2. Take the best result's image_base64 as reference_image_base64
        3. Call this tool with poses=["walking", "running", "attacking"]
    """
    # Imports hoisted out of the loop (they were re-executed per pose).
    import hashlib
    import random

    preset_config = get_preset(preset)
    reference_bytes = base64.b64decode(reference_image_base64)
    # Use fixed seed for all poses if provided (better consistency)
    if seed is None:
        seed = random.randint(0, 2**32 - 1)

    # Stronger pose templates to prevent img2img from copying the reference pose.
    # (img2img without ControlNet can only change pose to a limited extent, so we need to be explicit.)
    # Hoisted: this table is constant across poses.
    pose_hint_map = {
        "idle": "idle stance, neutral pose",
        "walk": "walking pose, one leg forward, arms swinging",
        "walking": "walking pose, one leg forward, arms swinging",
        "run": "running pose, dynamic motion, leaning forward",
        "running": "running pose, dynamic motion, leaning forward",
        "attack": "attacking pose, dynamic action, weapon swing",
        "attacking": "attacking pose, dynamic action, weapon swing",
        "jump": "jumping pose, mid-air, dynamic",
        "jumping": "jumping pose, mid-air, dynamic",
    }
    # Keywords that mark a pose as an "action" pose deserving extra denoise.
    action_keywords = ["walk", "run", "attack", "hit", "slash", "jump", "kick", "punch", "cast", "shoot", "dash"]
    output_dir = ensure_directory(OUTPUT_DIR / "characters" / "animations") if save_to_file else None

    animations = []
    for i, pose in enumerate(poses):
        pose_key = pose.strip().lower()
        pose_hint = next((v for k, v in pose_hint_map.items() if k in pose_key), "dynamic pose")
        # Deterministic per-pose seed: mix the base seed with a stable hash of
        # the pose name plus the index so distinct poses diverge.
        pose_hash = int.from_bytes(hashlib.sha256(pose_key.encode("utf-8")).digest()[:4], "little")
        pose_seed = (seed + (pose_hash % 1000003) + (i * 10000019)) % (2**32 - 1)
        pose_is_action = any(k in pose_key for k in action_keywords)
        pose_denoise = denoise
        if pose_is_action:
            pose_denoise = min(0.75, denoise + pose_denoise_boost)
        # Clamp to the usable img2img range.
        pose_denoise = max(0.2, min(0.75, pose_denoise))
        prompt = f"{description}, {pose_hint}, {pose} pose, full body, same character identity, same outfit, same colors, consistent character sheet"
        full_prompt = f"{preset_config.prompt_prefix}{prompt}{preset_config.prompt_suffix}"
        full_negative = f"{preset_config.negative_prompt}, same pose as reference, idle pose, standing straight, front view, identical composition"
        try:
            image_bytes = await backend.generate_img2img(
                reference_image=reference_bytes,
                prompt=full_prompt,
                negative_prompt=full_negative,
                denoise=pose_denoise,
                seed=pose_seed,
                steps=preset_config.steps,
                cfg_scale=preset_config.cfg_scale,
                sampler=preset_config.sampler,
                scheduler=preset_config.scheduler
            )
        except NotImplementedError as e:
            # The active backend does not support img2img (e.g. the mock backend).
            return json.dumps({
                "success": False,
                "error": str(e),
                "backend": backend.get_name(),
                "backend_type": BACKEND_TYPE
            }, indent=2)
        anim_data = {
            "index": i,
            "pose": pose,
            "image_base64": image_to_base64(image_bytes),
            "denoise": pose_denoise,
            "seed": pose_seed
        }
        if save_to_file:
            fname = generate_filename(prefix=f"anim_{pose}")
            file_path = output_dir / fname
            file_path.write_bytes(image_bytes)
            anim_data["file_path"] = str(file_path)
        animations.append(anim_data)
    return json.dumps({
        "success": True,
        "description": description,
        "denoise": denoise,
        "base_seed": seed,
        "count": len(animations),
        "animations": animations
    }, indent=2)
@mcp.tool()
async def generate_with_viewpoint(
    prompt: str,
    view_type: str = "topdown",
    shape: str = "flat",
    preset: str = "topdown_prop",
    controlnet_model: str = "diffusers_xl_depth_full.safetensors",
    control_strength: float = 0.95,
    width: int = 1024,
    height: int = 1024,
    seed: Optional[int] = None,
    save_to_file: bool = False
) -> str:
    """Generate a game asset with precise camera viewpoint control using ControlNet.
    This tool uses depth maps to guide the generation, ensuring consistent camera angles
    like top-down, side view, front view, etc.
    Args:
        prompt: Description of the asset to generate (e.g., "a wooden barrel")
        view_type: Camera angle - "topdown", "side", "front", "3/4"
        shape: Object shape hint - "flat", "sphere", "cylinder", "box"
        preset: Style preset to use (default: topdown_prop)
        controlnet_model: ControlNet model (default: diffusers_xl_depth_full.safetensors)
        control_strength: How strongly to follow viewpoint (0.0-1.0, default: 0.95)
        width: Output width in pixels
        height: Output height in pixels
        seed: Random seed for reproducibility
        save_to_file: Whether to save the image to disk
    Returns:
        JSON with base64 image data and metadata
    Note:
        Requires ControlNet models installed in ComfyUI. Common depth models:
        - diffusers_xl_depth_full.safetensors (SDXL)
        - control_v11f1p_sd15_depth.pth (SD1.5)
    """
    preset_config = get_preset(preset)
    # Build full prompt with preset
    full_prompt = f"{preset_config.prompt_prefix}{prompt}{preset_config.prompt_suffix}"
    full_negative = preset_config.negative_prompt
    img_width = width
    img_height = height
    render_width = img_width
    render_height = img_height
    # Requests below the preset's native resolution are rendered larger and
    # downscaled after generation.
    should_downscale = (img_width < preset_config.default_width) or (img_height < preset_config.default_height)
    if should_downscale:
        # Uniform scale factor that lifts both dimensions to at least the preset default.
        scale = max(preset_config.default_width / max(1, img_width), preset_config.default_height / max(1, img_height))
        render_width = int(round(img_width * scale))
        render_height = int(round(img_height * scale))
        # Clamp render dimensions to match backend constraints (SDXL-safe):
        # multiples of 8 within [512, 2048].
        render_width = max(512, min(2048, (render_width // 8) * 8))
        render_height = max(512, min(2048, (render_height // 8) * 8))
    # Create depth map for the specified viewpoint
    depth_map = create_depth_map(render_width, render_height, view_type=view_type, shape=shape)
    try:
        image_bytes = await backend.generate_with_controlnet(
            prompt=full_prompt,
            control_image=depth_map,
            controlnet_model=controlnet_model,
            control_strength=control_strength,
            negative_prompt=full_negative,
            width=render_width,
            height=render_height,
            seed=seed,
            steps=preset_config.steps,
            cfg_scale=preset_config.cfg_scale,
            sampler=preset_config.sampler,
            scheduler=preset_config.scheduler
        )
    except NotImplementedError as e:
        # Backend has no ControlNet support (e.g. mock backend).
        return json.dumps({
            "success": False,
            "error": str(e),
            "hint": "ControlNet requires ComfyUI backend with ControlNet models installed",
            "backend": backend.get_name(),
            "backend_type": BACKEND_TYPE
        }, indent=2)
    except Exception as e:
        # Boundary handler: surface any backend failure as a structured error.
        return json.dumps({
            "success": False,
            "error": str(e),
            "hint": "Check if ControlNet model exists in ComfyUI/models/controlnet/",
            "backend": backend.get_name()
        }, indent=2)
    if should_downscale:
        resample = Image.Resampling.NEAREST if preset.startswith("pixel") else Image.Resampling.LANCZOS
        image_bytes = resize_image(image_bytes, img_width, img_height, resample=resample)
    result = {
        "success": True,
        "backend": backend.get_name(),
        "image_base64": image_to_base64(image_bytes),
        "width": img_width,
        "height": img_height,
        "view_type": view_type,
        "shape": shape,
        "control_strength": control_strength,
        "preset": preset,
        "prompt": full_prompt,
        "hash": hash_image(image_bytes)
    }
    if save_to_file:
        output_dir = ensure_directory(OUTPUT_DIR / "controlnet")
        fname = generate_filename(prefix=f"cn_{view_type}", suffix=shape)
        file_path = output_dir / fname
        file_path.write_bytes(image_bytes)
        result["file_path"] = str(file_path)
        # Also save the depth map for reference
        depth_path = output_dir / f"depth_{fname}"
        depth_path.write_bytes(depth_map)
        result["depth_map_path"] = str(depth_path)
    return json.dumps(result, indent=2)
@mcp.tool()
async def generate_topdown_asset(
    prompt: str,
    asset_type: str = "prop",
    size: int = 512,
    control_strength: float = 0.95,
    seed: Optional[int] = None,
    save_to_file: bool = False
) -> str:
    """Simplified tool to generate top-down 2D game assets with guaranteed viewpoint.
    This is a convenience wrapper around generate_with_viewpoint specifically for
    top-down games (RPG, strategy, etc.).
    Args:
        prompt: Description of the asset (e.g., "wooden treasure chest", "stone well")
        asset_type: Type of asset - "prop", "character", "creature", "tile", "effect"
        size: Output size in pixels (square)
        control_strength: How strictly to enforce top-down view (0.5-1.0)
        seed: Random seed for reproducibility
        save_to_file: Whether to save the image to disk
    Returns:
        JSON with base64 image data
    """
    # Each asset type maps to a (preset, depth-map shape) pair.
    type_presets = {
        "prop": ("topdown_prop", "box"),
        "character": ("topdown_character", "humanoid"),
        "creature": ("topdown_creature", "humanoid"),
        "tile": ("topdown_tile", "flat"),
        "effect": ("effect", "sphere"),
    }
    chosen_preset, chosen_shape = type_presets.get(asset_type, ("topdown_prop", "flat"))

    strength = control_strength
    final_prompt = prompt
    if asset_type == "character":
        # Characters are prone to duplicated parts: enforce a strong viewpoint
        # and an explicit single-body prompt.
        strength = max(control_strength, 0.90)
        final_prompt = (
            f"{prompt}, single character, one body, one head, full body, "
            f"no visible face, no eyes, no mouth, helmet top view, "
            f"no duplicated weapons, no duplicated armor, no floating parts, no separate objects"
        )
    elif asset_type == "creature":
        strength = max(control_strength, 0.90)
        final_prompt = (
            f"{prompt}, single creature, one body, full body, "
            f"no duplicated limbs, no floating parts, no separate objects"
        )
    elif asset_type == "prop":
        final_prompt = f"{prompt}, single object"
    elif asset_type == "effect":
        final_prompt = f"{prompt}, centered effect, radial glow"

    # Delegate to the general viewpoint tool with a fixed top-down camera.
    return await generate_with_viewpoint(
        prompt=final_prompt,
        view_type="topdown",
        shape=chosen_shape,
        preset=chosen_preset,
        control_strength=strength,
        width=size,
        height=size,
        seed=seed,
        save_to_file=save_to_file
    )
@mcp.tool()
async def process_image(
    image_base64: str,
    operations: List[str],
    resize_width: Optional[int] = None,
    resize_height: Optional[int] = None,
    outline_color: str = "#000000",
    outline_thickness: int = 1,
    save_to_file: bool = False
) -> str:
    """Apply post-processing operations to an image.
    Args:
        image_base64: Base64 encoded input image
        operations: List of operations to apply: "resize", "remove_background", "add_outline"
        resize_width: Target width for resize operation
        resize_height: Target height for resize operation
        outline_color: Hex color for outline (e.g., "#000000" or shorthand "#000")
        outline_thickness: Thickness of outline in pixels
        save_to_file: Whether to save processed image to disk
    Returns:
        JSON with processed image as base64, or {"success": False, "error": ...}
        if outline_color is not a valid hex color
    """
    # base64 is already imported at module level; the shadowing local import was removed.
    image_bytes = base64.b64decode(image_base64)
    for op in operations:
        if op == "resize" and resize_width and resize_height:
            image_bytes = resize_image(image_bytes, resize_width, resize_height)
        elif op == "remove_background":
            image_bytes = remove_background(image_bytes)
        elif op == "add_outline":
            # Parse hex color; accept both "#RRGGBB" and shorthand "#RGB".
            color = outline_color.lstrip("#")
            if len(color) == 3:
                color = "".join(ch * 2 for ch in color)
            try:
                if len(color) != 6:
                    raise ValueError(color)
                r, g, b = (int(color[i:i + 2], 16) for i in (0, 2, 4))
            except ValueError:
                # Return a structured error (consistent with the other tools)
                # instead of raising on malformed input.
                return json.dumps({
                    "success": False,
                    "error": f"Invalid outline_color: {outline_color!r} (expected hex like #RRGGBB)"
                }, indent=2)
            image_bytes = add_outline(image_bytes, color=(r, g, b, 255), thickness=outline_thickness)
    result = {
        "success": True,
        "image_base64": image_to_base64(image_bytes),
        "operations_applied": operations
    }
    if save_to_file:
        output_dir = ensure_directory(OUTPUT_DIR / "processed")
        fname = generate_filename(prefix="processed")
        file_path = output_dir / fname
        file_path.write_bytes(image_bytes)
        result["file_path"] = str(file_path)
    return json.dumps(result, indent=2)
@mcp.tool()
async def create_sprite_atlas(
    images_base64: List[str],
    columns: int = 4,
    padding: int = 0,
    save_to_file: bool = False,
    filename: Optional[str] = None
) -> str:
    """Combine multiple images into a sprite atlas/spritesheet.
    Args:
        images_base64: List of base64 encoded images
        columns: Number of columns in the atlas
        padding: Padding between sprites in pixels
        save_to_file: Whether to save atlas to disk
        filename: Custom filename for the atlas
    Returns:
        JSON with the combined atlas as base64
    """
    # base64 is already imported at module level; the shadowing local import was removed.
    images = [base64.b64decode(img) for img in images_base64]
    atlas_bytes = create_spritesheet(images, columns=columns, padding=padding)
    result = {
        "success": True,
        "image_base64": image_to_base64(atlas_bytes),
        "sprite_count": len(images),
        "columns": columns
    }
    if save_to_file:
        output_dir = ensure_directory(OUTPUT_DIR / "atlases")
        fname = filename or generate_filename(prefix="atlas")
        file_path = output_dir / fname
        file_path.write_bytes(atlas_bytes)
        result["file_path"] = str(file_path)
    return json.dumps(result, indent=2)
@mcp.tool()
async def export_to_unity(
    image_base64: str,
    asset_name: str,
    asset_type: str = "Sprites",
    subfolder: str = ""
) -> str:
    """Export a generated asset directly to a Unity project.
    Args:
        image_base64: Base64 encoded image to export
        asset_name: Name for the asset file (without extension)
        asset_type: Unity folder type (Sprites, Textures, UI, etc.)
        subfolder: Optional subfolder within the asset type folder
    Returns:
        JSON with export status and file path
    """
    if not UNITY_ASSETS_DIR:
        return json.dumps({
            "success": False,
            "error": "UNITY_ASSETS_DIR environment variable not set"
        }, indent=2)
    unity_path = Path(UNITY_ASSETS_DIR)
    if not unity_path.exists():
        return json.dumps({
            "success": False,
            "error": f"Unity assets directory not found: {UNITY_ASSETS_DIR}"
        }, indent=2)
    # Build target path: <UNITY_ASSETS_DIR>/<asset_type>[/<subfolder>]
    target_dir = unity_path / asset_type
    if subfolder:
        target_dir = target_dir / subfolder
    # Validate path is within allowed roots.
    # ALLOW_WRITE_ROOTS is [""] when the env var is unset, hence the [0] check.
    if ALLOW_WRITE_ROOTS and ALLOW_WRITE_ROOTS[0]:
        if not validate_path(str(target_dir), ALLOW_WRITE_ROOTS):
            return json.dumps({
                "success": False,
                "error": "Target path not in allowed write roots"
            }, indent=2)
    ensure_directory(target_dir)
    # base64 is already imported at module level; the shadowing local import was removed.
    image_bytes = base64.b64decode(image_base64)
    file_path = target_dir / f"{asset_name}.png"
    file_path.write_bytes(image_bytes)
    return json.dumps({
        "success": True,
        "file_path": str(file_path),
        "asset_name": asset_name,
        "asset_type": asset_type
    }, indent=2)
@mcp.tool()
async def batch_generate(
    prompts: List[str],
    preset: str = "default",
    width: Optional[int] = None,
    height: Optional[int] = None,
    seed: Optional[int] = None,
    save_to_file: bool = False
) -> str:
    """Generate multiple assets in batch from a list of prompts.
    Args:
        prompts: List of asset descriptions
        preset: Style preset to use for all generations
        width: Override width for all assets
        height: Override height for all assets
        seed: Base seed for reproducibility (each prompt gets seed+index)
        save_to_file: Whether to save all images to disk
    Returns:
        JSON with all generated assets
    """
    if not prompts:
        return json.dumps({"success": False, "error": "No prompts provided"}, indent=2)
    cfg = get_preset(preset)
    assets = []
    for idx, text in enumerate(prompts):
        composed = f"{cfg.prompt_prefix}{text}{cfg.prompt_suffix}"
        out_w = width or cfg.default_width
        out_h = height or cfg.default_height
        # Each prompt gets a deterministic offset from the base seed.
        item_seed = None if seed is None else seed + idx
        image_bytes = await backend.generate_image(
            prompt=composed,
            negative_prompt=cfg.negative_prompt,
            width=out_w,
            height=out_h,
            seed=item_seed,
            steps=cfg.steps,
            cfg_scale=cfg.cfg_scale,
            sampler=cfg.sampler,
            scheduler=cfg.scheduler
        )
        entry = {
            "index": idx,
            "prompt": text,
            "image_base64": image_to_base64(image_bytes),
            "width": out_w,
            "height": out_h
        }
        if save_to_file:
            batch_dir = ensure_directory(OUTPUT_DIR / "batch")
            target = batch_dir / generate_filename(prefix=f"batch_{idx}")
            target.write_bytes(image_bytes)
            entry["file_path"] = str(target)
        assets.append(entry)
    return json.dumps({
        "success": True,
        "preset": preset,
        "count": len(assets),
        "assets": assets
    }, indent=2)
# Run the server
def main():
    """Entry point: start the MCP server on the configured transport."""
    chosen_transport = os.getenv("MCP_TRANSPORT", "stdio")
    # An empty MCP_MOUNT_PATH is normalized to None.
    mount = os.getenv("MCP_MOUNT_PATH") or None
    mcp.run(transport=chosen_transport, mount_path=mount)


if __name__ == "__main__":
    main()