"""ComfyUI MCP Server -- generate images via a local ComfyUI instance."""
import json
import logging
from mcp.server.fastmcp import FastMCP
from .comfyui_client import ComfyUIClient, extract_image_filenames
from .config import settings
from .workflows import build_txt2img_workflow
logger = logging.getLogger(__name__)
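# FastMCP server instance; the functions decorated with @mcp.tool() below are
# registered as MCP tools, and the instructions string is surfaced to clients.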
mcp = FastMCP(
"comfyui",
instructions="MCP server for local image generation via ComfyUI",
)
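# A fresh client is constructed for every tool call, so the configured ComfyUI
# URL is read from settings at call time rather than cached at import time.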
def _get_client() -> ComfyUIClient:
"""Create a ComfyUIClient from current settings."""
return ComfyUIClient(base_url=settings.url)
@mcp.tool()
async def generate_image(
prompt: str,
negative_prompt: str = "",
model: str | None = None,
width: int | None = None,
height: int | None = None,
steps: int | None = None,
cfg_scale: float | None = None,
seed: int = -1,
) -> str:
"""Generate an image from a text prompt using ComfyUI.
Args:
prompt: The text prompt describing the image to generate.
negative_prompt: Things to avoid in the generated image.
model: Checkpoint model name. Uses default if not specified.
width: Image width in pixels (default 1024).
height: Image height in pixels (default 1024).
steps: Number of sampling steps (default 20).
cfg_scale: Classifier-free guidance scale (default 7.0).
seed: Random seed (-1 for random).
Returns:
JSON string with prompt_id, status, image filenames, and elapsed time.
"""
workflow = build_txt2img_workflow(
prompt=prompt,
negative_prompt=negative_prompt,
model=model,
width=width,
height=height,
steps=steps,
cfg_scale=cfg_scale,
seed=seed,
)
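    # The workflow builder is expected to fall back to its own defaults for any
    # None values (presumably the defaults documented above). Submit the graph,
    # then block until ComfyUI reports the job as finished.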
async with _get_client() as client:
prompt_id = await client.submit_workflow(workflow)
logger.info("Submitted workflow, prompt_id=%s", prompt_id)
result = await client.poll_until_complete(prompt_id)
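        # result.images holds the output filenames reported by ComfyUI; the
        # same details can be re-fetched later via the get_generation tool.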
return json.dumps(
{
"prompt_id": result.prompt_id,
"status": result.status,
"images": result.images,
"elapsed_seconds": result.elapsed_seconds,
},
indent=2,
)
@mcp.tool()
async def list_models() -> str:
"""List available checkpoint models in ComfyUI.
Returns:
JSON string with an array of model objects (name, filename, type).
"""
async with _get_client() as client:
models = await client.list_checkpoints()
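        # Checkpoint discovery is delegated to the client (typically backed by
        # ComfyUI's object_info endpoint, though that depends on the client
        # implementation).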
return json.dumps(
[{"name": m.name, "filename": m.filename, "type": m.type} for m in models],
indent=2,
)
@mcp.tool()
async def get_queue_status() -> str:
"""Get the current ComfyUI queue status.
Returns:
        JSON string with pending and running queue counts.
"""
async with _get_client() as client:
status = await client.get_queue()
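        # Only the aggregate pending and running counts are returned to the caller.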
return json.dumps(
{
"pending": status.pending,
"running": status.running,
},
indent=2,
)
@mcp.tool()
async def get_generation(prompt_id: str) -> str:
"""Get details about a specific generation by its prompt ID.
Args:
prompt_id: The prompt ID returned from generate_image.
Returns:
        JSON string with the generation status and image filenames,
        or a "not_found" status if ComfyUI has no history for the ID.
"""
async with _get_client() as client:
history = await client.get_history(prompt_id)
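        # get_history returns None when ComfyUI has no record of this prompt ID.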
if history is None:
return json.dumps(
{"prompt_id": prompt_id, "status": "not_found"},
indent=2,
)
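        # ComfyUI history entries map node IDs to their outputs; the helper
        # below collects any image filenames found in those outputs.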
outputs = history.get("outputs", {})
images = extract_image_filenames(outputs)
return json.dumps(
{
"prompt_id": prompt_id,
"status": "completed" if outputs else "pending",
"images": images,
},
indent=2,
)
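# main() runs the server over stdio: the MCP client launches this process and
# exchanges JSON-RPC messages over stdin/stdout. A minimal client configuration
# might look like the sketch below (the exact file and schema depend on the MCP
# client; this assumes the package's "comfyui-mcp" console script is on PATH):
#
#   {"mcpServers": {"comfyui": {"command": "comfyui-mcp"}}}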
def main() -> None:
"""Entry point for the comfyui-mcp console script."""
mcp.run(transport="stdio")
if __name__ == "__main__":
main()