"""FastMCP server definition for Imagen MCP."""
import json
from fastmcp import FastMCP
from Imagen_MCP.constants import (
DEFAULT_MODEL,
DEFAULT_SIZE,
DEFAULT_QUALITY,
DEFAULT_STYLE,
DEFAULT_BATCH_COUNT,
DEFAULT_GENERATION_TIMEOUT,
)
from Imagen_MCP.resources.models import (
get_model_catalog,
get_model_info,
list_model_ids,
)
from Imagen_MCP.services.model_registry import get_model_registry
from Imagen_MCP.tools.batch_generate import (
get_batch_status as _get_batch_status,
get_next_image as _get_next_image,
start_image_batch as _start_image_batch,
)
from Imagen_MCP.tools.generate_image import generate_image as _generate_image
# Create the MCP server instance; all @mcp.tool() / @mcp.resource()
# decorators below register their functions against this object.
mcp = FastMCP("Imagen MCP Server")
# ============================================================================
# TOOLS
# ============================================================================
@mcp.tool()
async def generate_image(
    prompt: str,
    model: str = DEFAULT_MODEL,
    size: str = DEFAULT_SIZE,
    quality: str = DEFAULT_QUALITY,
    style: str = DEFAULT_STYLE,
) -> dict:
    """Generate a single image from a text prompt.

    Args:
        prompt: Text description of the image to generate.
        model: Model to use (imagen-4, imagen-4-fast, imagen-4-ultra, flux-1.1-pro, gpt-image-1).
        size: Image size (256x256, 512x512, 1024x1024, 1792x1024, 1024x1792).
        quality: Image quality (standard, hd).
        style: Image style (vivid, natural).

    Returns:
        Dictionary with success status, file path, and metadata.
    """
    # Forward every argument to the underlying implementation, then
    # serialize the pydantic result to a plain dict for the MCP transport.
    request = {
        "prompt": prompt,
        "model": model,
        "size": size,
        "quality": quality,
        "style": style,
    }
    outcome = await _generate_image(**request)
    return outcome.model_dump()
@mcp.tool()
async def start_image_batch(
    prompt: str,
    count: int = DEFAULT_BATCH_COUNT,
    model: str = DEFAULT_MODEL,
    size: str = DEFAULT_SIZE,
    quality: str = DEFAULT_QUALITY,
    style: str = DEFAULT_STYLE,
) -> dict:
    """Start generating multiple images and return the first one.

    Kicks off background generation of several images, blocks until the
    first is ready, and returns it along with a session ID that can be
    used to retrieve the remaining images.

    Args:
        prompt: Text description of the image to generate.
        count: Number of images to generate (2-10).
        model: Model to use for generation.
        size: Image size.
        quality: Image quality (standard, hd).
        style: Image style (vivid, natural).

    Returns:
        Dictionary with session_id, first_image_path, and pending_count.
    """
    # Pass everything through to the batch implementation and flatten
    # the pydantic response into a dict.
    batch = await _start_image_batch(
        prompt=prompt,
        count=count,
        model=model,
        size=size,
        quality=quality,
        style=style,
    )
    return batch.model_dump()
@mcp.tool()
async def get_next_image(
    session_id: str,
    timeout: float = DEFAULT_GENERATION_TIMEOUT,
) -> dict:
    """Get the next available image from a batch generation session.

    Retrieves the next image from an ongoing batch generation. Returns
    immediately if an image is already available; otherwise blocks until
    one becomes available or the timeout elapses.

    Args:
        session_id: Session ID from start_image_batch.
        timeout: Maximum time to wait for an image (seconds, 1-300).

    Returns:
        Dictionary with file_path, has_more flag, and pending_count.
    """
    # Delegate to the batch implementation and serialize the result.
    response = await _get_next_image(session_id=session_id, timeout=timeout)
    return response.model_dump()
@mcp.tool()
async def get_batch_status(session_id: str) -> dict:
    """Get the current status of a batch generation session.

    Args:
        session_id: Session ID from start_image_batch.

    Returns:
        Dictionary with session status, completed/pending counts, and errors.
    """
    # Single-expression delegation: await the status lookup, then
    # serialize the pydantic model straight to a dict.
    return (await _get_batch_status(session_id=session_id)).model_dump()
@mcp.tool()
def list_models() -> dict:
    """List all available image generation models with their descriptions.

    Returns a comprehensive list of all available models including:
    - Model ID and display name
    - Provider (Google, OpenAI, etc.)
    - Description and intended use cases
    - Strengths and weaknesses
    - Supported image sizes
    - Rate limits and capabilities

    Use this tool to discover which models are available and choose
    the best one for your image generation needs.

    Returns:
        Dictionary with models list and default model information.
    """
    registry = get_model_registry()
    # Build one summary entry per registered model. A comprehension
    # replaces the manual append loop (idiomatic, same output order).
    models = [
        {
            "id": model.id,
            "name": model.name,
            "provider": model.provider,
            "description": model.description,
            "use_cases": model.use_cases,
            "strengths": model.strengths,
            "weaknesses": model.weaknesses,
            "supported_sizes": model.capabilities.supported_sizes,
            "max_images_per_request": model.capabilities.max_images_per_request,
            "supports_hd_quality": model.capabilities.supports_hd_quality,
            "rate_limit": f"{model.rate_limit.messages_per_period} messages per {model.rate_limit.period_hours} hours",
        }
        for model in registry.get_all_models()
    ]
    return {
        "models": models,
        "total_count": len(models),
        "default_model": DEFAULT_MODEL,
        "usage_hint": "Use the 'model' parameter in generate_image or start_image_batch to specify which model to use.",
    }
@mcp.tool()
def get_model_details(model_id: str) -> dict:
    """Get detailed information about a specific image generation model.

    Args:
        model_id: The model identifier (e.g., 'imagen-4', 'imagen-4-fast', 'flux-1.1-pro').

    Returns:
        Dictionary with complete model details including capabilities,
        rate limits, use cases, strengths, and weaknesses.
        Returns an error if the model is not found.
    """
    # NOTE: the docstring example previously listed 'dall-e-3', which is
    # not among the models this server documents (see generate_image);
    # examples now use ids from the actual catalog.
    registry = get_model_registry()
    model = registry.get_model(model_id)
    if model is None:
        # Unknown id: return a structured error plus the valid ids so the
        # caller can self-correct without a second discovery round-trip.
        return {
            "error": f"Model not found: {model_id}",
            "available_models": [m.id for m in registry.get_all_models()],
            "hint": "Use list_models() to see all available models.",
        }
    return {
        "id": model.id,
        "name": model.name,
        "api_id": model.api_id,
        "provider": model.provider,
        "description": model.description,
        "use_cases": model.use_cases,
        "strengths": model.strengths,
        "weaknesses": model.weaknesses,
        "capabilities": {
            "supported_sizes": model.capabilities.supported_sizes,
            "max_images_per_request": model.capabilities.max_images_per_request,
            "supports_hd_quality": model.capabilities.supports_hd_quality,
            "supports_style_parameter": model.capabilities.supports_style_parameter,
            "max_prompt_length": model.capabilities.max_prompt_length,
        },
        "rate_limit": {
            "messages_per_period": model.rate_limit.messages_per_period,
            "period_hours": model.rate_limit.period_hours,
            "category": model.rate_limit.category,
        },
    }
# ============================================================================
# RESOURCES
# ============================================================================
@mcp.resource("models://image-generation")
def models_catalog() -> str:
    """Get the complete catalog of available image generation models.

    Returns a JSON string listing every available model with its
    capabilities, rate limits, use cases, strengths, and weaknesses.
    """
    # Serialize the catalog in one expression; pretty-print for readability.
    return json.dumps(get_model_catalog(), indent=2)
@mcp.resource("models://image-generation/{model_id}")
def model_details(model_id: str) -> str:
    """Get detailed information about a specific model.

    Args:
        model_id: The model identifier (e.g., 'imagen-4', 'flux-1.1-pro').

    Returns:
        JSON string with model details, or error if not found.
    """
    info = get_model_info(model_id)
    # Success path first: pretty-printed model details.
    if info is not None:
        return json.dumps(info, indent=2)
    # Unknown id: compact error payload listing the valid ids.
    return json.dumps(
        {
            "error": f"Model not found: {model_id}",
            "available_models": list_model_ids(),
        }
    )
# ============================================================================
# MAIN
# ============================================================================
if __name__ == "__main__":
    # Start the MCP server when this module is executed as a script
    # (FastMCP's run() blocks until the server shuts down).
    mcp.run()