generate_image
Generate custom images from text prompts using AI models on Replicate. You can specify dimensions, aspect ratio, output format, and quality; the result is saved to a specified path for use with Printify's print-on-demand platform.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| aspectRatio | No | Aspect ratio (e.g., '16:9', '4:3', '1:1'). If provided, overrides width and height | |
| guidanceScale | No | Guidance scale | 7.5 |
| height | No | Image height in pixels | 1024 |
| imagePromptStrength | No | Image prompt strength, 0-1 (Flux 1.1 Pro Ultra only) | |
| model | No | Override the default model. Use get_defaults to see available models | |
| negativePrompt | No | Negative prompt | low quality, bad quality, sketches |
| numInferenceSteps | No | Number of inference steps | 25 |
| outputFormat | No | Output format: jpeg, png, or webp | png |
| outputPath | Yes | Full path where the generated image should be saved | |
| outputQuality | No | Output quality, 1-100 (Flux 1.1 Pro only) | |
| prompt | Yes | Text prompt for image generation | |
| promptUpsampling | No | Enable prompt upsampling (Flux 1.1 Pro only) | |
| raw | No | Generate less processed, more natural-looking images (Flux 1.1 Pro Ultra only) | |
| safetyTolerance | No | Safety tolerance (0-6) | 2 |
| seed | No | Random seed for reproducible generation | |
| width | No | Image width in pixels | 1024 |
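For orientation, here is a minimal sketch of the arguments for one tool call. The field names follow the schema above; the prompt and output path are hypothetical, and any optional field left out falls back to the server's stored defaults.

```typescript
// Hypothetical arguments for a generate_image tool call (field names follow the schema above).
const generateImageArgs = {
  prompt: "A vintage sunset over mountains, flat vector style", // required
  outputPath: "/tmp/designs/vintage-sunset.png",                // required; directory is created if missing
  aspectRatio: "1:1",      // overrides width/height when present
  outputFormat: "png",     // "jpeg" | "png" | "webp"
  seed: 42,                // optional, for reproducible output
};
```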
Implementation Reference
- src/index.ts:1247-1431 (registration): Registration of the 'generate_image' MCP tool using server.tool(), including the inline Zod schema and handler.

```typescript
server.tool(
  "generate_image",
  {
    prompt: z.string().describe("Text prompt for image generation"),
    outputPath: z.string().describe("Full path where the generated image should be saved"),

    // Optional model override
    model: z.string().optional()
      .describe("Optional: Override the default model. Use get_defaults to see available models"),

    // Common parameters for both models
    width: z.number().optional().default(1024).describe("Image width in pixels"),
    height: z.number().optional().default(1024).describe("Image height in pixels"),
    aspectRatio: z.string().optional().describe("Aspect ratio (e.g., '16:9', '4:3', '1:1'). If provided, overrides width and height"),
    outputFormat: z.enum(["jpeg", "png", "webp"]).optional().default("png").describe("Output format"),
    safetyTolerance: z.number().optional().default(2).describe("Safety tolerance (0-6)"),
    seed: z.number().optional().describe("Random seed for reproducible generation"),
    numInferenceSteps: z.number().optional().default(25).describe("Number of inference steps"),
    guidanceScale: z.number().optional().default(7.5).describe("Guidance scale"),
    negativePrompt: z.string().optional().default("low quality, bad quality, sketches").describe("Negative prompt"),

    // Flux 1.1 Pro specific parameters
    promptUpsampling: z.boolean().optional()
      .describe("Enable prompt upsampling (Flux 1.1 Pro only)"),
    outputQuality: z.number().optional()
      .describe("Output quality 1-100 (Flux 1.1 Pro only)"),

    // Flux 1.1 Pro Ultra specific parameters
    raw: z.boolean().optional()
      .describe("Generate less processed, more natural-looking images (Flux 1.1 Pro Ultra only)"),
    imagePromptStrength: z.number().optional()
      .describe("Image prompt strength 0-1 (Flux 1.1 Pro Ultra only)")
  },
  async ({
    prompt, outputPath, model, width, height, aspectRatio, outputFormat,
    safetyTolerance, seed, numInferenceSteps, guidanceScale, negativePrompt,
    promptUpsampling, outputQuality, raw, imagePromptStrength
  }): Promise<{ content: any[], isError?: boolean }> => {
    // Import the services
    const { generateImage } = await import('./services/image-generator.js');
    const { formatSuccessResponse } = await import('./utils/error-handler.js');
    const fs = await import('fs');
    const path = await import('path');

    // Check if Replicate client is initialized
    if (!replicateClient) {
      return {
        content: [{
          type: "text",
          text: "Replicate API client is not initialized. The REPLICATE_API_TOKEN environment variable may not be set."
        }],
        isError: true
      };
    }

    // Extract filename from the output path
    const fileName = path.basename(outputPath);

    // Check if we're using the Ultra model which requires ImgBB
    // Determine which model to use (user-specified or default)
    const modelToUse = model || replicateClient.getDefaultModel();

    console.log(`Starting generate_image with prompt: ${prompt}`);
    console.log(`Using model: ${modelToUse}`);
    console.log(`Output path: ${outputPath}`);

    // Get default parameters first
    const defaults = replicateClient.getAllDefaults();

    // Generate the image with Replicate and process with Sharp
    // Start with defaults, then override with parameters from the tool call
    const generationResult = await generateImage(
      replicateClient,
      prompt,
      fileName,
      {
        // Start with defaults
        model: defaults.model,
        width: defaults.width,
        height: defaults.height,
        aspectRatio: defaults.aspectRatio,
        outputFormat: defaults.outputFormat,
        safetyTolerance: defaults.safetyTolerance,
        numInferenceSteps: defaults.numInferenceSteps,
        guidanceScale: defaults.guidanceScale,
        negativePrompt: defaults.negativePrompt,
        raw: defaults.raw,
        promptUpsampling: defaults.promptUpsampling,
        outputQuality: defaults.outputQuality,

        // Override with parameters from the tool call (if provided)
        ...(model !== undefined && { model }),
        ...(width !== undefined && { width }),
        ...(height !== undefined && { height }),
        ...(aspectRatio !== undefined && { aspectRatio }),
        ...(outputFormat !== undefined && { outputFormat }),
        ...(safetyTolerance !== undefined && { safetyTolerance }),
        ...(seed !== undefined && { seed }),
        ...(numInferenceSteps !== undefined && { numInferenceSteps }),
        ...(guidanceScale !== undefined && { guidanceScale }),
        ...(negativePrompt !== undefined && { negativePrompt }),
        ...(promptUpsampling !== undefined && { promptUpsampling }),
        ...(outputQuality !== undefined && { outputQuality }),
        ...(raw !== undefined && { raw }),
        ...(imagePromptStrength !== undefined && { imagePromptStrength })
      }
    );

    // If image generation failed, return the error
    if (!generationResult.success) {
      return generationResult.errorResponse as { content: any[], isError: boolean };
    }

    const imageBuffer = generationResult.buffer;
    const finalFileName = generationResult.fileName;
    const usingModel = generationResult.model;
    const dimensions = generationResult.dimensions;

    // Make sure we have valid image data
    if (!imageBuffer) {
      return {
        content: [{
          type: "text",
          text: "Failed to get valid image data from the image generator."
        }],
        isError: true
      };
    }

    try {
      // Create the directory if it doesn't exist
      const outputDir = path.dirname(outputPath);
      if (!fs.existsSync(outputDir)) {
        fs.mkdirSync(outputDir, { recursive: true });
      }

      // Save the buffer directly to the specified output path
      if (imageBuffer) {
        fs.writeFileSync(outputPath, imageBuffer);
      } else {
        throw new Error('No image data available to save');
      }

      // Return success response
      const response = formatSuccessResponse(
        'Image Generated Successfully',
        {
          Prompt: prompt,
          Model: usingModel.split('/')[1],
          'Output Path': outputPath,
          'File Name': finalFileName,
          'File Size': `${imageBuffer ? imageBuffer.length : 0} bytes`,
          'Dimensions': dimensions || `${width}x${height}`,
          'Format': outputFormat || 'png',
          'Generation Parameters': {
            // Use the actual dimensions from the generated image
            ...(generationResult.dimensions ? { 'Dimensions': generationResult.dimensions } : {}),
            // Show the aspect ratio that was actually used (from tool call or defaults)
            'Aspect Ratio': aspectRatio || defaults.aspectRatio || '1:1',
            'Inference Steps': numInferenceSteps || defaults.numInferenceSteps,
            'Guidance Scale': guidanceScale || defaults.guidanceScale,
            'Negative Prompt': negativePrompt || defaults.negativePrompt,
            ...(raw !== undefined ? { 'Raw Mode': raw } : {}),
            ...(promptUpsampling !== undefined ? { 'Prompt Upsampling': promptUpsampling } : {}),
            ...(outputQuality !== undefined ? { 'Output Quality': outputQuality } : {}),
            ...(imagePromptStrength !== undefined ? { 'Image Prompt Strength': imagePromptStrength } : {}),
            ...(seed !== undefined ? { 'Seed': seed } : {})
          }
        },
        `Image has been successfully generated and saved to: ${outputPath}`
      ) as { content: any[], isError?: boolean };

      return response;
    } catch (error: any) {
      return {
        content: [{
          type: "text",
          text: `Error saving image to ${outputPath}: ${error.message || String(error)}`
        }],
        isError: true
      };
    }
  }
);
```
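The options object passed to generateImage relies on conditional spreads so that only parameters the caller actually supplied override the stored defaults. A minimal sketch of that pattern, with illustrative names and values rather than the real defaults object:

```typescript
// Minimal sketch of the defaults-then-override pattern used in the handler above.
// `defaults` and the parameter names below are illustrative stand-ins.
type Overrides = { width?: number; height?: number; outputFormat?: string };

function mergeWithDefaults(defaults: Required<Overrides>, overrides: Overrides) {
  return {
    ...defaults,
    // `x !== undefined && { x }` evaluates to `false` when x is undefined; spreading
    // `false` adds no keys, so the default value survives untouched.
    ...(overrides.width !== undefined && { width: overrides.width }),
    ...(overrides.height !== undefined && { height: overrides.height }),
    ...(overrides.outputFormat !== undefined && { outputFormat: overrides.outputFormat }),
  };
}

// Example: only outputFormat is overridden; width and height keep their defaults.
const merged = mergeWithDefaults(
  { width: 1024, height: 1024, outputFormat: "png" },
  { outputFormat: "webp" }
);
// merged => { width: 1024, height: 1024, outputFormat: "webp" }
```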
- src/index.ts:1280-1430 (handler): The main handler function for the 'generate_image' tool. It merges stored defaults with the parameters from the tool call, invokes the image-generator service, writes the returned buffer to the requested output path, and formats a success response. The handler body appears inline in the registration snippet above and is not repeated here.
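The handler's return value follows the MCP tool-result shape declared in its Promise type: a content array of text items, plus an optional isError flag on failure. A minimal sketch of the two paths (the text and path below are hypothetical):

```typescript
// Minimal sketch of the MCP tool-result shapes returned by the handler above.
type ToolResult = { content: { type: "text"; text: string }[]; isError?: boolean };

const success: ToolResult = {
  content: [{ type: "text", text: "Image has been successfully generated and saved to: /tmp/out.png" }],
};

const failure: ToolResult = {
  content: [{ type: "text", text: "Replicate API client is not initialized." }],
  isError: true,
};
```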
- src/index.ts:1250-1279 (schema): Zod schema defining the input parameters for the generate_image tool, including all generation options and their defaults. The schema object is the second argument to server.tool() in the registration snippet above and is not repeated here.
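As a side note on the schema's behavior, Zod's `.optional().default(...)` only fills in a value when the field is omitted from the input. A small illustrative sketch (not taken from the repository):

```typescript
import { z } from "zod";

// Illustrative subset of the schema to show how Zod applies defaults.
const subset = z.object({
  width: z.number().optional().default(1024),
  outputFormat: z.enum(["jpeg", "png", "webp"]).optional().default("png"),
});

subset.parse({});                        // => { width: 1024, outputFormat: "png" }
subset.parse({ outputFormat: "webp" });  // => { width: 1024, outputFormat: "webp" }
```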
- src/services/image-generator.ts (helper): generateImage service function that prepares options, generates the image via ReplicateClient, processes it with Sharp, and returns either the image buffer or a formatted error response.

```typescript
export async function generateImage(
  replicateClient: ReplicateClient,
  prompt: string,
  fileName: string,
  options: any = {}
) {
  // No need to track files anymore since we're keeping everything in memory
  try {
    // Prepare options with proper naming for the API
    const modelOptions: any = {};

    // Set aspect ratio or dimensions
    if (options.aspectRatio) {
      modelOptions.aspectRatio = options.aspectRatio;
    } else {
      // If no aspect ratio is provided, use width and height
      // These will be overridden by the defaults in the DefaultsManager if not provided
      modelOptions.width = options.width || 1024;
      modelOptions.height = options.height || 1024;
    }

    // Add common parameters
    if (options.numInferenceSteps) modelOptions.numInferenceSteps = options.numInferenceSteps;
    if (options.guidanceScale) modelOptions.guidanceScale = options.guidanceScale;
    if (options.negativePrompt) modelOptions.negativePrompt = options.negativePrompt;
    if (options.seed !== undefined) modelOptions.seed = options.seed;

    // Always set outputFormat, defaulting to png unless explicitly specified
    modelOptions.outputFormat = options.outputFormat || "png";
    if (options.safetyTolerance !== undefined) modelOptions.safetyTolerance = options.safetyTolerance;

    // Add model-specific parameters if provided
    if (options.promptUpsampling !== undefined) modelOptions.promptUpsampling = options.promptUpsampling;
    if (options.outputQuality !== undefined) modelOptions.outputQuality = options.outputQuality;
    if (options.raw !== undefined) modelOptions.raw = options.raw;
    if (options.imagePromptStrength !== undefined) modelOptions.imagePromptStrength = options.imagePromptStrength;

    // Add model override if provided
    if (options.model) modelOptions.model = options.model;

    // Get the current default model for informational purposes
    const defaultModel = replicateClient.getDefaultModel();
    const usingModel = options.model || defaultModel;

    console.log(`Using model: ${usingModel} (${options.model ? 'override' : 'default'})`);
    console.log(`Prompt: ${prompt}`);

    // STEP 1: Generate the image with Replicate
    console.log('Generating image with Replicate...');
    const imageBuffer = await replicateClient.generateImage(prompt, modelOptions);
    console.log(`Image generated successfully, buffer size: ${imageBuffer.length} bytes`);

    // STEP 2: Process the image with Sharp
    console.log('Processing image with Sharp...');

    // Get the output format from options (already defaulted to png earlier)
    const outputFormat = modelOptions.outputFormat;
    let mimeType: string;
    if (outputFormat === 'jpeg' || outputFormat === 'jpg') {
      mimeType = 'image/jpeg';
    } else if (outputFormat === 'webp') {
      mimeType = 'image/webp';
    } else {
      // Default to PNG
      mimeType = 'image/png';
    }

    // Process with Sharp and get buffer directly
    let sharpInstance = sharp(imageBuffer);

    // Apply format-specific options
    if (outputFormat === 'png') {
      sharpInstance = sharpInstance.png({ quality: 100 });
    } else if (outputFormat === 'jpeg' || outputFormat === 'jpg') {
      sharpInstance = sharpInstance.jpeg({ quality: 100 });
    } else if (outputFormat === 'webp') {
      sharpInstance = sharpInstance.webp({ quality: 100 });
    }

    // Get the processed image as a buffer
    const processedBuffer = await sharpInstance.toBuffer();
    console.log(`Image processed successfully, buffer size: ${processedBuffer.length} bytes`);

    // Determine the final filename with extension
    const fileExtension = outputFormat === 'jpeg' ? 'jpg' : outputFormat;
    const finalFileName = fileName.endsWith(`.${fileExtension}`) ? fileName : `${fileName}.${fileExtension}`;

    // No need to clean up files since we're keeping everything in memory

    // Get dimensions from the Sharp metadata
    const metadata = await sharpInstance.metadata();
    const dimensions = `${metadata.width}x${metadata.height}`;

    return {
      success: true,
      buffer: processedBuffer,
      mimeType,
      fileName: finalFileName,
      model: usingModel,
      dimensions
    };
  } catch (error: any) {
    console.error('Error generating or processing image:', error);

    // No need to clean up files since we're keeping everything in memory

    // Get the current default model for informational purposes
    const defaultModel = replicateClient.getDefaultModel();
    const usingModel = options.model || defaultModel;

    // Determine which step failed
    const errorStep = error.message.includes('Sharp') ? 'Image Processing' : 'Image Generation';

    return {
      success: false,
      error,
      errorResponse: formatErrorResponse(
        error,
        errorStep,
        {
          Prompt: prompt,
          Model: usingModel.split('/')[1],
          Step: errorStep
        },
        [
          'Check that your REPLICATE_API_TOKEN is valid',
          'Try a different model using set-model',
          'Try a more descriptive prompt',
          'Try a different aspect ratio',
          ...(errorStep === 'Image Processing' ? ['Make sure Sharp is properly installed'] : [])
        ]
      )
    };
  }
}
```
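For context, the Sharp calls in the helper follow the library's standard chaining API. A standalone sketch of the re-encode-and-measure step, assuming a hypothetical input path rather than the Replicate buffer:

```typescript
import sharp from "sharp";
import { readFile } from "fs/promises";

// Standalone sketch: re-encode an image buffer to the requested format and read
// its dimensions, mirroring the Sharp step in the helper above. Paths are hypothetical.
async function reencode(inputPath: string, format: "png" | "jpeg" | "webp") {
  const input = await readFile(inputPath);
  let pipeline = sharp(input);

  if (format === "png") pipeline = pipeline.png({ quality: 100 });
  else if (format === "jpeg") pipeline = pipeline.jpeg({ quality: 100 });
  else pipeline = pipeline.webp({ quality: 100 });

  const buffer = await pipeline.toBuffer();
  const { width, height } = await sharp(buffer).metadata();
  return { buffer, dimensions: `${width}x${height}` };
}
```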
- src/replicate-client.ts:79-191 (helper): Core ReplicateClient method that calls the Replicate API, converts the output to a Buffer, and supports multiple model parameters.

```typescript
async generateImage(prompt: string, options: any = {}, modelId?: string): Promise<Buffer> {
  try {
    // Convert camelCase options to snake_case for the API
    const apiOptions: any = {};

    // Map common options
    if (options.aspectRatio) apiOptions.aspect_ratio = options.aspectRatio;
    if (options.width) apiOptions.width = options.width;
    if (options.height) apiOptions.height = options.height;
    if (options.seed !== undefined) apiOptions.seed = options.seed;
    if (options.numInferenceSteps) apiOptions.num_inference_steps = options.numInferenceSteps;
    if (options.guidanceScale) apiOptions.guidance_scale = options.guidanceScale;
    if (options.negativePrompt) apiOptions.negative_prompt = options.negativePrompt;

    // Always set output_format, defaulting to png unless explicitly specified
    apiOptions.output_format = options.outputFormat || "png";
    if (options.safetyTolerance !== undefined) apiOptions.safety_tolerance = options.safetyTolerance;

    // Map model-specific options
    if (options.promptUpsampling !== undefined) apiOptions.prompt_upsampling = options.promptUpsampling;
    if (options.outputQuality !== undefined) apiOptions.output_quality = options.outputQuality;
    if (options.raw !== undefined) apiOptions.raw = options.raw;
    if (options.imagePromptStrength !== undefined) apiOptions.image_prompt_strength = options.imagePromptStrength;

    // Use the defaults manager to prepare the input with merged options
    const mergedOptions = { ...options, ...apiOptions };
    const { modelId: selectedModelId, input } = this.defaultsManager.prepareModelInput(prompt, mergedOptions);

    console.log(`Using model: ${selectedModelId}`);
    console.log(`Input parameters: ${JSON.stringify(input, null, 2)}`);

    // Run the model using the Replicate client
    const output = await this.client.run(selectedModelId as any, { input });

    console.log('Replicate output type:', output ? (output.constructor ? output.constructor.name : typeof output) : 'null');

    // Handle different output types from Replicate
    let imageData: Buffer;

    if (output === null || output === undefined) {
      throw new Error('Replicate returned null or undefined output');
    } else if (typeof output === 'string') {
      // If output is a URL, download the image
      console.log('Replicate returned a string (URL):', output);
      const response = await axios.get(output, { responseType: 'arraybuffer' });
      imageData = Buffer.from(response.data);
    } else if (Buffer.isBuffer(output)) {
      // If output is already a Buffer
      console.log('Replicate returned a Buffer');
      imageData = output;
    } else if (output instanceof Uint8Array || output instanceof ArrayBuffer) {
      // If output is a Uint8Array or ArrayBuffer
      console.log('Replicate returned a Uint8Array or ArrayBuffer');
      imageData = Buffer.from(output);
    } else if (typeof output === 'object' && output !== null) {
      // If output is a FileOutput object or similar
      console.log('Replicate returned an object:', Object.keys(output));

      // Try to get the file content
      if ('file' in output && output.file) {
        console.log('Object has a file property');
        // Use type assertion to handle FileOutput object
        const fileContent = await (output.file as any).arrayBuffer();
        imageData = Buffer.from(fileContent);
      } else if ('arrayBuffer' in output && typeof output.arrayBuffer === 'function') {
        console.log('Object has an arrayBuffer method');
        // Use type assertion for the arrayBuffer method
        const arrayBuffer = await (output as any).arrayBuffer();
        imageData = Buffer.from(arrayBuffer);
      } else if ('blob' in output && typeof output.blob === 'function') {
        console.log('Object has a blob method');
        // Use type assertion for the blob method
        const blob = await (output as any).blob();
        const arrayBuffer = await (blob as any).arrayBuffer();
        imageData = Buffer.from(arrayBuffer);
      } else if ('text' in output && typeof output.text === 'function') {
        console.log('Object has a text method');
        // Use type assertion for the text method
        const text = await (output as any).text();
        // If the text is a URL, download the image
        if (text.startsWith('http')) {
          const response = await axios.get(text, { responseType: 'arraybuffer' });
          imageData = Buffer.from(response.data);
        } else {
          imageData = Buffer.from(text);
        }
      } else {
        // Last resort: try to stringify the object and see if it's a URL
        const str = output.toString();
        console.log('Object toString():', str);
        if (str.startsWith('http')) {
          const response = await axios.get(str, { responseType: 'arraybuffer' });
          imageData = Buffer.from(response.data);
        } else {
          throw new Error(`Unsupported Replicate output type: ${output.constructor ? output.constructor.name : typeof output}`);
        }
      }
    } else {
      throw new Error(`Unsupported Replicate output type: ${typeof output}`);
    }

    return imageData;
  } catch (error: any) {
    // Provide detailed error information
    const errorDetails = {
      message: error.message,
      prompt: prompt,
      options: JSON.stringify(options),
      modelId: modelId || this.getDefault('model')
    };
    throw new Error(`Replicate API error: ${error.message}\nDetails: ${JSON.stringify(errorDetails, null, 2)}`);
  }
}
```
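Replicate's client can hand back a URL string, raw bytes, or a FileOutput-like object depending on the model, which is why the method above branches so heavily. A trimmed sketch of that normalization, independent of the repository's exact types, covering only the most common cases:

```typescript
import axios from "axios";

// Trimmed sketch of normalizing a Replicate output to a Buffer. Covers the common
// cases handled above: URL string, raw bytes, or an object exposing arrayBuffer().
async function toBuffer(output: unknown): Promise<Buffer> {
  if (typeof output === "string" && output.startsWith("http")) {
    const res = await axios.get(output, { responseType: "arraybuffer" });
    return Buffer.from(res.data);
  }
  if (Buffer.isBuffer(output)) return output;
  if (output instanceof Uint8Array) return Buffer.from(output);
  if (output && typeof (output as any).arrayBuffer === "function") {
    return Buffer.from(await (output as any).arrayBuffer());
  }
  throw new Error(`Unsupported Replicate output type: ${typeof output}`);
}
```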