# =============================================================================
# ComfyUI Flux MCP Server Configuration
# Optimized setup with PyTorch 2.5.1 and BuildKit caching
# =============================================================================
# -----------------------------------------------------------------------------
# Docker Compose Project Name (IMPORTANT - DO NOT CHANGE)
# -----------------------------------------------------------------------------
# This ensures consistent container naming across all Docker operations
# Containers will be named: mcp-comfyui-flux-{service}-1
# Changing this will break Claude Desktop MCP integration!
COMPOSE_PROJECT_NAME=mcp-comfyui-flux
# -----------------------------------------------------------------------------
# Network Configuration
# -----------------------------------------------------------------------------
# Port for ComfyUI web interface (default: 8188)
# Change this if port 8188 is already in use
PORT=8188
# ComfyUI service configuration (for inter-container communication)
COMFYUI_HOST=comfyui
COMFYUI_PORT=8188
# MCP Server Port (when running outside Docker)
# MCP_SERVER_PORT=3000
# -----------------------------------------------------------------------------
# Model Configuration
# -----------------------------------------------------------------------------
# Hugging Face Configuration (required for Flux.1-dev)
# Get your token from: https://huggingface.co/settings/tokens
# Accept the license at: https://huggingface.co/black-forest-labs/FLUX.1-dev
# Leave empty to use only Flux.1-schnell (no token required)
HF_TOKEN=
# Model Precision (optional)
# Options: fp32 (best quality, most VRAM), fp16 (balanced), bf16 (if supported), fp8 (most efficient)
# Default: fp16 (fp8 models like FLUX schnell-fp8 will auto-detect)
# Uncomment to override default auto-detection
# MODEL_PRECISION=fp16
# -----------------------------------------------------------------------------
# GPU Configuration
# -----------------------------------------------------------------------------
# CUDA device selection
# Options:
# -1 = CPU only (no GPU)
# 0 = First GPU
# 0,1 = Multiple GPUs
# (unset/empty) = All available GPUs
# NOTE: "all" is NOT a valid CUDA_VISIBLE_DEVICES value (it takes device
# indices or UUIDs); use NVIDIA_VISIBLE_DEVICES=all below to expose all GPUs
CUDA_VISIBLE_DEVICES=0
# NVIDIA runtime visibility
# Usually matches CUDA_VISIBLE_DEVICES or 'all'
NVIDIA_VISIBLE_DEVICES=all
# -----------------------------------------------------------------------------
# Memory & Performance Settings
# -----------------------------------------------------------------------------
# PyTorch memory allocation (optional)
# Reduce if you encounter CUDA out of memory errors
# The optimized setup uses PyTorch 2.5.1 with better memory management
# PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
# CPU Thread count (optional)
# Set based on your CPU cores for optimal performance
# OMP_NUM_THREADS=4
# Shared memory size for containers (set in docker-compose.yml)
# Increase if you encounter shared memory errors with large models
# Default: 8g in docker-compose.yml
# -----------------------------------------------------------------------------
# Environment Settings
# -----------------------------------------------------------------------------
# Node.js environment
# Options: development, production
NODE_ENV=production
# Disable telemetry and auto-updates
CLAUDE_CODE_DISABLE_TELEMETRY=1
DISABLE_AUTOUPDATER=1
# -----------------------------------------------------------------------------
# API Keys & Authentication
# -----------------------------------------------------------------------------
# Claude Code / Anthropic API Configuration
# Get your API key from: https://console.anthropic.com
# Or use Claude Pro/Max subscription
# Required only if using Claude Code features
ANTHROPIC_API_KEY=
# -----------------------------------------------------------------------------
# Logging & Debugging
# -----------------------------------------------------------------------------
# Log level (optional)
# Options: error, warn, info, debug
# LOG_LEVEL=info
# Enable debug mode (optional)
# Set to 1 for verbose logging
# DEBUG=0
# -----------------------------------------------------------------------------
# Security Settings
# -----------------------------------------------------------------------------
# Rate Limiting Configuration
# Maximum requests per time interval (default: 20)
RATE_LIMIT_TOKENS=20
# Time interval for rate limiting in milliseconds (default: 60000 = 1 minute)
RATE_LIMIT_INTERVAL=60000
# Maximum burst capacity for rate limiting (default: 30)
RATE_LIMIT_BURST=30
# Enable rate limit logging (default: true)
RATE_LIMIT_LOGGING=true
# Input Sanitization
# Enable input sanitization logging (default: true)
SANITIZER_LOGGING=true
# Secrets Management
# Enable secrets logging (default: false for security)
SECRETS_LOGGING=false
# Authentication Token (if not using Docker secrets)
# Generate with: openssl rand -hex 32
# MCP_AUTH_TOKEN=your_secure_token_here
# -----------------------------------------------------------------------------
# Connection Resilience Settings
# -----------------------------------------------------------------------------
# Maximum number of reconnection attempts (default: 10)
# Set to 0 to disable automatic reconnection
COMFYUI_MAX_RECONNECT_ATTEMPTS=10
# Initial delay between reconnection attempts in milliseconds (default: 1000)
# This delay doubles with each failed attempt (exponential backoff)
COMFYUI_RECONNECT_DELAY=1000
# Maximum delay between reconnection attempts in milliseconds (default: 30000)
# The exponential backoff will not exceed this value
COMFYUI_MAX_RECONNECT_DELAY=30000
# WebSocket heartbeat interval in milliseconds (default: 30000)
# Used to detect stale connections and trigger reconnection
# Set to 0 to disable heartbeat monitoring
COMFYUI_HEARTBEAT_INTERVAL=30000
# -----------------------------------------------------------------------------
# Advanced Settings (rarely needed)
# -----------------------------------------------------------------------------
# Docker project name (for multiple deployments)
# WARNING: the effective project name is set by COMPOSE_PROJECT_NAME at the
# top of this file, and changing it breaks Claude Desktop MCP integration.
# Only override this if you are intentionally running a second, independent
# deployment that does not need MCP integration.
# PROJECT_NAME=mcp-comfyui
# Custom model paths (if not using default ./models directory)
# MODELS_PATH=./models
# OUTPUT_PATH=./output
# INPUT_PATH=./input
# ComfyUI extra arguments (optional)
# Default: --highvram is set for GPUs with 20GB+ VRAM
# For lower VRAM, use --normalvram or --lowvram
# COMFYUI_EXTRA_ARGS=--highvram
# Security settings (optional)
# ENABLE_CORS=false
# ALLOWED_ORIGINS=http://localhost:3000