# n8n Documentation MCP Server Configuration
# =========================
# COMMON CONFIGURATION
# =========================
# Database Configuration
# For local development: ./data/nodes.db
# For Docker: /app/data/nodes.db
# Custom paths supported in v2.7.16+ (must end with .db)
NODE_DB_PATH=./data/nodes.db
# Logging Level (debug, info, warn, error)
MCP_LOG_LEVEL=info
# Node Environment (development, production)
NODE_ENV=development
# Rebuild database on startup (true/false)
REBUILD_ON_START=false
# =========================
# LOCAL MODE CONFIGURATION
# =========================
# Used when running: npm run start:v2 or npm run dev:v2
# Local MCP Server Configuration
MCP_SERVER_PORT=3000
MCP_SERVER_HOST=localhost
# MCP_AUTH_TOKEN=optional-for-local-development
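# Example local startup (a sketch; assumes dependencies are installed and
# ./data/nodes.db exists, or REBUILD_ON_START=true):
#   MCP_SERVER_PORT=3000 MCP_SERVER_HOST=localhost npm run start:v2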
# =========================
# SIMPLE HTTP MODE
# =========================
# Used for private single-user deployments
# Server mode: stdio (local) or http (remote)
MCP_MODE=stdio
# Use fixed HTTP implementation (recommended for stability)
# Set to true to bypass StreamableHTTPServerTransport issues
USE_FIXED_HTTP=true
# HTTP Server Configuration (only used when MCP_MODE=http)
PORT=3000
HOST=0.0.0.0
# Base URL Configuration (optional)
# Set this when running behind a proxy, or when the server is reached at a
# different URL than the one it binds to. If unset, the URL is auto-detected
# from proxy headers (when TRUST_PROXY is enabled) or built from HOST and PORT.
# Examples:
# BASE_URL=https://n8n-mcp.example.com
# BASE_URL=https://your-domain.com:8443
# PUBLIC_URL=https://n8n-mcp.mydomain.com (alternative to BASE_URL)
# Authentication token for HTTP mode (REQUIRED)
# Generate with: openssl rand -base64 32
AUTH_TOKEN=your-secure-token-here
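# Example authenticated request once the server is running in HTTP mode
# (a sketch; the /mcp endpoint path is an assumption, verify for your build):
#   curl -H "Authorization: Bearer $AUTH_TOKEN" http://localhost:3000/mcp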
# CORS origin for HTTP mode (optional)
# Default: * (allow all origins)
# For production, set to your specific domain
# CORS_ORIGIN=https://your-client-domain.com
# Trust proxy configuration for correct IP logging (0=disabled, 1=trust first proxy)
# Set to 1 when running behind a reverse proxy (Nginx, Traefik, etc.)
# Set to the number of proxy hops if behind multiple proxies
# Default: 0 (disabled)
# TRUST_PROXY=0
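# Example Nginx location block to pair with TRUST_PROXY=1 (a sketch; the
# upstream host and port are placeholders for your deployment):
#   location / {
#     proxy_pass http://127.0.0.1:3000;
#     proxy_set_header Host $host;
#     proxy_set_header X-Forwarded-For $remote_addr;
#     proxy_set_header X-Forwarded-Proto $scheme;
#   }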
# =========================
# MULTI-TENANT CONFIGURATION
# =========================
# Enable multi-tenant mode for dynamic instance support
# When enabled, n8n API tools will be available for all sessions,
# and instance configuration will be determined from HTTP headers
# Default: false (single-tenant mode using environment variables)
ENABLE_MULTI_TENANT=false
# Session isolation strategy for multi-tenant mode
# - "instance": Create separate sessions per instance ID (recommended)
# - "shared": Share sessions but switch contexts (advanced)
# Default: instance
# MULTI_TENANT_SESSION_STRATEGY=instance
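# Example per-request instance headers in multi-tenant mode (illustrative
# only; the header names below are assumptions, not confirmed here):
#   curl -H "Authorization: Bearer $AUTH_TOKEN" \
#        -H "X-N8N-Url: https://tenant-a.example.com" \
#        -H "X-N8N-Key: <tenant-api-key>" \
#        http://localhost:3000/mcp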
# =========================
# N8N API CONFIGURATION
# =========================
# Optional: Enable n8n management tools by providing API credentials
# These tools allow creating, updating, and executing workflows
# n8n instance API URL (without /api/v1 suffix)
# Example: https://your-n8n-instance.com
# N8N_API_URL=
# n8n API Key (get from Settings > API in your n8n instance)
# N8N_API_KEY=
# n8n API request timeout in milliseconds (default: 30000)
# N8N_API_TIMEOUT=30000
# Maximum number of API request retries (default: 3)
# N8N_API_MAX_RETRIES=3
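# Example credential check against the n8n public API (X-N8N-API-KEY and
# the /api/v1 prefix are standard n8n; adjust if your setup differs):
#   curl -H "X-N8N-API-KEY: $N8N_API_KEY" "$N8N_API_URL/api/v1/workflows?limit=1"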
# =========================
# CACHE CONFIGURATION
# =========================
# Optional: Configure instance cache settings for flexible instance support
# Maximum number of cached instances (default: 100, min: 1, max: 10000)
# INSTANCE_CACHE_MAX=100
# Cache TTL in minutes (default: 30, min: 1, max: 1440, i.e. 24 hours)
# INSTANCE_CACHE_TTL_MINUTES=30
# =========================
# OPENAI API CONFIGURATION
# =========================
# Optional: Enable AI-powered template metadata generation
# Provides structured metadata for improved template discovery
# OpenAI API Key (get from https://platform.openai.com/api-keys)
# OPENAI_API_KEY=
# OpenAI Model for metadata generation (default: gpt-4o-mini)
# OPENAI_MODEL=gpt-4o-mini
# Batch size for metadata generation (default: 100)
# Templates are processed in batches using OpenAI's Batch API for 50% cost savings
# OPENAI_BATCH_SIZE=100
# Enable metadata generation during template fetch (default: false)
# Set to true to automatically generate metadata when running fetch:templates
# METADATA_GENERATION_ENABLED=false
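# Example one-off run with metadata generation enabled (a sketch; assumes
# the fetch:templates npm script referenced above):
#   OPENAI_API_KEY=sk-... METADATA_GENERATION_ENABLED=true npm run fetch:templates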