# MCP LangChain Agent Configuration
# ===================================
# Gateway Configuration
# ----------------------
# URL of the MCP Gateway instance
MCP_GATEWAY_URL=http://localhost:4444
# JWT token for authenticating with the MCP Gateway
# Generate with: python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key
MCPGATEWAY_BEARER_TOKEN=your-jwt-token-here
# LLM Provider Configuration
# ===========================
# Supported providers: openai, azure, bedrock, ollama, anthropic
LLM_PROVIDER=openai
# OpenAI Configuration (for LLM_PROVIDER=openai)
# -----------------------------------------------
OPENAI_API_KEY=your-openai-api-key-here
# OPENAI_BASE_URL=https://api.openai.com/v1 # Optional: Custom base URL
# OPENAI_ORGANIZATION=your-org-id # Optional: Organization ID
# Azure OpenAI Configuration (for LLM_PROVIDER=azure)
# ---------------------------------------------------
# AZURE_OPENAI_API_KEY=your-azure-api-key
# AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/
# AZURE_OPENAI_API_VERSION=2024-02-15-preview
# AZURE_DEPLOYMENT_NAME=your-deployment-name
# AWS Bedrock Configuration (for LLM_PROVIDER=bedrock)
# ----------------------------------------------------
# AWS_ACCESS_KEY_ID=your-access-key-id
# AWS_SECRET_ACCESS_KEY=your-secret-access-key
# AWS_REGION=us-east-1
# BEDROCK_MODEL_ID=anthropic.claude-3-sonnet-20240229-v1:0
# OLLAMA Configuration (for LLM_PROVIDER=ollama)
# ----------------------------------------------
# OLLAMA_BASE_URL=http://localhost:11434
# OLLAMA_MODEL=llama2:7b
# Anthropic Configuration (for LLM_PROVIDER=anthropic)
# ----------------------------------------------------
# ANTHROPIC_API_KEY=your-anthropic-api-key
# Tool Configuration (Optional)
# ------------------------------
# Comma-separated list of tools to allow (if unset, all tools from gateway are used)
# Leave empty during development; set an explicit allowlist in production for security
TOOLS=
# Example tool allowlist for production:
# TOOLS=list-users,books-search,weather-tool
# Agent Configuration
# -------------------
# Default model name (applies when LLM_PROVIDER=openai; other providers use their own model settings above)
DEFAULT_MODEL=gpt-4o-mini
# Maximum number of tool call iterations
MAX_ITERATIONS=10
# Temperature for LLM responses (0.0-2.0)
TEMPERATURE=0.7
# Enable streaming responses (true/false)
STREAMING_ENABLED=true
# Enable debug logging (true/false)
DEBUG_MODE=false
# Agent Behavior
# --------------
# Agent name for identification
# NOTE: value contains spaces — quote it if this file is ever sourced by a shell
AGENT_NAME=MCP LangChain Agent
# Agent description
# NOTE: value contains spaces — quote it if this file is ever sourced by a shell
AGENT_DESCRIPTION=A LangChain agent with MCP tool integration
# Application Configuration
# -------------------------
# Port to run the agent on
PORT=8000
# Host to bind to
HOST=0.0.0.0
# Log level (DEBUG, INFO, WARNING, ERROR)
LOG_LEVEL=INFO
# CORS Configuration
# ------------------
# Allowed origins for CORS (comma-separated)
# WARNING: browsers reject a wildcard (*) origin when credentials are enabled;
# if CORS_CREDENTIALS=true, list explicit origins here for production
CORS_ORIGINS=*
# Enable CORS credentials
CORS_CREDENTIALS=true
# Security Configuration
# ----------------------
# Enable authentication for agent endpoints (true/false)
AUTH_ENABLED=false
# API key for agent endpoints (if auth enabled)
AGENT_API_KEY=your-agent-api-key
# Performance Configuration
# -------------------------
# Timeout for MCP Gateway requests (seconds)
GATEWAY_TIMEOUT=30
# Maximum concurrent requests
MAX_CONCURRENT_REQUESTS=10
# Request rate limiting (requests per minute)
RATE_LIMIT=60