# OLLAMA Configuration Example
# ============================
# Gateway Configuration
MCP_GATEWAY_URL=http://localhost:4444
MCPGATEWAY_BEARER_TOKEN=your-jwt-token-here
# OLLAMA Provider
LLM_PROVIDER=ollama
# OLLAMA Settings
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama2:7b
DEFAULT_MODEL=llama2:7b
# Agent Settings
TEMPERATURE=0.7
MAX_ITERATIONS=10
STREAMING_ENABLED=true
# Performance Settings
# OLLAMA can be slower than hosted providers, so allow a longer timeout (seconds)
REQUEST_TIMEOUT=60
MAX_TOKENS=1000
# Tool Configuration (optional)
TOOLS=
# Debug Mode
DEBUG_MODE=false
# OLLAMA Setup Instructions:
# 1. Install OLLAMA: https://ollama.ai/
# 2. Pull a model: ollama pull llama2:7b
# 3. Start OLLAMA service: ollama serve
# 4. Verify: curl http://localhost:11434/api/tags
# Popular OLLAMA Models:
# - llama2:7b, llama2:13b, llama2:70b
# - llama3:8b, llama3:70b
# - codellama:7b, codellama:13b
# - mistral:7b, mixtral:8x7b
# - gemma:2b, gemma:7b
# - phi3:mini, phi3:medium
# For remote OLLAMA:
# OLLAMA_BASE_URL=http://your-ollama-server:11434