# Server Configuration
PORT=8000
LOG_LEVEL=INFO
ENV=development
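# LOG_LEVEL is assumed to accept the standard logging level names (DEBUG, INFO, WARNING, ERROR);
# ENV is assumed to be either "development" or "production"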

# CORS Configuration
# "*" allows requests from any origin; restrict this outside of local development
ALLOWED_ORIGINS=*
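# Example of restricting origins (assumed comma-separated format; adjust to however
# the server parses this value):
# ALLOWED_ORIGINS=https://app.example.com,http://localhost:3000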

# LLM Provider Configuration
# Set to "ollama", "openai", "anthropic", or "google" for different providers
LLM_PROVIDER=ollama
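# To use a hosted provider instead of Ollama, change LLM_PROVIDER and fill in the
# matching provider section below, for example:
# LLM_PROVIDER=anthropic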

# Ollama Configuration (when LLM_PROVIDER=ollama)
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3:latest
# Additional Ollama models (comma-separated for dropdown)
# OLLAMA_MODELS=llama3:latest,llama3.1:latest,mistral:latest,mixtral:latest

# OpenAI Configuration (when LLM_PROVIDER=openai)
# OPENAI_API_KEY=sk-your-api-key-here
# OPENAI_MODEL=gpt-4
# Optional: override the API endpoint (defaults to the official OpenAI API)
# OPENAI_BASE_URL=https://api.openai.com/v1
# Additional OpenAI models (comma-separated for dropdown)
# OPENAI_MODELS=gpt-4,gpt-4-turbo,gpt-3.5-turbo,gpt-4o

# Anthropic (Claude) Configuration (when LLM_PROVIDER=anthropic)
# ANTHROPIC_API_KEY=sk-ant-your-api-key-here
# ANTHROPIC_MODEL=claude-3-5-sonnet-20241022
# Additional Anthropic models (comma-separated for dropdown)
# ANTHROPIC_MODELS=claude-3-5-sonnet-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307

# Google (Gemini) Configuration (when LLM_PROVIDER=google)
# GOOGLE_API_KEY=your-api-key-here
# GOOGLE_MODEL=gemini-pro
# Additional Google models (comma-separated for dropdown)
# GOOGLE_MODELS=gemini-pro,gemini-pro-vision,gemini-1.5-pro,gemini-1.5-flash