# CodeGraph Configuration Example
# Copy this file to .env and update the values for your environment

# ============================================================================
# CodeGraph Core Configuration (Simplified Setup)
# ============================================================================
# Minimal Setup - Auto-detect embedding provider (ONNX, Ollama, or OpenAI)
CODEGRAPH_EMBEDDING_PROVIDER=auto
# That's it for basic usage! CodeGraph will auto-detect everything else.
# Uncomment and customize the settings below if you need more control.

# Embedding Provider Configuration
# ----------------------------------
# Provider options: "auto", "onnx", "ollama", "openai", or "lmstudio"
# CODEGRAPH_EMBEDDING_PROVIDER=auto

# ONNX: Specify model path (or leave empty for auto-detection from the HuggingFace cache)
# CODEGRAPH_LOCAL_MODEL=/path/to/your/onnx/model
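# Example (illustrative path only; point this at wherever your ONNX model actually lives):
# CODEGRAPH_LOCAL_MODEL=~/.cache/huggingface/hub/models--sentence-transformers--all-MiniLM-L6-v2
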
# Ollama: Specify embedding model name
# CODEGRAPH_EMBEDDING_MODEL=all-minilm:latest
# CODEGRAPH_OLLAMA_URL=http://localhost:11434
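# Make sure the model is available locally before indexing, e.g.:
#   ollama pull all-minilm:latest
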
# LM Studio: Best for MLX + Flash Attention 2 (recommended on macOS)
# Default: jina-code-embeddings-1.5b (1536 dimensions)
# CODEGRAPH_EMBEDDING_PROVIDER=lmstudio
# CODEGRAPH_EMBEDDING_MODEL=jinaai/jina-code-embeddings-1.5b
# CODEGRAPH_LMSTUDIO_URL=http://localhost:1234
# CODEGRAPH_EMBEDDING_DIMENSION=1536
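# Sanity check: LM Studio serves an OpenAI-compatible API, so this should list the loaded model:
#   curl http://localhost:1234/v1/models
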
# OpenAI: Model name (API key configured below in Security section)
# CODEGRAPH_EMBEDDING_MODEL=text-embedding-3-small
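# text-embedding-3-small returns 1536-dimensional vectors by default
# CODEGRAPH_EMBEDDING_DIMENSION=1536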

# LLM Configuration (for local insights generation)
# --------------------------------------------------
# Leave unset for context-only mode (fastest; recommended for agents like Claude or GPT-4)
# Set a provider and model below to enable local LLM insight generation
# LM Studio with DeepSeek Coder v2 Lite Instruct (recommended)
# Superior MLX support and Flash Attention 2 on macOS
# CODEGRAPH_LLM_PROVIDER=lmstudio
# CODEGRAPH_MODEL=lmstudio-community/DeepSeek-Coder-V2-Lite-Instruct-GGUF/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf
# CODEGRAPH_LMSTUDIO_URL=http://localhost:1234
# CODEGRAPH_CONTEXT_WINDOW=32000
# CODEGRAPH_TEMPERATURE=0.1
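# Quick smoke test of the loaded model (model name is illustrative; it must match what LM Studio loaded):
#   curl http://localhost:1234/v1/chat/completions -H "Content-Type: application/json" \
#     -d '{"model":"deepseek-coder-v2-lite-instruct","messages":[{"role":"user","content":"ping"}]}'
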
# Ollama (alternative)
# LLM model (e.g., "qwen2.5-coder:14b", "codellama:13b")
# CODEGRAPH_MODEL=qwen2.5-coder:14b
# CODEGRAPH_OLLAMA_URL=http://localhost:11434
# LLM context window size (tokens)
# CODEGRAPH_CONTEXT_WINDOW=32000
# LLM temperature (0.0 = deterministic, 1.0 = creative)
# CODEGRAPH_TEMPERATURE=0.1

# Logging
# -------
# Log level: trace, debug, info, warn, error
# Use "warn" during indexing for clean TUI output (recommended)
# Use "info" for development/debugging
RUST_LOG=warn
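# RUST_LOG also accepts per-module filters, e.g. (crate name is illustrative):
# RUST_LOG=warn,codegraph=info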

# ============================================================================
# Security Configuration (for production deployments)
# ============================================================================
# JWT Authentication
JWT_SECRET=replace_with_secure_random_secret_minimum_32_characters_long
JWT_EXPIRY_HOURS=24
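# Generate a suitably random secret, e.g.:
#   openssl rand -hex 32
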
# API Key Configuration
API_KEY_PREFIX=cgk
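# Issued keys carry this prefix, e.g. cgk_<random-token> (illustrative format; actual layout may differ)
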
# Server Configuration
HOST=127.0.0.1
PORT=8080
ENVIRONMENT=development
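# Typical production overrides (adjust to your deployment):
# HOST=0.0.0.0
# ENVIRONMENT=production
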
# TLS/HTTPS Configuration (for production)
# TLS_CERT_PATH=/path/to/certificate.pem
# TLS_KEY_PATH=/path/to/private-key.pem
# REQUIRE_TLS=true
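# For local HTTPS testing you can generate a self-signed pair, e.g.:
#   openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
#     -keyout private-key.pem -out certificate.pem -subj "/CN=localhost"
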
# Database Configuration
# DATABASE_URL=postgresql://user:password@localhost/codegraph
# REDIS_URL=redis://localhost:6379
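# Quick connectivity checks once these are set:
#   psql "$DATABASE_URL" -c 'SELECT 1'
#   redis-cli -u "$REDIS_URL" ping
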
# Rate Limiting
RATE_LIMIT_ANONYMOUS=60
RATE_LIMIT_USER=1000
RATE_LIMIT_PREMIUM=5000
RATE_LIMIT_ADMIN=10000

# Security Settings
# Maximum request body size in bytes (10 MiB = 10 * 1024 * 1024)
MAX_REQUEST_SIZE=10485760
SESSION_TIMEOUT_HOURS=24
PASSWORD_MIN_LENGTH=12

# Logging (see RUST_LOG above for CodeGraph core logging)
# LOG_LEVEL=info # For application-level logging
SECURITY_LOG_LEVEL=warn
LOG_FORMAT=json

# Monitoring
METRICS_ENABLED=true
PROMETHEUS_PORT=9090
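# Verify metrics are being exported (assuming the standard /metrics path):
#   curl http://localhost:9090/metrics
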
# OpenAI API (if using external AI services)
# OPENAI_API_KEY=your_openai_api_key_here
# OPENAI_ORG_ID=your_org_id_here

# External Services
# SENTRY_DSN=https://your-sentry-dsn
# ANALYTICS_KEY=your_analytics_key

# Development/Testing Only
DEV_MODE=true
# Never set DISABLE_AUTH=true in production!
DISABLE_AUTH=false
ENABLE_DEBUG_ENDPOINTS=false