# =============================================================================
# Recall MCP Server Configuration
# =============================================================================
# Copy this file to .env and customize as needed.
# All options use the RECALL_ prefix.
# Lines starting with # are comments; commented-out settings show their defaults.
# =============================================================================
# REQUIRED: Ollama Model Configuration
# =============================================================================
# These environment variables MUST be set - there are no defaults.
# Embedding model for semantic search (REQUIRED)
# Recommended: mxbai-embed-large, nomic-embed-text
RECALL_OLLAMA_MODEL=mxbai-embed-large
# LLM model for relationship classification and reasoning (REQUIRED)
# Recommended: gemma3:12b, llama3.2, mistral
RECALL_OLLAMA_LLM_MODEL=gemma3:12b
# =============================================================================
# Optional: Storage Paths
# =============================================================================
# Path to SQLite database
# Default: ~/.recall/recall.db
# RECALL_SQLITE_PATH=~/.recall/recall.db
# Path to ChromaDB vector storage
# Default: ~/.recall/chroma_db
# RECALL_CHROMA_PATH=~/.recall/chroma_db
# ChromaDB collection name
# Default: memories
# RECALL_COLLECTION_NAME=memories
# =============================================================================
# Optional: Ollama Server Configuration
# =============================================================================
# Ollama server host URL
# Default: http://localhost:11434
# RECALL_OLLAMA_HOST=http://localhost:11434
# Request timeout in seconds
# Default: 30
# RECALL_OLLAMA_TIMEOUT=30
# =============================================================================
# Optional: Embedding Backend Configuration
# =============================================================================
# Embedding backend to use: 'ollama' or 'mlx'
# Default: ollama
# - ollama: Uses Ollama server for embeddings (works on any platform)
# - mlx: Uses mlx-embeddings for local embeddings (Apple Silicon only, faster)
# NOTE: only set this to 'mlx' on Apple Silicon machines.
# RECALL_EMBEDDING_BACKEND=ollama
# MLX embedding model identifier (used when embedding_backend='mlx')
# Default: mlx-community/mxbai-embed-large-v1
# RECALL_MLX_MODEL=mlx-community/mxbai-embed-large-v1
# =============================================================================
# Optional: Logging
# =============================================================================
# Logging level: DEBUG, INFO, WARNING, ERROR, CRITICAL
# Default: INFO
# RECALL_LOG_LEVEL=INFO
# =============================================================================
# Optional: Memory Defaults
# =============================================================================
# Default namespace for memories
# Default: global
# RECALL_DEFAULT_NAMESPACE=global
# Default importance score for new memories (0.0 to 1.0)
# Default: 0.5
# RECALL_DEFAULT_IMPORTANCE=0.5
# Default token budget for context generation
# Default: 4000
# RECALL_DEFAULT_TOKEN_BUDGET=4000
# =============================================================================
# Optional: Monitoring Configuration
# =============================================================================
# Enable background monitoring
# Default: false
# RECALL_MONITOR_ENABLED=false
# Monitoring interval in seconds
# Default: 30
# RECALL_MONITOR_INTERVAL=30
# Local Ollama model for monitoring tasks
# Default: llama3.2
# RECALL_OLLAMA_MONITOR_MODEL=llama3.2
# Anthropic API key for cloud-based monitoring (optional)
# WARNING: this is a secret - never commit a real key to version control.
# Default: (none)
# RECALL_ANTHROPIC_API_KEY=
# Claude model for quick checks
# Default: claude-3-5-haiku-20241022
# RECALL_HAIKU_MODEL=claude-3-5-haiku-20241022
# Claude model for deep analysis
# Default: claude-opus-4-20250514
# RECALL_OPUS_MODEL=claude-opus-4-20250514