# FS-MCP Environment Configuration
# Copy this file to .env and fill in your values

# Security Settings
# =================
# Directory restriction (required) - only files within this directory and its subdirectories can be accessed
SAFE_DIRECTORY=.
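# Example (hypothetical path) - restrict access to a single project directory:
# SAFE_DIRECTORY=/home/user/projects/my-docs
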
# Maximum file size limit in MB
MAX_FILE_SIZE_MB=100

# Encoding Settings
# =================
# Default file encoding for text files
DEFAULT_ENCODING=utf-8
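# Example alternatives for non-UTF-8 text (assumes the server accepts standard codec names):
# DEFAULT_ENCODING=gbk
# DEFAULT_ENCODING=latin-1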

# AI Embeddings Configuration (for vector search)
# ===============================================
# API key for your embedding service (required for vector search)
OPENAI_EMBEDDINGS_API_KEY=your-api-key-here
# Base URL for your embedding service API
# Examples:
# - OpenAI: https://api.openai.com/v1
# - Ollama: http://localhost:11434/v1
# - LM Studio: http://localhost:1234/v1
OPENAI_EMBEDDINGS_BASE_URL=http://your-embedding-service/v1
# Embedding model name
# Popular options:
# - BAAI/bge-m3 (multilingual, 1024 dimensions)
# - text-embedding-ada-002 (OpenAI)
# - sentence-transformers/all-MiniLM-L6-v2 (local)
EMBEDDING_MODEL_NAME=BAAI/bge-m3
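# Example (assumed hosted-OpenAI setup, pairing the options listed above):
# OPENAI_EMBEDDINGS_BASE_URL=https://api.openai.com/v1
# EMBEDDING_MODEL_NAME=text-embedding-ada-002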

# Text Processing Settings
# ========================
# Size of text chunks for processing (characters)
EMBEDDING_CHUNK_SIZE=1000
# Overlap between chunks (characters)
EMBEDDING_CHUNK_OVERLAP=100
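# Worked example (assumes a character-based sliding window): with
# EMBEDDING_CHUNK_SIZE=1000 and EMBEDDING_CHUNK_OVERLAP=100, each chunk starts
# 900 characters after the previous one, so a 2800-character document yields
# chunks covering roughly characters 0-1000, 900-1900, and 1800-2800.
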
# Number of retry attempts for API calls
EMBEDDING_MAX_RETRIES=3
# Timeout for API calls (seconds)
EMBEDDING_TIMEOUT=30
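# Rough worst case (assumes sequential retries with no backoff): a persistently
# failing call can block for about EMBEDDING_MAX_RETRIES x EMBEDDING_TIMEOUT,
# i.e. 3 x 30 = 90 seconds, before the error is reported.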

# Logging Settings
# ================
# Log level: DEBUG, INFO, WARNING, ERROR
LOG_LEVEL=INFO

# Performance Settings
# ====================
# Number of concurrent workers for indexing
INDEX_WORKERS=4
# Batch size for processing documents
BATCH_SIZE=10
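# Rough sizing (assumes each worker handles one batch at a time): with
# INDEX_WORKERS=4 and BATCH_SIZE=10, up to 4 x 10 = 40 documents may be
# in flight at once during indexing.
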
# Cache expiry time in hours
CACHE_EXPIRY_HOURS=24
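
# Quick start (assumed workflow):
#   cp env.example .env
#   # then edit .env with your values before starting the FS-MCP server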