# Qdrant MCP Server Configuration Example
# Copy this file to .env and update with your values
# Required: OpenAI API Key for generating embeddings
OPENAI_API_KEY=sk-your-openai-api-key-here
# Qdrant Configuration
QDRANT_URL=http://localhost:6333
# Optional, only needed for Qdrant Cloud
# QDRANT_API_KEY=
QDRANT_COLLECTION_NAME=codebase
# Embedding Configuration
EMBEDDING_MODEL=text-embedding-3-small
EMBEDDING_DIMENSIONS=1536
# Indexing Configuration
# 1MB in bytes
MAX_FILE_SIZE=1048576
# Number of files to process in parallel
BATCH_SIZE=10
MAX_TOKENS_PER_CHUNK=500
CHUNK_OVERLAP=50
# File Patterns (glob patterns)
INCLUDE_PATTERNS=**/*.{js,ts,jsx,tsx,py,java,go,rs,cpp,c,h,rb,php,swift,kt,scala,r,m,mm}
EXCLUDE_PATTERNS=**/node_modules/**,**/.git/**,**/dist/**,**/build/**,**/__pycache__/**,**/venv/**,**/.env/**
# Background Indexer Configuration
# milliseconds
WATCH_INTERVAL=5000
INDEX_ON_START=true
INCREMENTAL_INDEXING=true
# Logging Configuration
# One of: debug, info, warn, error
LOG_LEVEL=info
LOG_FILE=~/.qdrant-mcp/logs/indexer.log
# 10MB in bytes
LOG_MAX_SIZE=10485760
LOG_MAX_FILES=5
# Performance Tuning
MAX_CONCURRENT_REQUESTS=5
# milliseconds
REQUEST_TIMEOUT=30000
RETRY_ATTEMPTS=3
# milliseconds
RETRY_DELAY=1000
# Cost Control
# Stop indexing after this many tokens per day
DAILY_TOKEN_LIMIT=1000000
# Price for text-embedding-3-small (USD per 1K tokens)
COST_PER_1K_TOKENS=0.00002
# MCP Server Configuration
# For standalone mode
MCP_SERVER_PORT=3333
MCP_SERVER_HOST=localhost
# Cache Configuration
ENABLE_CACHE=true
CACHE_DIR=~/.qdrant-mcp/cache
# 24 hours in seconds
CACHE_TTL=86400
# Development/Debug Options
# Set to true to simulate indexing without making API calls
DRY_RUN=false
# Set to true for detailed logging
VERBOSE=false
# Set to true to use mock data instead of real API calls
TEST_MODE=false