# Cloudflare Configuration for MCP Memory Service
# ================================================
# Copy this file to .env and replace with your actual credentials
#
# Setup Instructions:
# 1. Copy this file: cp .env.example .env
# 2. Create Cloudflare API Token at: https://dash.cloudflare.com/profile/api-tokens
# 3. Replace placeholder values below with your actual credentials
# 4. Never commit your .env file to git (it's already in .gitignore)
# =============================================================================
# REQUIRED: Cloudflare API Token
# =============================================================================
# Create at: https://dash.cloudflare.com/profile/api-tokens
# Required permissions:
# - Account: Cloudflare Workers:Edit
# - NOTE(review): this service reads/writes D1, Vectorize, and (optionally) R2
#   via the API — the token likely also needs Account: D1:Edit and
#   Account: Vectorize:Edit (and R2:Edit if R2 is enabled). Verify against
#   the project docs before creating the token.
# - Zone Resources: Include All zones
# - Account Resources: Include All accounts
#
# IMPORTANT: Test your token with the account-scoped endpoint:
# curl "https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/tokens/verify" \
# -H "Authorization: Bearer {YOUR_TOKEN}"
#
# DO NOT use the generic endpoint (will fail for scoped tokens):
# curl "https://api.cloudflare.com/client/v4/user/tokens/verify" ❌
CLOUDFLARE_API_TOKEN=your-cloudflare-api-token-here
# =============================================================================
# REQUIRED: Cloudflare Account ID
# =============================================================================
# Find in: Cloudflare Dashboard > Right sidebar under "Account ID"
# Example: be0e35a26715043ef8df90253268c33f
CLOUDFLARE_ACCOUNT_ID=your-account-id-here
# =============================================================================
# REQUIRED: D1 Database ID
# =============================================================================
# Create with: wrangler d1 create mcp-memory-database
# Or find existing: wrangler d1 list
# Example: f745e9b4-ba8e-4d47-b38f-12af91060d5a
CLOUDFLARE_D1_DATABASE_ID=your-d1-database-id-here
# =============================================================================
# REQUIRED: Vectorize Index Name
# =============================================================================
# Create with: wrangler vectorize create mcp-memory-index --dimensions=384
# Or find existing: wrangler vectorize list
#
# IMPORTANT: --dimensions MUST match the output size of your embedding model,
# or vector inserts will fail. The default model below
# (@cf/baai/bge-base-en-v1.5) produces 768-dimensional embeddings, so an
# index created with --dimensions=384 would only be correct for a
# 384-dimensional model — confirm the required dimension for your setup
# before creating the index.
# Example: mcp-memory-index
CLOUDFLARE_VECTORIZE_INDEX=your-vectorize-index-name
# =============================================================================
# OPTIONAL: R2 Bucket for Large Content Storage
# =============================================================================
# Create with: wrangler r2 bucket create mcp-memory-content
# Only needed if you plan to store large content (>1MB)
# CLOUDFLARE_R2_BUCKET=mcp-memory-content
# =============================================================================
# STORAGE BACKEND CONFIGURATION
# =============================================================================
# Options: sqlite_vec | cloudflare | hybrid
# - sqlite_vec: Fast local storage (development)
# - cloudflare: Cloud storage with Cloudflare (production)
# - hybrid: Best of both - local speed + cloud persistence (recommended)
MCP_MEMORY_STORAGE_BACKEND=cloudflare
# =============================================================================
# OPTIONAL: Advanced Configuration
# =============================================================================
# Cloudflare embedding model (default is recommended)
# CLOUDFLARE_EMBEDDING_MODEL=@cf/baai/bge-base-en-v1.5
# Large content threshold for R2 storage (bytes)
# CLOUDFLARE_LARGE_CONTENT_THRESHOLD=1048576
# HTTP Interface (Web Dashboard)
# MCP_HTTP_ENABLED=true
# MCP_HTTP_PORT=8888
# MCP_HTTPS_ENABLED=true
# MCP_HTTPS_PORT=8443
# OAuth 2.1 Authentication (for web interface)
# MCP_OAUTH_ENABLED=false
# Hybrid Backend Configuration (if using hybrid)
# Keep comments on their own line: if you uncomment a setting, some .env
# loaders treat trailing text after the value as part of the value.
# Sync interval in seconds (300 = every 5 minutes)
# MCP_HYBRID_SYNC_INTERVAL=300
# Number of operations synced per batch
# MCP_HYBRID_BATCH_SIZE=50
# Perform an initial sync on startup
# MCP_HYBRID_SYNC_ON_STARTUP=true
# =============================================================================
# GRAPH DATABASE CONFIGURATION (v8.51.0+)
# =============================================================================
# Controls how memory associations are stored
# Options:
# - memories_only: Store in memories.metadata.associations (backward compatible, v8.48.0 behavior)
# - dual_write: Write to both memories.metadata AND memory_graph table (migration mode, default)
# - graph_only: Write to memory_graph table only (future mode, requires migration complete)
#
# Migration Path:
# 1. Start with dual_write (default) - maintains compatibility while building graph data
# 2. Run migration script to backfill memory_graph from existing associations
# 3. Validate graph data completeness
# 4. Switch to graph_only mode after validation
#
# Benefits of graph_only mode:
# - 5-10x faster association queries (indexed graph table vs JSON metadata parsing)
# - Efficient bidirectional traversal (A→B and B→A)
# - Support for multiple connection types (semantic, temporal, causal, thematic)
# - Foundation for advanced graph analytics (PageRank, clustering, path finding)
#
MCP_GRAPH_STORAGE_MODE=dual_write
# =============================================================================
# TROUBLESHOOTING
# =============================================================================
# Common issues:
# 1. "Invalid API Token" - Check token permissions and expiry
# 2. "Database not found" - Verify D1 database ID is correct
# 3. "Vectorize index not found" - Check index name, and that the index
#    dimensions match your embedding model's output size
# 4. "Account access denied" - Ensure API token has account permissions
#
# Documentation: https://github.com/doobidoo/mcp-memory-service/wiki
# Support: https://github.com/doobidoo/mcp-memory-service/issues