# AI Model Configuration for DP-MCP Server
# Copy this file to .env.ai and configure your AI models
# =============================================================================
# GLOBAL AI SETTINGS
# =============================================================================
# AI Environment (development, production, enterprise, demo)
# - development: Includes local and cloud models with moderate privacy
# - production: Balanced cloud/local setup with strict privacy
# - enterprise: Local-only models with maximum security
# - demo: Mock models for safe demonstrations
AI_ENVIRONMENT=development
# Privacy Level (none, basic, moderate, strict, paranoid)
# - none: No data filtering (for public data only)
# - basic: Remove obvious PII (emails, phones)
# - moderate: Mask sensitive patterns, limit data size
# - strict: Schema-only mode, heavy sanitization
# - paranoid: No actual data sent to AI models
AI_PRIVACY_LEVEL=moderate
# =============================================================================
# CLAUDE (ANTHROPIC) CONFIGURATION
# =============================================================================
# Claude API Key (get from https://console.anthropic.com/)
# CLAUDE_API_KEY=sk-ant-api03-...
# Claude Model to use (claude-3-sonnet-20240229, claude-3-haiku-20240307)
CLAUDE_MODEL=claude-3-haiku-20240307
# Claude API Base URL (leave default unless using proxy)
# CLAUDE_BASE_URL=https://api.anthropic.com
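# Quick sanity check for your key (a sketch; assumes direct access to the standard
# Anthropic Messages API with no proxy -- adjust anthropic-version if needed):
# curl https://api.anthropic.com/v1/messages \
#   -H "x-api-key: $CLAUDE_API_KEY" \
#   -H "anthropic-version: 2023-06-01" \
#   -H "content-type: application/json" \
#   -d '{"model":"claude-3-haiku-20240307","max_tokens":16,"messages":[{"role":"user","content":"ping"}]}'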
# =============================================================================
# OPENAI CONFIGURATION
# =============================================================================
# OpenAI API Key (get from https://platform.openai.com/api-keys)
# OPENAI_API_KEY=sk-...
# OpenAI Model to use (gpt-4, gpt-3.5-turbo, gpt-4-turbo)
OPENAI_MODEL=gpt-3.5-turbo
# OpenAI API Base URL (leave default unless using proxy)
# OPENAI_BASE_URL=https://api.openai.com
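# Quick sanity check for your key (a sketch; assumes direct access to the standard
# OpenAI API with no proxy):
# curl https://api.openai.com/v1/models \
#   -H "Authorization: Bearer $OPENAI_API_KEY"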
# =============================================================================
# LOCAL MODELS (OLLAMA) CONFIGURATION
# =============================================================================
# Ollama server URL (install from https://ollama.com/)
OLLAMA_BASE_URL=http://localhost:11434
# Available Ollama models (install with: ollama pull <model>)
# Popular models: llama2, codellama, mistral, vicuna
OLLAMA_DEFAULT_MODEL=llama2
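# Example: install the default model and confirm the server responds
# (assumes a local Ollama install listening on the default port 11434):
# ollama pull llama2
# curl http://localhost:11434/api/tags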
# =============================================================================
# AI TOOL SETTINGS
# =============================================================================
# Maximum tokens for AI responses
AI_MAX_TOKENS=4000
# Temperature for AI responses (0.0 = deterministic, 1.0 = creative)
AI_TEMPERATURE=0.1
# Timeout for AI requests (seconds)
AI_REQUEST_TIMEOUT=30
# Maximum rows to send to AI for analysis
AI_MAX_ANALYSIS_ROWS=100
# Maximum sample size for pattern analysis
AI_MAX_SAMPLE_SIZE=1000
# =============================================================================
# PRIVACY AND SECURITY SETTINGS
# =============================================================================
# Columns to always block from AI analysis (comma-separated)
AI_BLOCKED_COLUMNS=password,ssn,credit_card,api_key,secret,token
# Allowed columns for AI analysis (leave unset to allow all columns except those blocked above)
# AI_ALLOWED_COLUMNS=id,name,email,created_at
# Enable audit logging for AI interactions
AI_ENABLE_AUDIT_LOG=true
# Maximum string length to send to AI
AI_MAX_STRING_LENGTH=200
# =============================================================================
# EXAMPLE CONFIGURATIONS
# =============================================================================
# DEVELOPMENT SETUP (local + cloud models, moderate privacy)
# AI_ENVIRONMENT=development
# AI_PRIVACY_LEVEL=moderate
# OLLAMA_BASE_URL=http://localhost:11434
# OLLAMA_DEFAULT_MODEL=llama2

# PRODUCTION SETUP (cloud models with strict privacy)
# AI_ENVIRONMENT=production
# AI_PRIVACY_LEVEL=strict
# CLAUDE_API_KEY=your-key-here
# CLAUDE_MODEL=claude-3-sonnet-20240229

# ENTERPRISE SETUP (local only, maximum security)
# AI_ENVIRONMENT=enterprise
# AI_PRIVACY_LEVEL=paranoid
# OLLAMA_BASE_URL=http://localhost:11434
# OLLAMA_DEFAULT_MODEL=llama2

# DEMO SETUP (mock models, safe for presentations)
# AI_ENVIRONMENT=demo
# AI_PRIVACY_LEVEL=strict
# =============================================================================
# SETUP INSTRUCTIONS
# =============================================================================
# 1. Copy this file: cp .env.ai.sample .env.ai
# 2. Choose your AI_ENVIRONMENT and AI_PRIVACY_LEVEL
# 3. Configure at least one AI provider:
# - For Claude: Set CLAUDE_API_KEY
# - For OpenAI: Set OPENAI_API_KEY
# - For Local: Install Ollama and set OLLAMA_BASE_URL
# 4. Restart your DP-MCP server
# 5. Test with: uv run python src/dp_mcp/server.py --ai-env development --debug
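# Example for step 3 (a sketch; instead of writing secrets into this file, you can
# export them from your shell or process manager before starting the server):
# export CLAUDE_API_KEY=sk-ant-api03-...
# export OPENAI_API_KEY=sk-...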
# =============================================================================
# TROUBLESHOOTING
# =============================================================================
# If AI features aren't working:
# 1. Check server logs for AI initialization messages
# 2. Verify API keys are correctly set
# 3. For Ollama: ensure service is running (ollama serve)
# 4. Test AI status: call get_ai_system_status MCP tool
# 5. Start with demo environment for testing

# Security Notes:
# - Never commit API keys to version control
# - Use environment variables for production
# - Local models (Ollama) provide the best privacy
# - Higher privacy levels may reduce AI effectiveness
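# Example (assumes you are at the root of a git checkout): keep this file out of version control
# echo ".env.ai" >> .gitignore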