# Environment Variables Template for Case Study Generator MCP Server
# Copy this file to .env and fill in your actual values
# =============================================================================
# OLLAMA CONFIGURATION
# =============================================================================
# Ollama server URL (default: http://localhost:11434)
# Change if Ollama is running on a different host or port
OLLAMA_HOST=http://localhost:11434
# Ollama model to use for processing (default: gemma3n:e4b)
# Available models: gemma3n:e4b, gemma3:4b, gemma3:12b, gemma3:27b, llama3.1, etc.
OLLAMA_MODEL=gemma3n:e4b
# =============================================================================
# GITHUB API CONFIGURATION
# =============================================================================
# GitHub Personal Access Token for repository analysis
# Generate at: https://github.com/settings/tokens
# Required scopes: public_repo (or repo for private repos)
GITHUB_TOKEN=ghp_your_github_token_here
# =============================================================================
# TAVILY API CONFIGURATION (Optional)
# =============================================================================
# Tavily API Key for enhanced company research with web search
# Sign up at: https://tavily.com/
# Leave empty to use basic pattern-matching research
TAVILY_API_KEY=
# =============================================================================
# MCP SERVER CONFIGURATION (Optional)
# =============================================================================
# MCP server host (default: localhost)
MCP_HOST=localhost
# MCP server port (default: auto-assigned)
# MCP_PORT=8000
# Logging level (DEBUG, INFO, WARNING, ERROR)
LOG_LEVEL=INFO
# =============================================================================
# GEMMA3 MODEL CONFIGURATION (Optional)
# =============================================================================
# Temperature for text generation (0.0-1.0, default: 0.1)
# Lower values = more focused, higher values = more creative
GEMMA3_TEMPERATURE=0.1
# Context window size (default: 4096 for 4b model)
GEMMA3_CONTEXT_SIZE=4096
# Maximum tokens in response (default: 800)
GEMMA3_MAX_TOKENS=800
# Top-p sampling (default: 0.9)
GEMMA3_TOP_P=0.9
# Repeat penalty (default: 1.1)
GEMMA3_REPEAT_PENALTY=1.1
# =============================================================================
# API RATE LIMITING (Optional)
# =============================================================================
# GitHub API requests per hour (default: 5000 for authenticated, 60 for anonymous)
GITHUB_RATE_LIMIT=5000
# Request timeout in seconds (default: 30)
REQUEST_TIMEOUT=30
# Retry attempts for failed requests (default: 3)
MAX_RETRIES=3