#-----------------------------------------------------------------------------
#REQUIRED
#-----------------------------------------------------------------------------
#GEMINI_API_KEY
#Description: API key used for Google's Gemini models.
#Required: Yes (src/deep-research.ts expects this; the app cannot run without it)
#How to obtain: From Google AI Studio/Google Cloud project with Generative AI access.
#Example format: AIza... (never commit a real key)
GEMINI_API_KEY=YOUR_GEMINI_API_KEY_HERE
#-----------------------------------------------------------------------------
#RECOMMENDED / OPTIONAL WITH DEFAULTS
#-----------------------------------------------------------------------------
#GEMINI_MODEL
#Description: Model to use for generation with @google/genai.
#Default used by this example: "gemini-2.5-flash"
#Prior code default: "gemini-pro" (legacy). You may switch back if needed.
#Acceptable values:
#- gemini-2.5-flash (fast, capable)
#- gemini-2.5-pro (higher quality, may be slower)
#- gemini-2.5-flash-lite (faster, smaller)
#- gemini-1.5-pro (legacy-compatible, if supported by your tier)
GEMINI_MODEL=gemini-2.5-flash
#GEMINI_EMBEDDING_MODEL
#Description: Optional embedding model override (e.g. text-embedding-004).
#Leave empty to use the provider's default embedding model.
#NOTE(review): not documented elsewhere in this file — confirm exact usage in src/ai/providers.ts.
GEMINI_EMBEDDING_MODEL=
# GEMINI_MAX_OUTPUT_TOKENS
#Description: Max output tokens for model responses. Large limits are possible
#with Google models (up to ~65k tokens on supported models/tiers).
#Default: 65536
#Set lower if you need to constrain cost/latency.
GEMINI_MAX_OUTPUT_TOKENS=65536
# Optional sampling config (used in src/ai/providers.ts)
# Ranges: TOP_P (0-1), TOP_K (1-1000), CANDIDATE_COUNT (1-8), TEMPERATURE (0-2 typical)
GEMINI_TEMPERATURE=0.5
GEMINI_TOP_P=0.9
GEMINI_TOP_K=40
GEMINI_CANDIDATE_COUNT=2
# Research thinking/cost controls and URL context toggle
THINKING_BUDGET_TOKENS=1500
ENABLE_URL_CONTEXT=true
#GEMINI_API_ENDPOINT=
#Description: Optional override for the API endpoint. Typically NOT required
#when using @google/genai which uses Google's default endpoints.
#Leave empty to use library defaults. Example if needed:
#https://generativelanguage.googleapis.com
#Default: (unset)
#GEMINI_API_ENDPOINT=
#CONCURRENCY_LIMIT
#Description: Controls concurrent task limit in research flows.
#Location: src/deep-research.ts
#Default in code: 5
#Increase/decrease depending on system resources and external API rate limits.
CONCURRENCY_LIMIT=5
#FIRECRAWL_API_KEY
#Description: API key for Firecrawl (optional but recommended for richer crawling).
#Location: src/deep-research.ts
#Required: No (recommended)
#Only set if you intend to use Firecrawl features.
#FIRECRAWL_API_KEY=YOUR_FIRECRAWL_API_KEY_HERE
#FIRECRAWL_BASE_URL
#Description: Optional base URL override for Firecrawl (self-hosted or custom endpoint).
#Location: src/deep-research.ts
#Required: No
#Example: https://api.firecrawl.dev
#FIRECRAWL_BASE_URL=
#-----------------------------------------------------------------------------
# GEMINI TOOLS / GROUNDING (optional feature flags)
#-----------------------------------------------------------------------------
# Enable Google Search grounding tool inside Gemini
ENABLE_GEMINI_GOOGLE_SEARCH=true
# Enable Code Execution tool (Python) inside Gemini
ENABLE_GEMINI_CODE_EXECUTION=false
# Enable Function Calling support (allows passing function declarations)
ENABLE_GEMINI_FUNCTIONS=false
# Provider-level response cache (LRU) for generateContent requests
ENABLE_PROVIDER_CACHE=true
PROVIDER_CACHE_MAX=100
# 10 minutes TTL for provider cache
PROVIDER_CACHE_TTL_MS=600000
# Logger / CLI visuals
# Pretty logs for local dev (pino-pretty)
LOG_PRETTY=false
# Log level: trace|debug|info|warn|error|fatal|silent
LOG_LEVEL=info
# Compact progress (hide labels to save width)
PROGRESS_COMPACT=false
#-----------------------------------------------------------------------------
#FUTURE / OPTIONAL INTEGRATIONS (COMMENTED PLACEHOLDERS)
#-----------------------------------------------------------------------------
#These are not currently referenced in code, but may be needed in the future.
#Leave commented unless you plan to integrate them.
#TAVILY_API_KEY
#Description: Key for Tavily search API (optional future integration).
#TAVILY_API_KEY=YOUR_TAVILY_API_KEY_HERE
#EXA_API_KEY
#Description: Key for Exa search API (optional future integration).
#EXA_API_KEY=YOUR_EXA_API_KEY_HERE
#ENABLE_EXA_PRIMARY
#Description: If true, use Exa as the primary search provider (staged rollout gate).
#Default: false (Gemini Google Search grounding remains primary)
#ENABLE_EXA_PRIMARY=false
#-----------------------------------------------------------------------------
# MIGRATION PLAN: AGENT + POLICY FLAGS (future-ready; keep defaults unless needed)
#-----------------------------------------------------------------------------
# Learning/memory (not yet enforced in code – reserved for agentization stage)
#ENABLE_AGENT_LEARNING=false
#AGENT_MEMORY_TTL_MS=900000 # 15 minutes
#AGENT_MEMORY_MAX=200 # max items to retain
# Text splitting overrides (currently unused; reserved)
#CHUNK_SIZE=140 # tokens/approx chars per chunk
# Output/policy constraints
#PROGRESS_REDACT_BODIES=true # redact verbose payloads in progress logs
#MIN_CITATIONS_PER_SECTION=2
#AUTHORITY_THRESHOLD=0.6 # 0..1 score used to filter weak sources
# Robustness controls
#MAX_REPAIR_PASSES=1 # number of self-repair attempts
#MAX_RETRIES=2 # network/model retry attempts
#PHASE_TIMEOUT_MS=90000 # per-phase timeout (ms)
#OVERALL_DEADLINE_MS=420000 # overall pipeline deadline (ms)
# MCP server diagnostics (off by default)
#ENABLE_MCP_HEALTH=false
#ENABLE_MCP_SERVER_INFO=false
#-----------------------------------------------------------------------------
#RUNTIME / ENVIRONMENT NOTES
#-----------------------------------------------------------------------------
#Node: This project targets Node.js 22.
#Loading: package.json scripts use "tsx --env-file=.env.local" to load these vars.
#Security: Do NOT commit .env.local. Use .gitignore to prevent accidental commits.
#========================