# default.yaml
server:
  host: "localhost"
  port: 3000

browser:
  engine: playwright         # playwright | cdp
  headless: true
  defaultTimeoutMs: 30000
  storageDir: /data/profiles
  maxContexts: 8

  # Chrome DevTools Protocol (CDP) configuration
  cdp:
    enabled: false           # enable CDP browser support
    host: "localhost"        # CDP host
    port: 9222               # CDP port (default Chrome debug port)
    autoDetect: true         # auto-detect running browsers
    maxRetries: 3            # connection retry attempts
    retryDelay: 1000         # delay between retries (ms)

    # Remote CDP server configuration
    remote:
      enabled: false         # enable remote CDP server support
      url: null              # remote CDP server URL (e.g., "https://cdp.example.com:9222")
      sslMode: "auto"        # ssl mode: auto, enabled, disabled, insecure
      apiKey: null           # API key for authentication (if required)
      headers: {}            # additional headers for requests
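
      # Example (commented out): a minimal sketch of connecting to a hosted
      # CDP endpoint. The URL and API key below are placeholders, not values
      # shipped with this project.
      # enabled: true
      # url: "https://cdp.example.com:9222"
      # sslMode: "enabled"
      # apiKey: "your-api-key-here"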
    # Browser detection settings
    detection:
      enabled: true          # enable browser detection
      ports: [9222, 9223, 9224, 9225, 9226]  # ports to scan (for local detection)
      timeout: 5000          # detection timeout per port (ms)
      useRemote: false       # use remote server for detection instead of local scanning

    # Browser launch settings (when auto-launching)
    launch:
      autoLaunch: false      # auto-launch browser if none found
      browserPath: null      # custom browser path (null = auto-detect)
      userDataDir: null      # custom user data directory
      additionalArgs: []     # additional browser arguments
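
    # Example (commented out): auto-launch a specific Chrome binary. The path
    # is an illustrative assumption and varies by OS; --disable-gpu is a
    # standard Chromium switch.
    # autoLaunch: true
    # browserPath: "/usr/bin/google-chrome"
    # additionalArgs: ["--disable-gpu"]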
    # Connection settings
    connection:
      timeout: 30000         # connection timeout (ms)
      keepAlive: true        # keep connection alive
      reconnect: true        # auto-reconnect on disconnect
      maxReconnects: 5       # max reconnection attempts
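
  # Example (commented out): a minimal sketch for driving an already-running
  # local Chrome via CDP instead of Playwright. Assumes Chrome was started
  # with --remote-debugging-port=9222; everything else keeps the defaults
  # above.
  # browser:
  #   engine: cdp
  #   cdp:
  #     enabled: true
  #     autoDetect: true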

network:
  proxyDefault: null

security:
  allowDomains: ["example.com", "gov.br"]
  blockPrivateNetworks: true
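
# Example (commented out): route traffic through a proxy and widen the
# domain allowlist. The proxy URL and the extra domain are placeholders.
# network:
#   proxyDefault: "http://proxy.internal:8080"
# security:
#   allowDomains: ["example.com", "gov.br", "wikipedia.org"]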

llm:
  defaultModel: "ollama:llama3.1"  # replace with your model: ollama:<model> or jan:<model>
  maxOutputTokens: 2000
  temperature: 0
  host: "localhost"        # hostname or IP of the Ollama/JAN server
  port: 11434              # Ollama port (default 11434)
  janPort: 1337            # JAN port (default 1337)
  autoPreprocess: true     # automatic data preprocessing (recommended)
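
  # Example (commented out): point the client at a JAN server instead of
  # Ollama. The model name matches the JAN entries in preferredModels below;
  # substitute a model you actually have loaded.
  # defaultModel: "jan:llama-3.2-3b"
  # host: "localhost"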
  # Advanced preprocessing configuration
  preprocessing:
    enabled: true            # enable/disable all preprocessing
    intelligentMode: true    # use local LLM for prompt generation (recommended)
    fallbackToTemplates: true  # fall back to templates if intelligent mode fails

    # Content size thresholds for auto-preprocessing
    thresholds:
      html: 3000             # auto-preprocess HTML above this size (bytes)
      text: 5000             # auto-preprocess text above this size (bytes)
      json: 1000             # auto-preprocess JSON above this size (bytes)

    # Model preferences for preprocessing (ordered by priority)
    preferredModels:
      - "ollama:qwen2.5:7b"  # fastest for preprocessing
      - "ollama:llama3.2:3b" # very fast, small model
      - "ollama:mistral:7b"  # good balance
      - "ollama:llama3.1:8b" # stable choice
      - "jan:llama-3.2-3b"   # JAN fallback
      - "jan:mistral-7b"     # JAN alternative
    # Analysis settings
    analysis:
      maxContentSample: 1000     # max chars to analyze for prompt generation
      maxAnalysisTokens: 300     # max tokens for content analysis
      analysisTemperature: 0.1   # low temperature for consistent analysis

limits:
  maxChars: 300000
  maxScreenshotBytes: 8000000

logging:
  level: info
  # Detailed LLM logging for debugging and statistics
  llm:
    enabled: true            # enable detailed LLM logging
    logPrompts: true         # log all prompts sent to the LLM
    logResponses: true       # log all responses from the LLM
    logTokens: true          # log token usage statistics
    logPerformance: true     # log timing and performance metrics
    logPreprocessing: true   # log preprocessing analysis and results

    # Data logging settings
    maxPromptLength: 2000    # max chars to log for prompts (truncated if longer)
    maxResponseLength: 1000  # max chars to log for responses (truncated if longer)
    maxInputDataLength: 5000 # max chars to log for input data (truncated if longer)

    # Performance tracking
    trackMetrics: true       # track performance metrics
    metricsInterval: 100     # log metrics every N operations
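
# Example (commented out): a quieter profile for production. Assumes you want
# performance metrics but not full prompt/response bodies in the logs; the
# "warn" level is an assumed value alongside "info" above.
# logging:
#   level: warn
#   llm:
#     logPrompts: false
#     logResponses: false
#     trackMetrics: true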