# Configuration File for PentestGPT
# Copy this file to config.py and fill in your credentials
# NEVER commit config.py to Git!
# ============================================================================
# AZURE OPENAI CONFIGURATION
# ============================================================================
AZURE_CONFIG = {
    # Get these from Azure Portal > Your OpenAI Resource
    "api_key": "your-azure-openai-api-key-here",
    "endpoint": "https://your-resource.openai.azure.com/",
    "api_version": "2024-02-15-preview",
    "deployment_name": "gpt-4",  # Your GPT-4 deployment name
}
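# Illustrative usage sketch (commented out so this file stays importable). It
# assumes the `openai` Python package (v1.x) and its AzureOpenAI client; adapt
# it to however PentestGPT actually builds its client.
#
# from openai import AzureOpenAI
#
# client = AzureOpenAI(
#     api_key=AZURE_CONFIG["api_key"],
#     api_version=AZURE_CONFIG["api_version"],
#     azure_endpoint=AZURE_CONFIG["endpoint"],
# )
# response = client.chat.completions.create(
#     model=AZURE_CONFIG["deployment_name"],  # deployment name, not model family
#     messages=[{"role": "user", "content": "ping"}],
# )
# print(response.choices[0].message.content)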
# ============================================================================
# CLAUDE (ANTHROPIC) CONFIGURATION (Optional)
# ============================================================================
CLAUDE_CONFIG = {
    # Get from: https://console.anthropic.com/
    "api_key": "your-claude-api-key-here",
    "model": "claude-3-5-sonnet-20241022",
}
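# Illustrative usage sketch (commented out). It assumes the official
# `anthropic` Python SDK; PentestGPT's real client code may differ.
#
# import anthropic
#
# client = anthropic.Anthropic(api_key=CLAUDE_CONFIG["api_key"])
# message = client.messages.create(
#     model=CLAUDE_CONFIG["model"],
#     max_tokens=1024,
#     messages=[{"role": "user", "content": "ping"}],
# )
# print(message.content[0].text)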
# ============================================================================
# GITHUB COPILOT CONFIGURATION (Optional)
# ============================================================================
COPILOT_CONFIG = {
    # Get your GitHub token from: https://github.com/settings/tokens
    "api_key": "your-github-token-here",
    "base_url": "https://api.githubcopilot.com",
    "model": "gpt-4",
}
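# Illustrative usage sketch (commented out). This is an assumption: it treats
# the Copilot endpoint as OpenAI-compatible and reuses the `openai` package
# with a custom base_url. Check how PentestGPT actually wires this provider.
#
# from openai import OpenAI
#
# client = OpenAI(
#     api_key=COPILOT_CONFIG["api_key"],
#     base_url=COPILOT_CONFIG["base_url"],
# )
# response = client.chat.completions.create(
#     model=COPILOT_CONFIG["model"],
#     messages=[{"role": "user", "content": "ping"}],
# )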
# ============================================================================
# SELECT YOUR LLM PROVIDER
# ============================================================================
# Options: "azure", "claude", "copilot"
LLM_PROVIDER = "azure"
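# Hypothetical dispatch sketch (commented out): one way the provider switch
# could be consumed. The names below are illustrative, not PentestGPT's API.
#
# _PROVIDER_CONFIGS = {
#     "azure": AZURE_CONFIG,
#     "claude": CLAUDE_CONFIG,
#     "copilot": COPILOT_CONFIG,
# }
# if LLM_PROVIDER not in _PROVIDER_CONFIGS:
#     raise ValueError(f"Unknown LLM_PROVIDER: {LLM_PROVIDER!r}")
# ACTIVE_CONFIG = _PROVIDER_CONFIGS[LLM_PROVIDER]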
# ============================================================================
# ADVANCED SETTINGS (Optional)
# ============================================================================
# Proxy settings (if behind corporate firewall)
PROXY = {
"http": None, # e.g., "http://proxy.company.com:8080"
"https": None, # e.g., "http://proxy.company.com:8080"
}
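# Illustrative usage sketch (commented out). It assumes outbound HTTP goes
# through the `requests` library, which accepts a proxies mapping in this shape.
#
# import requests
#
# proxies = {k: v for k, v in PROXY.items() if v}  # drop unset entries
# resp = requests.get("https://example.com", proxies=proxies, timeout=30)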
# Timeout settings (in seconds)
TIMEOUT = {
"tool_execution": 600, # Max time for tool execution (10 minutes)
"llm_request": 120, # Max time for LLM API call (2 minutes)
}
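# Illustrative usage sketch (commented out). It assumes tools are launched via
# subprocess; the command shown is hypothetical. subprocess.run raises
# TimeoutExpired when the limit is exceeded.
#
# import subprocess
#
# try:
#     subprocess.run(["nmap", "-sV", "target"], timeout=TIMEOUT["tool_execution"])
# except subprocess.TimeoutExpired:
#     print("Tool run exceeded the configured timeout")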
# Logging settings
LOGGING = {
"level": "INFO", # DEBUG, INFO, WARNING, ERROR, CRITICAL
"file": "pentestgpt.log",
"console": True
}
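# Illustrative usage sketch (commented out). A minimal way to wire these
# settings into the standard library's logging module; PentestGPT's real
# setup may differ.
#
# import logging
#
# handlers = [logging.FileHandler(LOGGING["file"])]
# if LOGGING["console"]:
#     handlers.append(logging.StreamHandler())
# logging.basicConfig(
#     level=getattr(logging, LOGGING["level"]),
#     format="%(asctime)s %(levelname)s %(name)s: %(message)s",
#     handlers=handlers,
# )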