# ╔══════════════════════════════════════════════════════════════╗
# ║           How to Create a GitHub Token for ZigNet            ║
# ╚══════════════════════════════════════════════════════════════╝
#
# 📝 STEP-BY-STEP:
#
# 1. Go to: https://github.com/settings/tokens
#
# 2. Click "Generate new token" → "Generate new token (classic)"
#
# 3. Configure the token:
#    Name:       ZigNet Scraper
#    Expiration: 90 days (or No expiration)
#
# 4. Select ONLY this scope:
#    ✅ public_repo (Access public repositories)
#
# 5. Click "Generate token"
#
# 6. COPY the token (you'll only see it once!)
#
# 7. Export in your shell:
#
#    export GITHUB_TOKEN="ghp_your_token_here"
#
#    Or add it to ~/.bashrc:
#
#    echo 'export GITHUB_TOKEN="ghp_your_token_here"' >> ~/.bashrc
#    source ~/.bashrc
#
# 8. Verify:
#
#    echo "$GITHUB_TOKEN"
#
#    This should print your token.
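#
#    To confirm the token is accepted by the GitHub API (an optional
#    extra check; assumes curl is installed):
#
#    curl -s -H "Authorization: Bearer $GITHUB_TOKEN" \
#         https://api.github.com/user
#
#    A valid token returns your user profile as JSON.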
#
# 9. Run the scraper:
#
#    pnpm scrape-repos
#
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
#
# ⚠️ RATE LIMITS:
#
#   Without token:    60 requests/hour (too low for scraping)
#   With token:     5000 requests/hour (recommended)
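#
# To inspect your current limit and remaining quota (a quick sketch,
# assuming curl and jq are installed):
#
#   curl -s -H "Authorization: Bearer $GITHUB_TOKEN" \
#        https://api.github.com/rate_limit | jq '.resources.core'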
#
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
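# GitHub token (see the step-by-step guide above; replace the
# placeholder with your real token):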
GITHUB_TOKEN="ghp_your_token_here"
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Zig Configuration
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ZIG_SUPPORTED="0.13.0,0.14.0,0.15.2"
ZIG_DEFAULT="0.15.2"
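#
# ZIG_SUPPORTED appears to be a comma-separated list of accepted Zig
# versions, and ZIG_DEFAULT presumably must be one of them. A quick
# sanity check (illustrative, after sourcing this file in a POSIX shell):
#
#   case ",$ZIG_SUPPORTED," in
#     *",$ZIG_DEFAULT,"*) echo "ZIG_DEFAULT ok" ;;
#     *)                  echo "ZIG_DEFAULT not in ZIG_SUPPORTED" ;;
#   esac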
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# LLM Configuration
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Model path (default: ~/.zignet/models/zignet-qwen-7b-q4km.gguf)
# ZIGNET_MODEL_PATH="/path/to/custom/model.gguf"
# Auto-download model from HuggingFace (default: true)
# ZIGNET_MODEL_AUTO_DOWNLOAD="false"
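#
# To check whether the model is already on disk (illustrative, using
# the default path above):
#   ls -lh ~/.zignet/models/zignet-qwen-7b-q4km.gguf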
# GPU device selection (CUDA_VISIBLE_DEVICES)
# Specify which GPU(s) to use for CUDA inference
# Default: undefined (use all available GPUs)
#
# Examples:
#
#   Use the first GPU only (e.g., an RTX 4090):
#     ZIGNET_GPU_DEVICE="0"
#
#   Use the second GPU only (e.g., to keep GPU 0 free for other work):
#     ZIGNET_GPU_DEVICE="1"
#
#   Use multiple GPUs:
#     ZIGNET_GPU_DEVICE="0,1"
#
# Note: GPU indices are system-dependent.
# Use `nvidia-smi` to list available NVIDIA GPUs and their indices.
# This setting restricts CUDA to only the specified GPU devices.
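#   For example, to list CUDA device indices (assumes the NVIDIA
#   driver is installed):
#     nvidia-smi --query-gpu=index,name --format=csv,noheader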
#
# ZIGNET_GPU_DEVICE="0"
# Number of layers to offload to GPU (default: 35 for RTX 3090)
# Set to 0 for CPU-only inference
# ZIGNET_GPU_LAYERS="35"
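# Fewer layers use less VRAM at the cost of speed; for example,
# for CPU-only inference:
# ZIGNET_GPU_LAYERS="0"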
# LLM context size in tokens (default: 4096)
# ZIGNET_CONTEXT_SIZE="4096"
# LLM sampling temperature; higher values give more varied output (default: 0.7)
# ZIGNET_TEMPERATURE="0.7"
# LLM top-p (nucleus) sampling threshold (default: 0.9)
# ZIGNET_TOP_P="0.9"
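# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Example: a full custom LLM configuration in one place
# (illustrative values only; uncomment and adjust for your hardware)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# ZIGNET_MODEL_PATH="/path/to/custom/model.gguf"
# ZIGNET_GPU_DEVICE="0"
# ZIGNET_GPU_LAYERS="35"
# ZIGNET_CONTEXT_SIZE="4096"
# ZIGNET_TEMPERATURE="0.7"
# ZIGNET_TOP_P="0.9"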