---
project_root: "/Users/origo/src/flowcode/code-flow-mcp"
drift_enabled: true
watch_directories: ["code_flow"]
ignored_patterns: ["venv", "**/__pycache__"]
max_graph_depth: 3
embedding_model: "all-mpnet-base-v2"
max_tokens: 256
language: "python"
min_similarity: 0.1
call_graph_confidence_threshold: 0.1
drift_cluster_algorithm: "hdbscan"
drift_cluster_eps: 0.75
drift_cluster_min_samples: 5
memory_enabled: true
memory_collection_name: "cortex_memory_v1"
memory_similarity_weight: 0.7
memory_score_weight: 0.3
memory_min_score: 0.1
memory_cleanup_interval_seconds: 3600
memory_grace_seconds: 86400
memory_half_life_days:
  TRIBAL: 180.0
  EPISODIC: 14.0
  FACT: 30.0
memory_decay_floor:
  TRIBAL: 0.1
  EPISODIC: 0.01
  FACT: 0.05
memory_resources_enabled: true
memory_resources_limit: 10
memory_resources_filters: {}
llm_config:
  api_key_env_var: "OPENAI_API_KEY"
  base_url: "https://openrouter.ai/api/v1"  # Default: OpenRouter
  model: "x-ai/grok-4.1-fast"  # Default model
  max_tokens: 256  # Max tokens in LLM response per summary
  concurrency: 2  # Number of parallel summary generation workers
  # Smart filtering to reduce costs
  min_complexity: 3  # Only summarize functions with complexity >= 3
  min_nloc: 5  # Only summarize functions with >= 5 lines of code
  skip_private: true  # Skip functions starting with _ (private)
  skip_test: true  # Skip test functions (test_*, *_test)
  prioritize_entry_points: true  # Summarize entry points first
  # Depth control
  summary_depth: "standard"  # "minimal", "standard", "detailed"
  max_input_tokens: 2000  # Truncate function body if longer