Skip to main content
Glama

CodeGraph CLI MCP Server

by Jakedismo
example_embedding.toml (971 B)
# Example configuration focused on embedding model settings

[embedding]
provider = "openai"
# Must match the output dimension of the selected model
# (1536 for text-embedding-3-small).
dimension = 1536
cache_enabled = true
cache_ttl_secs = 3600  # seconds
normalize_embeddings = true

[embedding.openai]
model = "text-embedding-3-small"
# Name of the environment variable holding the API key —
# the key itself is never stored in this file.
api_key_env = "OPENAI_API_KEY"
api_base = "https://api.openai.com/v1"
max_retries = 3
timeout_secs = 30  # per-request timeout, seconds

[performance]
mode = "balanced"

[performance.index]
index_type = "IVFFlat"
# nprobe/nlist apply to IVF-style indexes; m/ef_* presumably
# apply to HNSW-style indexes — NOTE(review): confirm which keys
# the selected index_type actually consumes.
nprobe = 16
nlist = 100
m = 32
ef_construction = 200
ef_search = 64
use_gpu = false

[performance.cache]
enabled = true
max_size_mb = 256
ttl_secs = 3600  # seconds
eviction_policy = "lru"
preload_common = false

[performance.processing]
batch_size = 32
parallel_workers = 8
# chunk_size/overlap_size control how documents are split;
# overlap_size must stay smaller than chunk_size.
chunk_size = 512
overlap_size = 50
max_queue_size = 1000
timeout_secs = 30  # seconds

[runtime]
allow_runtime_switching = true
hot_reload = false
config_watch_interval_secs = 30  # only relevant when hot_reload = true

[monitoring]
enabled = true
metrics_enabled = true
trace_enabled = false
profile_enabled = false
metrics_interval_secs = 60  # seconds

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Jakedismo/codegraph-rust'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.