# We provide all the information about MCP servers via our MCP API:
#   curl -X GET 'https://glama.ai/api/mcp/v1/servers/suwa-sh/local-RAG-backend'
# If you have feedback or need assistance with the MCP directory API, please join our Discord server.
# Graphiti MCP Server Environment Configuration
# Neo4j設定 (Neo4j connection settings)
NEO4J_URI=bolt://localhost:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=demodemo
# LLM Model設定 (LLM model settings)
LLM_MODEL_URL=https://api.openai.com/v1
LLM_MODEL_KEY=your_openai_api_key_here
LLM_MODEL_NAME=gpt-4o-mini
RERANK_MODEL_NAME=gpt-4o-mini
# Embedding Model設定 (Embedding model settings)
EMBEDDING_MODEL_URL=http://localhost:11434/v1
EMBEDDING_MODEL_KEY=dummy
EMBEDDING_MODEL_NAME=kun432/cl-nagoya-ruri-large:latest
# テナント識別子設定(必須) (Tenant identifier — required)
GROUP_ID=default
# Optional: Path configuration for Docker
# PATH=/root/.local/bin:${PATH}
# Optional: Memory settings for Neo4j (used in Docker Compose)
# NEO4J_server_memory_heap_initial__size=512m
# NEO4J_server_memory_heap_max__size=1G
# NEO4J_server_memory_pagecache_size=512m
# Azure OpenAI configuration
# Optional: Only needed for Azure OpenAI endpoints
# AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint_here
# AZURE_OPENAI_API_VERSION=2025-01-01-preview
# AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o-gpt-4o-mini-deployment
# AZURE_OPENAI_EMBEDDING_API_VERSION=2023-05-15
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-3-large-deployment
# AZURE_OPENAI_USE_MANAGED_IDENTITY=false