import json
import os

from dotenv import load_dotenv
from mem0 import Memory

load_dotenv()

def get_mem0_client():
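    """Build a mem0 Memory client from environment variables.

    All variables are optional and fall back to the defaults shown in the
    config below: QDRANT_HOST, QDRANT_PORT, NEO4J_URI, NEO4J_USERNAME,
    NEO4J_PASSWORD, MEM0_EMBEDDING_PROVIDER, MEM0_EMBEDDING_MODEL,
    MEM0_LLM_PROVIDER, MEM0_LLM_MODEL, OPENAI_API_KEY, OLLAMA_BASE_URL.
    """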
    config = {
        # Vector store: Qdrant over its REST port (6333 by default;
        # 6334 is Qdrant's gRPC port, which this config does not use).
        "vector_store": {
            "provider": "qdrant",
            "config": {
                "host": os.getenv("QDRANT_HOST", "localhost"),
                "port": int(os.getenv("QDRANT_PORT", 6333)),
            }
        },
        # Graph store: Neo4j, used by mem0's graph memory
        # (which requires "version": "v1.1" below).
        "graph_store": {
            "provider": "neo4j",
            "config": {
                "url": os.getenv("NEO4J_URI", "bolt://mem0-neo4j:7687"),
                "username": os.getenv("NEO4J_USERNAME", "neo4j"),
                "password": os.getenv("NEO4J_PASSWORD", "password"),
            }
        },
        "embedder": {
            "provider": os.getenv("MEM0_EMBEDDING_PROVIDER", "openai"),
            "config": {
                "model": os.getenv("MEM0_EMBEDDING_MODEL", "text-embedding-3-small"),
                "api_key": os.getenv("OPENAI_API_KEY"),
            }
        },
        "llm": {
            "provider": os.getenv("MEM0_LLM_PROVIDER", "openai"),
            "config": {
                "model": os.getenv("MEM0_LLM_MODEL", "gpt-4o"),
                "api_key": os.getenv("OPENAI_API_KEY"),
            }
        },
        "version": "v1.1"
    }
    # Support for Ollama/local fallback if configured: replace the OpenAI
    # config blocks entirely, since mem0's Ollama providers take
    # "ollama_base_url" rather than an API key.
    if os.getenv("MEM0_EMBEDDING_PROVIDER") == "ollama":
        config["embedder"]["config"] = {
            "model": os.getenv("MEM0_EMBEDDING_MODEL", "nomic-embed-text"),
            "ollama_base_url": os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"),
        }
    if os.getenv("MEM0_LLM_PROVIDER") == "ollama":
        config["llm"]["config"] = {
            "model": os.getenv("MEM0_LLM_MODEL", "llama3"),
            "ollama_base_url": os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"),
        }
    # Anything not set explicitly above (e.g. OPENAI_API_KEY) is also picked
    # up by mem0 directly from the environment.
    print(f"DEBUG: Generated Config: {json.dumps(config, indent=2, default=str)}")
    return Memory.from_config(config)
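

# Example usage: a minimal smoke test (the user_id and message below are
# illustrative only, not part of this module).
if __name__ == "__main__":
    memory = get_mem0_client()
    # Store one fact, then retrieve it again via semantic search.
    memory.add("I prefer dark roast coffee.", user_id="demo-user")
    results = memory.search("What coffee does the user like?", user_id="demo-user")
    print(json.dumps(results, indent=2, default=str))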