# We provide all the information about MCP servers via our MCP API.
# curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'
# If you have feedback or need assistance with the MCP directory API, please join our Discord server.
"""
MemOS官方最佳实践配置管理器
基于官方文档的GeneralTextMemory配置,提供立即体感提升
"""
import os
import json
from pathlib import Path
from typing import Dict, Any, Optional
from dataclasses import dataclass
@dataclass
class OfficialMemOSConfig:
    """Officially recommended MemOS configuration (defaults per vendor docs)."""
    # Base settings
    data_dir: str = "./memos_data"
    # Memory backend — the officially recommended GeneralTextMemory
    memory_backend: str = "general_text"
    # Embedding model — SiliconFlow-compatible Qwen embedder
    embedder_backend: str = "siliconflow"
    embedder_model: str = "Qwen/Qwen3-Embedding-0.6B"
    embedder_api_base: str = "https://api.siliconflow.cn/v1"
    # LLM settings — SiliconFlow-hosted DeepSeek model
    llm_backend: str = "siliconflow"
    llm_model: str = "deepseek-ai/DeepSeek-V3"
    llm_api_base: str = "https://api.siliconflow.cn/v1"
    # Vector database (Qdrant) connection settings
    vector_db_backend: str = "qdrant"
    vector_db_host: str = "localhost"
    # NOTE(review): 6335 is not Qdrant's default REST port (6333) — confirm deployment
    vector_db_port: int = 6335
    vector_db_collection: str = "general_memories"
    # Capacity planning — officially recommended sizes
    working_memory_capacity: int = 20
    user_memory_capacity: int = 500
    long_term_memory_capacity: int = 2000
    # Batch-processing tuning
    batch_size: int = 32
    max_concurrent_requests: int = 4
    # Feature toggles for performance optimization
    enable_scheduler: bool = True
    enable_batch_processing: bool = True
    enable_memory_compression: bool = True
class OfficialConfigManager:
    """Builds, persists, loads, and validates the official best-practice
    MemOS configuration.

    All tunables come from :class:`OfficialMemOSConfig`; the SiliconFlow API
    key is read exclusively from the ``SILICONFLOW_API_KEY`` environment
    variable (never hard-coded — see :meth:`_api_key`).
    """

    def __init__(self, data_dir: str = "./memos_data"):
        """Create the manager and ensure *data_dir* exists.

        Args:
            data_dir: Root directory for MemOS data and the JSON config file.
        """
        self.data_dir = Path(data_dir)
        # parents=True so nested paths like "./a/b/memos" work on first run
        # (the original only tolerated one missing path component).
        self.data_dir.mkdir(parents=True, exist_ok=True)
        self.config_file = self.data_dir / "official_config.json"
        self.config = OfficialMemOSConfig(data_dir=str(self.data_dir))

    @staticmethod
    def _api_key() -> str:
        """Return the SiliconFlow API key from the environment.

        SECURITY: the previous revision shipped a real-looking API key as a
        hard-coded fallback in three call sites. Secrets must never live in
        source control, so the fallback is now an empty string — requests
        will fail fast with an auth error instead of silently using a
        leaked credential.
        """
        return os.getenv("SILICONFLOW_API_KEY", "")

    def generate_official_config(self) -> Dict[str, Any]:
        """Generate the complete officially recommended configuration.

        Returns:
            A JSON-serializable dict with ``memory``, ``scheduler`` (or
            ``None`` when the scheduler is disabled) and bookkeeping keys.
        """
        from datetime import date  # local import: only used for the timestamp

        api_key = self._api_key()
        # Core GeneralTextMemory configuration.
        memos_config = {
            "backend": "general_text",
            "config": {
                # LLM used to extract memories from raw text.
                "extractor_llm": {
                    "backend": self.config.llm_backend,
                    "config": {
                        "model_name_or_path": self.config.llm_model,
                        "api_base": self.config.llm_api_base,
                        "api_key": api_key,
                        "temperature": 0.1,
                        "max_tokens": 1000
                    }
                },
                # Vector store for embedded memories.
                "vector_db": {
                    "backend": self.config.vector_db_backend,
                    "config": {
                        "collection_name": self.config.vector_db_collection,
                        "host": self.config.vector_db_host,
                        "port": self.config.vector_db_port,
                        "distance": "cosine"
                    }
                },
                # SiliconFlow-compatible Qwen embedding model.
                "embedder": {
                    "backend": self.config.embedder_backend,
                    "config": {
                        "model_name_or_path": self.config.embedder_model,
                        "api_base": self.config.embedder_api_base,
                        "api_key": api_key,
                        "embedding_dims": 1024  # Qwen3-Embedding-0.6B output size
                    }
                }
            }
        }
        # Scheduler: capacity planning plus batching / compression knobs.
        scheduler_config = {
            "memory_capacities": {
                "working_memory_capacity": self.config.working_memory_capacity,
                "user_memory_capacity": self.config.user_memory_capacity,
                "long_term_memory_capacity": self.config.long_term_memory_capacity
            },
            "batch_processing": {
                "enabled": self.config.enable_batch_processing,
                "batch_size": self.config.batch_size,
                "max_concurrent_requests": self.config.max_concurrent_requests
            },
            "performance_optimization": {
                "enable_memory_compression": self.config.enable_memory_compression,
                "compression_threshold": 1000,  # tokens
                "auto_cleanup": True
            }
        }
        return {
            "version": "1.0",
            "config_type": "official_best_practice",
            "memory": memos_config,
            "scheduler": scheduler_config if self.config.enable_scheduler else None,
            "data_dir": str(self.data_dir),
            # Actual generation date; the original hard-coded a stale literal.
            "created_at": date.today().isoformat(),
            "description": "基于MemOS官方最佳实践的GeneralTextMemory配置"
        }

    def save_config(self) -> bool:
        """Serialize the generated config to ``official_config.json``.

        Returns:
            True on success, False on any I/O or serialization error.
        """
        try:
            config = self.generate_official_config()
            with open(self.config_file, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=2, ensure_ascii=False)
            print(f"✅ 官方配置已保存到: {self.config_file}")
            return True
        except (OSError, TypeError, ValueError) as e:
            # Narrowed from bare Exception: only I/O and JSON-serialization
            # failures are expected here.
            print(f"❌ 配置保存失败: {e}")
            return False

    def load_config(self) -> Optional[Dict[str, Any]]:
        """Load the previously saved config file.

        Returns:
            The parsed dict, or None when the file is missing or unreadable.
        """
        try:
            if self.config_file.exists():
                with open(self.config_file, 'r', encoding='utf-8') as f:
                    config = json.load(f)
                print(f"✅ 官方配置已加载: {self.config_file}")
                return config
            print(f"⚠️ 配置文件不存在: {self.config_file}")
            return None
        except (OSError, json.JSONDecodeError) as e:
            print(f"❌ 配置加载失败: {e}")
            return None

    def validate_dependencies(self) -> Dict[str, bool]:
        """Probe external services and report their availability.

        Returns:
            Flags for SiliconFlow API reachability, Qdrant reachability,
            and the Qwen embedding model responding.
        """
        dependencies = {
            "siliconflow_available": False,
            "qdrant_available": False,
            "qwen_embed_available": False
        }
        # SiliconFlow probe: issue a tiny real embedding request.
        try:
            from openai import OpenAI
            client = OpenAI(
                api_key=self._api_key(),
                base_url=self.config.embedder_api_base
            )
            client.embeddings.create(
                model=self.config.embedder_model,
                input="test"
            )
            dependencies["siliconflow_available"] = True
            dependencies["qwen_embed_available"] = True
        except Exception:
            # Best-effort probe: any failure (import, network, auth) simply
            # leaves the flags False.
            pass
        # Qdrant probe: list collections on the configured host/port.
        try:
            from qdrant_client import QdrantClient
            qdrant = QdrantClient(
                host=self.config.vector_db_host,
                port=self.config.vector_db_port
            )
            qdrant.get_collections()
            dependencies["qdrant_available"] = True
        except Exception:
            pass
        return dependencies

    def setup_environment(self) -> bool:
        """Check all external dependencies and print a status report.

        Returns:
            True only when every dependency probe succeeded.
        """
        print("🔧 设置MemOS官方配置环境...")
        deps = self.validate_dependencies()
        print(f"📊 依赖检查结果:")
        print(f"   SiliconFlow服务: {'✅' if deps['siliconflow_available'] else '❌'}")
        print(f"   Qdrant服务: {'✅' if deps['qdrant_available'] else '❌'}")
        print(f"   Qwen嵌入模型: {'✅' if deps['qwen_embed_available'] else '❌'}")
        # Give actionable guidance when the API key / service is missing.
        if not deps['siliconflow_available']:
            print("\n💡 配置SiliconFlow API:")
            print("   export SILICONFLOW_API_KEY=your_api_key")
            print("   或在.env文件中设置SILICONFLOW_API_KEY")
        return all(deps.values())
def create_official_config_manager(data_dir: str = "./memos_data") -> OfficialConfigManager:
    """Factory helper: build an :class:`OfficialConfigManager` rooted at *data_dir*."""
    manager = OfficialConfigManager(data_dir)
    return manager
if __name__ == "__main__":
    # Smoke-test the official configuration manager end to end.
    print("🚀 测试MemOS官方配置管理器")
    print("=" * 50)

    manager = create_official_config_manager()

    # Generate and persist the config; report success.
    saved = manager.save_config()
    if saved:
        print("✅ 官方配置生成成功")

    # Probe external dependencies and summarize the outcome.
    env_ok = manager.setup_environment()
    print("✅ 环境验证通过" if env_ok else "⚠️ 部分依赖不可用,请检查安装")