Skip to main content
Glama

MemOS-MCP

by qinshu1109
Apache 2.0
3
  • Linux
  • Apple
test_simple_expert_patch.py4.13 kB
#!/usr/bin/env python3
"""Simplified expert-patch verification script.

Verifies only the core lazy-import mechanism (string "module:Class"
paths in the factory backend maps), avoiding the heavier full-module
import chain. Run as a script; exits 0 on success, 1 on failure.
"""
import sys
from pathlib import Path

# Make the project sources and the test helpers (conftest with its
# mocked heavy dependencies) importable when run from the repo root.
sys.path.insert(0, str(Path(__file__).parent / "src"))
sys.path.insert(0, str(Path(__file__).parent / "tests"))


def test_simple_expert_patch():
    """Run the simplified verification of the expert patch.

    Returns:
        bool: True only when every individual check passed.  (Previously
        the function returned True whenever no exception was raised, even
        if some checks had printed a ❌ failure — that bug is fixed by
        tracking ``all_passed``.)
    """
    print("🎯 开始简化验证专家补丁效果")
    print("=" * 50)

    # Accumulates the outcome of the per-backend checks; a printed ❌
    # must make the whole verification fail.
    all_passed = True

    try:
        # Importing conftest installs the mock stand-ins for heavy
        # optional dependencies (transformers, ollama, redis, ...).
        import conftest  # noqa: F401
        print("✅ Mock依赖环境已设置")

        # Test 1: MemoryFactory must map backends to lazy "module:Class"
        # string paths rather than directly-imported classes.
        print("\n📋 测试1: 验证MemoryFactory延迟导入...")
        from memos.memories.factory import MemoryFactory
        backends = MemoryFactory.backend_to_class
        print(f"✅ MemoryFactory后端: {list(backends.keys())}")
        for backend, path in backends.items():
            if isinstance(path, str) and ":" in path:
                print(f" ✅ {backend}: 延迟导入路径 {path}")
            else:
                print(f" ❌ {backend}: 仍使用直接导入")
                all_passed = False

        # Test 2: same lazy-import check for LLMFactory.
        print("\n📋 测试2: 验证LLMFactory延迟导入...")
        from memos.llms.factory import LLMFactory
        llm_backends = LLMFactory.backend_to_class
        print(f"✅ LLMFactory后端: {list(llm_backends.keys())}")
        for backend, path in llm_backends.items():
            if isinstance(path, str) and ":" in path:
                print(f" ✅ {backend}: 延迟导入路径 {path}")
            else:
                print(f" ❌ {backend}: 仍使用直接导入")
                all_passed = False

        # Test 3: the multi-memory cube config can be constructed with a
        # minimal single-module configuration.
        print("\n📋 测试3: 验证MultiMemoryMemCubeConfig...")
        from memos.configs.memory import MemoryConfigFactory
        from memos.configs.mem_cube import MultiMemoryMemCubeConfig
        mem_configs = {
            'test_mem': MemoryConfigFactory(backend='uninitialized')
        }
        config = MultiMemoryMemCubeConfig(
            user_id="simple_test",
            memory_configs=mem_configs
        )
        print(f"✅ MultiMemoryMemCubeConfig创建成功: {config.user_id}")
        print(f" Memory模块: {list(config.memory_configs.keys())}")

        # Test 4: the cube itself can be created from that config and
        # can enumerate its memory modules.
        print("\n📋 测试4: 验证MultiMemoryMemCube...")
        from memos.mem_cube.multi import MultiMemoryMemCube
        cube = MultiMemoryMemCube(config)
        memories = cube.list_memories()
        print("✅ MultiMemoryMemCube创建成功")
        print(f" Memory模块状态: {memories}")

        # Test 5: the mocked heavy dependencies resolve as modules.
        print("\n📋 测试5: 验证Mock依赖...")
        import transformers
        import ollama
        import redis
        print(f"✅ transformers模拟: {type(transformers)}")
        print(f"✅ ollama模拟: {type(ollama)}")
        print(f"✅ redis模拟: {type(redis)}")

        print("\n" + "=" * 50)
        print("🎉 简化专家补丁验证成功!")
        print("\n📊 验证结果:")
        print(" ✅ MemoryFactory使用延迟导入")
        print(" ✅ LLMFactory使用延迟导入")
        print(" ✅ MultiMemoryMemCubeConfig正常工作")
        print(" ✅ MultiMemoryMemCube正常工作")
        print(" ✅ Mock依赖正常工作")

        # FIX: propagate per-check failures instead of always True.
        return all_passed

    except Exception as e:
        print(f"\n❌ 简化验证失败: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = test_simple_expert_patch()
    if success:
        print("\n🌟 专家的延迟导入补丁完全有效!")
        print(" 核心MultiMemoryMemCube功能可以正常工作")
    else:
        print("\n💥 补丁验证失败,需要进一步调试")
    sys.exit(0 if success else 1)

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server