
MemOS-MCP

by qinshu1109
Apache 2.0
  • Linux
  • Apple
test_expert_patch_verification.py (4.28 kB)
#!/usr/bin/env python3
"""
Expert patch verification script.
Verifies whether the 4-line "stopgap" patch actually resolves the cascading import problem.
"""
import sys
from pathlib import Path

# Add source and test paths
sys.path.insert(0, str(Path(__file__).parent / "src"))
sys.path.insert(0, str(Path(__file__).parent / "tests"))


def test_expert_patch():
    """Verify the effect of the expert patch."""
    print("🎯 Starting verification of the expert's 4-line 'stopgap' patch")
    print("=" * 60)

    try:
        # Import conftest to set up the mocked dependency environment
        import conftest
        print("✅ Mock dependency environment set up")

        # Test 1: verify that memos.configs can be imported on its own
        print("\n📋 Test 1: verifying standalone import of memos.configs...")
        from memos.configs.mem_cube import MultiMemoryMemCubeConfig
        from memos.configs.memory import MemoryConfigFactory
        print("✅ memos.configs imported successfully without triggering LLM system loading")

        # Test 2: verify that MultiMemoryMemCube can be created normally
        print("\n📋 Test 2: verifying MultiMemoryMemCube creation...")
        from memos.mem_cube.multi import MultiMemoryMemCube

        # Build the configuration
        mem_configs = {
            'code_snippet_mem': MemoryConfigFactory(backend='uninitialized'),
            'error_log_mem': MemoryConfigFactory(backend='uninitialized'),
            'api_sample_mem': MemoryConfigFactory(backend='uninitialized'),
            'file_chunk_mem': MemoryConfigFactory(backend='uninitialized'),
            'git_commit_mem': MemoryConfigFactory(backend='uninitialized'),
            'product_graph_mem': MemoryConfigFactory(backend='uninitialized')
        }

        config = MultiMemoryMemCubeConfig(
            user_id="expert_patch_test",
            memory_configs=mem_configs
        )

        cube = MultiMemoryMemCube(config)
        print("✅ MultiMemoryMemCube created successfully")

        # Test 3: verify memory module management
        print("\n📋 Test 3: verifying memory module management...")
        memories = cube.list_memories()
        print(f"✅ Number of memory modules: {len(memories)}")
        for name, status in memories.items():
            print(f"   - {name}: {status}")

        # Test 4: verify the lazy-import mechanism
        print("\n📋 Test 4: verifying the lazy-import mechanism...")
        from memos.memories.factory import MemoryFactory
        from memos.llms.factory import LLMFactory

        # Check whether string paths are used
        memory_backends = MemoryFactory.backend_to_class
        llm_backends = LLMFactory.backend_to_class

        print(f"✅ MemoryFactory backends: {list(memory_backends.keys())}")
        print(f"✅ LLMFactory backends: {list(llm_backends.keys())}")

        # Verify that string paths are used instead of direct class references
        for backend, path in memory_backends.items():
            if isinstance(path, str) and ":" in path:
                print(f"   ✅ {backend}: uses lazy-import path {path}")
            else:
                print(f"   ❌ {backend}: still uses a direct import")

        # Test 5: verify that transformers is mocked rather than actually loaded
        print("\n📋 Test 5: verifying the transformers mock...")
        import transformers
        client = transformers.AutoTokenizer.from_pretrained("mock")
        tokens = client.encode("test")
        print(f"✅ transformers mock works: {tokens}")

        print("\n" + "=" * 60)
        print("🎉 Expert patch verified: all tests passed")
        print("\n📊 Verification results:")
        print("   ✅ memos.configs can be imported standalone")
        print("   ✅ MultiMemoryMemCube works correctly")
        print("   ✅ The lazy-import mechanism is effective")
        print("   ✅ Mock dependencies work correctly")
        print("   ✅ No heavy dependencies were loaded")

        return True

    except Exception as e:
        print(f"\n❌ Expert patch verification failed: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = test_expert_patch()
    if success:
        print("\n🌟 The expert's 4-line 'stopgap' patch is fully effective!")
        print("   Multi-Memory development and testing can now proceed safely")
    else:
        print("\n💥 Patch verification failed; further debugging is needed")
    sys.exit(0 if success else 1)
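Test 4 above checks that the factories map backend names to "module.path:ClassName" strings rather than importing the backend classes eagerly. As a rough illustration of that lazy-import pattern (a sketch only, not this project's actual implementation; the module path in the mapping is hypothetical), such a string can be resolved on demand with importlib:

import importlib


def resolve_backend(path):
    """Resolve a 'module.path:ClassName' string to the class it names, importing lazily."""
    if isinstance(path, str) and ":" in path:
        module_name, class_name = path.split(":", 1)
        module = importlib.import_module(module_name)  # imported only when requested
        return getattr(module, class_name)
    return path  # already a class object (eager registration)


# Hypothetical mapping: nothing heavy is imported until resolve_backend() is called.
backend_to_class = {"uninitialized": "memos.memories.uninitialized:UninitializedMemory"}

Similarly, the conftest import in Test 5 is expected to install a lightweight stand-in for transformers. A minimal sketch of that idea, assuming plain sys.modules stubbing (not necessarily how this repo's conftest does it):

import sys
import types
from unittest.mock import MagicMock

# Register a fake 'transformers' module so later imports never load the real library.
fake = types.ModuleType("transformers")
fake.AutoTokenizer = MagicMock()
fake.AutoTokenizer.from_pretrained.return_value.encode.return_value = [101, 102]
sys.modules["transformers"] = fake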

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'
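For scripted access, the same endpoint can be queried from Python. A minimal sketch assuming the endpoint returns JSON (the field names available depend on the actual response schema):

import json
import urllib.request

# Fetch the directory entry for this server and pretty-print the JSON payload.
url = "https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP"
with urllib.request.urlopen(url) as resp:
    data = json.load(resp)
print(json.dumps(data, indent=2, ensure_ascii=False))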

If you have feedback or need assistance with the MCP directory API, please join our Discord server.