Skip to main content
Glama

MemOS-MCP

by qinshu1109
Apache 2.0
3
  • Linux
  • Apple
simple_test.py — 7.36 kB
#!/usr/bin/env python3
"""
MemOS simple test — checks remote LLM connectivity.

Runs standalone without MemOS's heavier modules and probes only the core
pieces: library imports, the local Qdrant vector store, and the
SiliconFlow / OpenAI-compatible chat APIs.
"""
import os
from pathlib import Path


def load_env_file():
    """Load KEY=VALUE pairs from a local ``.env`` file into ``os.environ``.

    Blank lines, comment lines (``#`` after optional leading whitespace),
    and lines without an ``=`` are skipped instead of crashing, so a
    slightly malformed ``.env`` cannot abort the whole test run.
    """
    env_file = Path(".env")
    if not env_file.exists():
        return
    with open(env_file) as f:
        for line in f:
            stripped = line.strip()
            # Skip blanks, comments, and lines that are not KEY=VALUE.
            if not stripped or stripped.startswith('#') or '=' not in stripped:
                continue
            key, value = stripped.split('=', 1)
            os.environ[key] = value


def test_openai_import():
    """Check that the ``openai`` client library can be imported."""
    print("🧪 测试OpenAI库导入...")
    try:
        from openai import OpenAI  # noqa: F401 — import is the test itself
        print("✅ OpenAI库导入成功")
        return True
    except ImportError as e:
        print(f"❌ OpenAI库导入失败: {e}")
        return False


def test_basic_dependencies():
    """Check that the core third-party dependencies import cleanly."""
    print("🧪 测试基础依赖...")
    try:
        import qdrant_client  # noqa: F401
        import fastapi  # noqa: F401
        import sqlalchemy  # noqa: F401
        import redis  # noqa: F401
        print("✅ 基础依赖导入成功")
        return True
    except ImportError as e:
        print(f"❌ 基础依赖导入失败: {e}")
        return False


def test_siliconflow_api():
    """Probe the SiliconFlow chat-completions API with one short request.

    Returns True (treated as a pass) when no SILICONFLOW_API_KEY is
    configured, so the suite still succeeds in offline setups.
    """
    print("🧪 测试SiliconFlow API连接...")
    try:
        from openai import OpenAI

        # Without a key, skip the network call but do not fail the suite.
        api_key = os.getenv("SILICONFLOW_API_KEY")
        if not api_key:
            print("⚠️ 未找到SILICONFLOW_API_KEY,跳过API测试")
            print("💡 请在.env文件中配置API密钥")
            return True

        # SiliconFlow exposes an OpenAI-compatible endpoint.
        client = OpenAI(
            api_key=api_key,
            base_url="https://api.siliconflow.cn/v1"
        )

        response = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-V3",
            messages=[
                {"role": "system", "content": "你是一个专业的AI助手,请简洁回答问题。"},
                {"role": "user", "content": "请用一句话介绍MemOS的核心价值。"}
            ],
            max_tokens=100
        )

        answer = response.choices[0].message.content
        print(f"🤖 LLM回答: {answer}")
        print("✅ SiliconFlow API连接成功")
        return True
    except Exception as e:
        print(f"❌ SiliconFlow API连接失败: {e}")
        print("💡 请检查API密钥配置和网络连接")
        return False


def test_openai_api():
    """Probe an OpenAI-compatible endpoint configured via OPENAI_* env vars.

    Skips (returning True) when OPENAI_API_KEY is absent.
    """
    print("🧪 测试OpenAI API连接...")
    try:
        from openai import OpenAI

        api_key = os.getenv("OPENAI_API_KEY")
        base_url = os.getenv("OPENAI_BASE_URL")
        if not api_key:
            print("⚠️ 未找到OPENAI_API_KEY,跳过OpenAI API测试")
            return True

        # base_url may point at SiliconFlow acting as an OpenAI-compatible API.
        client = OpenAI(
            api_key=api_key,
            base_url=base_url
        )

        response = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-V3",
            messages=[
                {"role": "system", "content": "你是一个专业的AI助手,请简洁回答问题。"},
                {"role": "user", "content": "请用一句话介绍MemOS的核心价值。"}
            ],
            max_tokens=50
        )

        answer = response.choices[0].message.content
        print(f"🤖 DeepSeek-V3回答: {answer}")
        print("✅ OpenAI兼容API连接成功(DeepSeek-V3)")
        return True
    except Exception as e:
        print(f"❌ OpenAI API连接失败: {e}")
        return False


def test_qdrant_connection():
    """Exercise a throwaway local Qdrant store: create, upsert, search."""
    print("🧪 测试Qdrant向量数据库...")
    try:
        from qdrant_client import QdrantClient
        from qdrant_client.models import Distance, VectorParams, PointStruct
        import tempfile

        # Use an on-disk Qdrant in a temp dir — no server required.
        with tempfile.TemporaryDirectory() as temp_dir:
            client = QdrantClient(path=temp_dir)

            collection_name = "test_collection"
            client.create_collection(
                collection_name=collection_name,
                vectors_config=VectorParams(size=4, distance=Distance.DOT)
            )

            # Insert two tiny test vectors with payloads.
            points = [
                PointStruct(
                    id=1,
                    vector=[0.1, 0.2, 0.3, 0.4],
                    payload={"text": "这是一个测试记忆"}
                ),
                PointStruct(
                    id=2,
                    vector=[0.2, 0.3, 0.4, 0.5],
                    payload={"text": "这是另一个测试记忆"}
                )
            ]
            client.upsert(
                collection_name=collection_name,
                points=points
            )

            # Search with the first vector; it should match itself best.
            search_result = client.search(
                collection_name=collection_name,
                query_vector=[0.1, 0.2, 0.3, 0.4],
                limit=1
            )

            assert len(search_result) > 0
            result = search_result[0]
            print(f"🔍 搜索结果: {result.payload}")
            print(f"📊 相似度分数: {result.score:.3f}")
            print("✅ Qdrant向量数据库测试成功")
            return True
    except Exception as e:
        print(f"❌ Qdrant测试失败: {e}")
        return False


def main():
    """Run every check in order and print a pass/fail summary."""
    print("🚀 MemOS 简单功能测试")
    print("=" * 50)

    load_env_file()

    tests = [
        ("OpenAI库导入", test_openai_import),
        ("基础依赖", test_basic_dependencies),
        ("Qdrant向量数据库", test_qdrant_connection),
        ("SiliconFlow API", test_siliconflow_api),
        ("OpenAI API", test_openai_api),
    ]

    results = []
    for name, test_func in tests:
        print(f"\n{'='*20} {name} {'='*20}")
        success = test_func()
        results.append((name, success))

    print("\n" + "="*50)
    print("📊 测试结果总结:")
    passed = 0
    for name, success in results:
        status = "✅ 成功" if success else "❌ 失败"
        print(f" {name}: {status}")
        if success:
            passed += 1

    print(f"\n🎯 总体结果: {passed}/{len(results)} 项测试成功")

    # Three passes means the base environment works even without API keys.
    if passed >= 3:
        print("🎉 MemOS基础环境部署成功!")
        print("\n📖 下一步:")
        print(" 1. 配置API密钥以启用远程LLM功能")
        print(" 2. 可选安装embedding支持: poetry install --extras embedding")
        print(" 3. 开始使用MemOS的记忆管理功能")
    else:
        print("⚠️ 部分核心功能需要进一步配置")
        print("\n🔧 建议:")
        print(" 1. 检查依赖安装: poetry install")
        print(" 2. 配置API密钥: 编辑.env文件")
        print(" 3. 检查网络连接")


if __name__ == "__main__":
    main()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server