#!/usr/bin/env python3
"""
MemOS 远程LLM功能测试脚本
测试本地存储 + 远程LLM的集成
"""
import os
import sys
import json
import tempfile
from pathlib import Path
# Make the repo-local src/ directory importable so the in-tree package
# is found before any installed copy when this script runs from the repo root.
sys.path.insert(0, str(Path(__file__).parent / "src"))
def load_env_file():
    """Load KEY=VALUE pairs from a local ``.env`` file into ``os.environ``.

    Reads ``.env`` from the current working directory, if present. Blank
    lines, comment lines (``#`` even after leading whitespace), and lines
    without an ``=`` separator are skipped — the original implementation
    raised ``ValueError`` on such lines because it split unconditionally
    and tested ``startswith('#')`` on the unstripped line. Keys and values
    are stripped of surrounding whitespace before being stored.
    """
    env_file = Path(".env")
    if not env_file.exists():
        return
    with open(env_file) as f:
        for raw in f:
            line = raw.strip()
            # Skip blanks, comments, and anything that is not KEY=VALUE.
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = line.split('=', 1)
            os.environ[key.strip()] = value.strip()
def test_config_loading():
    """Check that the SiliconFlow config file parses and has the expected shape.

    Returns True on success, False (with a diagnostic message) on any
    failure — missing file, invalid JSON, or missing required sections.
    """
    print("🧪 测试配置文件加载...")
    config_path = "examples/data/config/siliconflow_config.json"
    try:
        with open(config_path) as fp:
            cfg = json.load(fp)
        # Required top-level sections, plus the expected chat backend.
        for section in ("chat_model", "mem_reader"):
            assert section in cfg
        assert cfg["chat_model"]["backend"] == "openai"
    except Exception as e:
        print(f"❌ 配置文件加载失败: {e}")
        return False
    print("✅ 配置文件加载成功")
    return True
def test_openai_client():
    """Smoke-test the remote LLM endpoint with one short chat completion.

    Prefers a SiliconFlow key over an OpenAI key. A missing key is not a
    failure: the check is skipped and True is returned. Any exception
    (including an unavailable ``openai`` package) yields False.
    """
    print("🧪 测试远程LLM API连接...")
    try:
        from openai import OpenAI

        sf_key = os.getenv("SILICONFLOW_API_KEY")
        oa_key = os.getenv("OPENAI_API_KEY")
        if not (sf_key or oa_key):
            print("⚠️ 未找到API密钥,跳过API连接测试")
            return True

        # SiliconFlow takes priority when both keys are configured.
        if sf_key:
            client = OpenAI(
                api_key=sf_key,
                base_url="https://api.siliconflow.cn/v1"
            )
            model = "Qwen/Qwen2.5-7B-Instruct"
        else:
            client = OpenAI(api_key=oa_key)
            model = "gpt-4o-mini"

        reply = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Hello, this is a test."}],
            max_tokens=50
        )
        assert reply.choices[0].message.content
    except Exception as e:
        print(f"❌ 远程LLM API连接失败: {e}")
        print("💡 请检查API密钥配置和网络连接")
        return False
    print("✅ 远程LLM API连接成功")
    return True
def test_embedding_model():
    """Verify a local sentence-transformers model can encode text.

    Loads the lightweight 'all-MiniLM-L6-v2' model and encodes one Chinese
    and one English sentence; returns True when the resulting matrix has
    one non-empty row per input, False on any exception.
    """
    print("🧪 测试本地Embedding模型...")
    try:
        from sentence_transformers import SentenceTransformer

        encoder = SentenceTransformer('all-MiniLM-L6-v2')
        sentences = ["这是一个测试句子", "This is a test sentence"]
        vectors = encoder.encode(sentences)
        # One embedding row per sentence, each with a non-zero dimension.
        rows, dims = vectors.shape
        assert rows == 2
        assert dims > 0
    except Exception as e:
        print(f"❌ 本地Embedding模型测试失败: {e}")
        return False
    print("✅ 本地Embedding模型测试通过")
    return True
def test_memos_integration():
    """Load the MemOS config matching the available credentials.

    Picks the SiliconFlow config when an API key is present, otherwise the
    lite (mock-mode) config. Only config parsing is exercised here — the
    full MemOS runtime is intentionally not started.
    """
    print("🧪 测试MemOS集成功能...")
    try:
        load_env_file()
        has_key = os.getenv("SILICONFLOW_API_KEY") or os.getenv("OPENAI_API_KEY")
        if has_key:
            config_path = "examples/data/config/siliconflow_config.json"
        else:
            print("⚠️ 未找到API密钥,使用Mock模式测试")
            config_path = "examples/data/config/lite_memos_config.json"
        # Parsing is the whole check; the parsed dict itself is unused.
        with open(config_path) as fp:
            json.load(fp)
    except Exception as e:
        print(f"❌ MemOS集成测试失败: {e}")
        return False
    print("✅ MemOS集成配置测试通过")
    return True
def main():
    """Run every check, print a summary, and return overall success.

    Returns:
        bool: True when all checks passed, False otherwise.
    """
    print("🚀 开始MemOS远程LLM功能测试")
    print("=" * 50)
    load_env_file()

    checks = (
        test_config_loading,
        test_openai_client,
        test_embedding_model,
        test_memos_integration,
    )
    total = len(checks)
    passed = 0
    for check in checks:
        passed += 1 if check() else 0
        print()

    print("=" * 50)
    print(f"📊 测试结果: {passed}/{total} 通过")
    if passed == total:
        print("🎉 远程LLM功能测试通过!")
        print("💡 现在可以使用本地存储 + 远程LLM的MemOS了")
    else:
        print("❌ 部分测试失败,请检查配置")
        print("💡 提示:")
        print(" 1. 确保已配置API密钥 (.env文件)")
        print(" 2. 检查网络连接")
        print(" 3. 验证API密钥有效性")
    return passed == total
if __name__ == "__main__":
    # Exit code 0 when every check passed, 1 otherwise (CI-friendly).
    raise SystemExit(0 if main() else 1)