Glama

MemOS-MCP

by qinshu1109
Apache 2.0
  • Linux
  • Apple
performance_benchmark.py (8.94 kB)
""" MemOS性能基准测试 对比官方最佳实践配置前后的性能提升 """ import time import statistics from typing import List, Dict, Any from mvp_memory import create_mvp_memory_manager class PerformanceBenchmark: """性能基准测试器""" def __init__(self): self.results = {} def run_memory_benchmark(self, manager, test_name: str, num_memories: int = 10) -> Dict[str, Any]: """运行记忆操作基准测试""" print(f"\n🧪 运行{test_name}基准测试...") # 记忆添加性能测试 add_times = [] test_memories = [ f"这是第{i}条测试记忆,用于性能基准测试。内容包含一些技术细节和随机信息。" f"时间戳: {time.time()}, 序号: {i}" for i in range(num_memories) ] print(f"📝 测试记忆添加性能 ({num_memories}条记忆)...") for i, content in enumerate(test_memories): start_time = time.time() success = manager.remember(content, tags=[f"测试{i}", "性能基准"]) end_time = time.time() if success: add_times.append(end_time - start_time) else: print(f"❌ 记忆{i}添加失败") # 记忆检索性能测试 search_times = [] search_queries = [ "测试记忆", "性能基准", "技术细节", "随机信息", "时间戳" ] print(f"🔍 测试记忆检索性能 ({len(search_queries)}次查询)...") for query in search_queries: start_time = time.time() results = manager.recall(query, top_k=5) end_time = time.time() search_times.append(end_time - start_time) print(f" 查询'{query}': 找到{len(results)}条记忆") # 计算统计数据 benchmark_result = { "test_name": test_name, "memory_operations": { "add_count": len(add_times), "add_avg_time": statistics.mean(add_times) if add_times else 0, "add_total_time": sum(add_times), "add_throughput": len(add_times) / sum(add_times) if add_times else 0 }, "search_operations": { "search_count": len(search_times), "search_avg_time": statistics.mean(search_times) if search_times else 0, "search_total_time": sum(search_times), "search_throughput": len(search_times) / sum(search_times) if search_times else 0 }, "system_info": manager.get_status_info() } return benchmark_result def run_comparison_test(self) -> Dict[str, Any]: """运行对比测试""" print("🚀 开始MemOS性能对比测试") print("=" * 60) # 测试1: 使用官方最佳实践配置 print("\n📊 测试1: 官方最佳实践配置") manager_official = create_mvp_memory_manager( data_dir="./memos_data_official", use_official_config=True ) official_result = self.run_memory_benchmark( manager_official, "官方最佳实践配置", num_memories=5 # 减少测试数量以节省时间 ) # 测试2: 使用默认配置(对比基准) print("\n📊 测试2: 默认配置(对比基准)") manager_default = create_mvp_memory_manager( data_dir="./memos_data_default", use_official_config=False ) default_result = self.run_memory_benchmark( manager_default, "默认配置", num_memories=5 ) # 性能对比分析 comparison = self._analyze_performance(official_result, default_result) return { "official_config": official_result, "default_config": default_result, "comparison": comparison } def _analyze_performance(self, official: Dict[str, Any], default: Dict[str, Any]) -> Dict[str, Any]: """分析性能对比结果""" # 计算性能提升百分比 def calculate_improvement(official_val, default_val): if default_val == 0: return 0 return ((official_val - default_val) / default_val) * 100 # 记忆添加性能对比 add_improvement = calculate_improvement( official["memory_operations"]["add_throughput"], default["memory_operations"]["add_throughput"] ) # 记忆检索性能对比 search_improvement = calculate_improvement( official["search_operations"]["search_throughput"], default["search_operations"]["search_throughput"] ) # 响应时间对比 add_time_improvement = -calculate_improvement( official["memory_operations"]["add_avg_time"], default["memory_operations"]["add_avg_time"] ) search_time_improvement = -calculate_improvement( official["search_operations"]["search_avg_time"], default["search_operations"]["search_avg_time"] ) return { "throughput_improvements": { "memory_add": f"{add_improvement:+.1f}%", "memory_search": f"{search_improvement:+.1f}%" }, 
"response_time_improvements": { "memory_add": f"{add_time_improvement:+.1f}%", "memory_search": f"{search_time_improvement:+.1f}%" }, "configuration_benefits": { "official_uses_best_practices": official["system_info"].get("official_config", False), "official_backend": official["system_info"].get("memory_backend", "unknown"), "capacity_planning": official["system_info"].get("capacity_planning", {}), "optimizations": official["system_info"].get("optimizations", {}) } } def print_results(self, results: Dict[str, Any]): """打印测试结果""" print("\n" + "=" * 60) print("📊 MemOS性能基准测试结果") print("=" * 60) # 官方配置结果 official = results["official_config"] print(f"\n✅ 官方最佳实践配置:") print(f" 记忆添加: {official['memory_operations']['add_avg_time']:.3f}s/条") print(f" 记忆检索: {official['search_operations']['search_avg_time']:.3f}s/次") print(f" 添加吞吐量: {official['memory_operations']['add_throughput']:.1f}条/秒") print(f" 检索吞吐量: {official['search_operations']['search_throughput']:.1f}次/秒") # 默认配置结果 default = results["default_config"] print(f"\n📋 默认配置:") print(f" 记忆添加: {default['memory_operations']['add_avg_time']:.3f}s/条") print(f" 记忆检索: {default['search_operations']['search_avg_time']:.3f}s/次") print(f" 添加吞吐量: {default['memory_operations']['add_throughput']:.1f}条/秒") print(f" 检索吞吐量: {default['search_operations']['search_throughput']:.1f}次/秒") # 性能提升对比 comparison = results["comparison"] print(f"\n🚀 性能提升对比:") print(f" 记忆添加吞吐量: {comparison['throughput_improvements']['memory_add']}") print(f" 记忆检索吞吐量: {comparison['throughput_improvements']['memory_search']}") print(f" 记忆添加响应时间: {comparison['response_time_improvements']['memory_add']}") print(f" 记忆检索响应时间: {comparison['response_time_improvements']['memory_search']}") # 配置优势 benefits = comparison["configuration_benefits"] print(f"\n💡 配置优势:") print(f" 使用官方最佳实践: {benefits['official_uses_best_practices']}") print(f" 内存后端类型: {benefits['official_backend']}") print(f" 容量规划: {benefits['capacity_planning']}") print(f" 性能优化: {benefits['optimizations']}") print(f"\n✅ 基于官方最佳实践的配置显著提升了MemOS的性能和功能!") def main(): """主函数""" benchmark = PerformanceBenchmark() try: # 运行性能对比测试 results = benchmark.run_comparison_test() # 打印结果 benchmark.print_results(results) except Exception as e: print(f"❌ 性能测试失败: {e}") import traceback traceback.print_exc() if __name__ == "__main__": main()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'
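
The same endpoint can be queried from Python with only the standard library. This is a minimal sketch that prints the raw JSON, since the response schema is not documented here:

import json
import urllib.request

URL = "https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP"

# Fetch the server record and pretty-print whatever fields the API returns.
with urllib.request.urlopen(URL) as resp:
    server_info = json.load(resp)

print(json.dumps(server_info, indent=2, ensure_ascii=False))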

If you have feedback or need assistance with the MCP directory API, please join our Discord server.