Skip to main content
Glama

GitHub Copilot Response Time Statistics MCP Server

by fishcoderman
client_example.py (4.62 kB)
#!/usr/bin/env python3
"""MCP client example - demonstrates how to use the response-time statistics service."""

import time
import uuid
from typing import Any, Dict, Optional, Tuple


class MockMCPClient:
    """Mock MCP client used for demonstration.

    In real usage this would connect to an actual MCP service; here it
    talks directly to the in-process ``tracker`` from ``index``.
    """

    def __init__(self) -> None:
        # Deferred project-local import: in real usage this would be a
        # connection to a running MCP service instead.
        from index import tracker
        self.tracker = tracker

    def start_timing(self, model_name: str, question: str) -> Tuple[str, str]:
        """Start timing a question.

        Returns:
            A ``(session_id, status_message)`` pair.  (Fix: the original
            annotated the return type as ``str`` although the function
            has always returned a 2-tuple.)
        """
        session_id = str(uuid.uuid4())
        result = self.tracker.start_question(session_id, model_name, question)
        return session_id, result

    def end_timing(self, session_id: str) -> str:
        """Stop timing for *session_id* and return the tracker's status message."""
        return self.tracker.end_question(session_id)

    def get_stats(self, model_name: Optional[str] = None) -> Dict[str, Any]:
        """Return statistics, optionally filtered to a single model.

        Fix: the parameter default is ``None``, so the annotation is now
        ``Optional[str]`` rather than the original bare ``str``.
        """
        return self.tracker.get_statistics(model_name)

    def compare_models(self) -> Dict[str, Any]:
        """Return a cross-model response-time comparison report."""
        return self.tracker.compare_models()


def simulate_copilot_interaction(client: MockMCPClient, model_name: str,
                                 question: str, response_delay: float) -> None:
    """Simulate one question/answer round-trip with a Copilot-like model.

    Args:
        client: Client exposing ``start_timing``/``end_timing``.
        model_name: Display name of the model being timed.
        question: The question text (recorded by the tracker).
        response_delay: Seconds to sleep, standing in for model latency.
    """
    print(f"\n=== 测试 {model_name} ===")
    print(f"问题: {question}")

    # Start the timer for this session.
    session_id, start_msg = client.start_timing(model_name, question)
    print(f"✓ {start_msg}")

    # Simulate waiting for the model's answer.
    print(f"⏳ 等待 {model_name} 回答...")
    time.sleep(response_delay)

    # Stop the timer and report.
    end_msg = client.end_timing(session_id)
    print(f"✓ {end_msg}")


def main() -> None:
    """Run the full demo: time several models, then print statistics."""
    print("🚀 GitHub Copilot 响应时间统计演示")
    print("=" * 50)

    client = MockMCPClient()

    # (model, question, simulated latency in seconds)
    test_scenarios = [
        ("GitHub Copilot", "如何创建一个Python函数?", 1.2),
        ("Claude-3", "如何创建一个Python函数?", 0.8),
        ("GPT-4", "如何创建一个Python函数?", 1.5),
        ("GitHub Copilot", "解释Python装饰器的工作原理", 2.1),
        ("Claude-3", "解释Python装饰器的工作原理", 1.3),
        ("GPT-4", "解释Python装饰器的工作原理", 1.8),
    ]

    for model_name, question, delay in test_scenarios:
        simulate_copilot_interaction(client, model_name, question, delay)
        time.sleep(0.5)  # brief pause between scenarios

    print("\n" + "=" * 50)
    print("📊 统计结果")

    # Per-model statistics.
    for model in ["GitHub Copilot", "Claude-3", "GPT-4"]:
        stats = client.get_stats(model)
        if "error" not in stats:
            print(f"\n📈 {model} 统计:")
            print(f" 记录数量: {stats['total_records']}")
            # Fix: this f-string was broken across two lines in the
            # original source; rejoined into a single statement.
            print(f" 平均响应时间: {stats['avg_response_time_ms']:.2f} ms")
            print(f" 最快响应: {stats['min_response_time_ms']:.2f} ms")
            print(f" 最慢响应: {stats['max_response_time_ms']:.2f} ms")

    # Cross-model comparison.
    print("\n🏆 模型对比结果:")
    comparison = client.compare_models()
    if comparison.get("comparison"):
        for model, stats in comparison["comparison"].items():
            print(f"\n{model}:")
            print(f" 测试次数: {stats['count']}")
            print(f" 平均时间: {stats['avg_time_ms']:.2f} ms")

    if comparison.get("fastest_model"):
        print(f"\n🥇 最快模型: {comparison['fastest_model']}")


def demo_real_usage() -> None:
    """Print example code showing how to use the service with a real MCP client."""
    print("\n" + "=" * 60)
    print("📝 实际使用示例代码:")
    print("=" * 60)

    example_code = '''
# 实际使用时的代码示例:

import uuid
from mcp_client import MCPClient  # 假设的 MCP 客户端库

# 连接到 MCP 服务
client = MCPClient("stdio://path/to/index.py")

# 准备问一个问题
question = "如何优化这段Python代码的性能?"
session_id = str(uuid.uuid4())

# 开始计时
client.call_tool("start_question_timing", {
    "session_id": session_id,
    "model_name": "GitHub Copilot",
    "question": question
})

# 向 GitHub Copilot 提问
print("向 Copilot 提问:", question)
# ... 等待 Copilot 回答 ...

# 收到回答后立即结束计时
client.call_tool("end_question_timing", {"session_id": session_id})

# 查看统计
stats = client.call_tool("get_model_statistics", {"model_name": "GitHub Copilot"})
print("统计结果:", stats)
'''
    print(example_code)


if __name__ == "__main__":
    main()
    demo_real_usage()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/fishcoderman/MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server