Skip to main content
Glama

GitHub Copilot Response Time Statistics MCP Server

by fishcoderman
index.py7.76 kB
#!/usr/bin/env python3
"""GitHub Copilot Response Time Statistics MCP Server.

Tracks and compares how long different AI models take to answer questions.
Finished measurements are persisted to a JSON file next to the script.
"""

import datetime
import json
import time
from collections import defaultdict
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional

from fastmcp import FastMCP

# Create the MCP server instance.
mcp = FastMCP(name="Copilot Response Time Statistics Server")


@dataclass
class ResponseRecord:
    """One timed question/answer interaction with a model."""

    model_name: str          # e.g. "gpt-4", "claude-3", "copilot"
    question: str            # the question that was asked
    start_time: float        # epoch seconds when timing started
    end_time: float          # epoch seconds when timing stopped
    response_time_ms: float  # (end_time - start_time) * 1000
    timestamp: str           # ISO-8601 time the record was completed
    session_id: str          # caller-supplied unique session identifier

    def to_dict(self) -> dict:
        """Return a JSON-serializable dict of this record."""
        return asdict(self)


class ResponseTimeTracker:
    """Tracks in-flight timing sessions and persists finished records.

    Records are loaded from / saved to ``data_file`` (JSON list of
    :class:`ResponseRecord` dicts). Sessions live only in memory.
    """

    def __init__(self, data_file: str = "response_stats.json"):
        self.data_file = Path(data_file)
        # session_id -> {'model_name': str, 'question': str, 'start_time': float}
        self.active_sessions: Dict[str, Dict[str, Any]] = {}
        self.records: List[ResponseRecord] = []
        self.load_data()

    def load_data(self) -> None:
        """Load previously saved records from the data file, if present.

        A corrupt or unreadable file resets the record list instead of
        crashing, so the server always starts.
        """
        if self.data_file.exists():
            try:
                with open(self.data_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                self.records = [ResponseRecord(**record) for record in data]
            except Exception as e:
                print(f"加载数据时出错: {e}")
                self.records = []

    def save_data(self) -> None:
        """Persist all records to the data file as pretty-printed JSON."""
        try:
            with open(self.data_file, 'w', encoding='utf-8') as f:
                json.dump([record.to_dict() for record in self.records], f,
                          ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"保存数据时出错: {e}")

    def start_question(self, session_id: str, model_name: str, question: str) -> str:
        """Open a timing session for *session_id* and return a status message.

        Note: reusing an active session_id silently restarts its clock.
        """
        self.active_sessions[session_id] = {
            'model_name': model_name,
            'question': question,
            'start_time': time.time(),
        }
        return f"已开始记录 {model_name} 的响应时间,会话ID: {session_id}"

    def end_question(self, session_id: str) -> str:
        """Close the session, record elapsed time, and persist the result.

        Returns an error message (does not raise) when the session is unknown.
        """
        if session_id not in self.active_sessions:
            return f"未找到会话 {session_id},请先调用 start_question"

        end_time = time.time()
        session_data = self.active_sessions[session_id]
        response_time_ms = (end_time - session_data['start_time']) * 1000

        record = ResponseRecord(
            model_name=session_data['model_name'],
            question=session_data['question'],
            start_time=session_data['start_time'],
            end_time=end_time,
            response_time_ms=response_time_ms,
            timestamp=datetime.datetime.now().isoformat(),
            session_id=session_id,
        )
        self.records.append(record)
        del self.active_sessions[session_id]
        self.save_data()

        return f"记录完成!{session_data['model_name']} 响应时间: {response_time_ms:.2f} 毫秒"

    def get_statistics(self, model_name: Optional[str] = None) -> Dict[str, Any]:
        """Return count/avg/min/max stats, optionally filtered by model.

        Includes the 5 most recent matching records; returns an ``error``
        dict when nothing matches.
        """
        filtered_records = self.records
        if model_name:
            filtered_records = [r for r in self.records if r.model_name == model_name]

        if not filtered_records:
            return {"error": f"未找到{'模型 ' + model_name if model_name else '任何'}的记录"}

        response_times = [r.response_time_ms for r in filtered_records]
        return {
            "model_name": model_name or "所有模型",
            "total_records": len(filtered_records),
            "avg_response_time_ms": sum(response_times) / len(response_times),
            "min_response_time_ms": min(response_times),
            "max_response_time_ms": max(response_times),
            "recent_records": [r.to_dict() for r in filtered_records[-5:]],
        }

    def compare_models(self) -> Dict[str, Any]:
        """Return per-model count/avg/min/max and the fastest model by average."""
        # Group recorded response times by model name.
        model_stats: Dict[str, List[float]] = defaultdict(list)
        for record in self.records:
            model_stats[record.model_name].append(record.response_time_ms)

        comparison = {
            model: {
                "count": len(times),
                "avg_time_ms": sum(times) / len(times),
                "min_time_ms": min(times),
                "max_time_ms": max(times),
            }
            for model, times in model_stats.items()
        }
        return {
            "comparison": comparison,
            "fastest_model": min(comparison.keys(),
                                 key=lambda k: comparison[k]["avg_time_ms"])
            if comparison else None,
        }


# Global tracker shared by all tool invocations.
tracker = ResponseTimeTracker()


@mcp.tool()
def start_question_timing(session_id: str, model_name: str, question: str) -> str:
    """Start timing a model's answer to a question.

    Args:
        session_id: Unique session identifier.
        model_name: Model name (e.g. "gpt-4", "claude-3", "copilot").
        question: The question being asked.
    """
    return tracker.start_question(session_id, model_name, question)


@mcp.tool()
def end_question_timing(session_id: str) -> str:
    """Stop timing and compute the response time.

    Args:
        session_id: The session identifier passed to start_question_timing.
    """
    return tracker.end_question(session_id)


@mcp.tool()
def get_model_statistics(model_name: Optional[str] = None) -> Dict[str, Any]:
    """Get statistics for one model, or for all models.

    Args:
        model_name: Model name; when omitted, statistics cover all models.
    """
    return tracker.get_statistics(model_name)


@mcp.tool()
def compare_all_models() -> Dict[str, Any]:
    """Compare response-time performance across all recorded models."""
    return tracker.compare_models()


@mcp.tool()
def get_active_sessions() -> Dict[str, Any]:
    """List the currently active (not yet finished) timing sessions."""
    return {
        "active_sessions": list(tracker.active_sessions.keys()),
        "session_details": tracker.active_sessions,
    }


@mcp.tool()
def clear_all_data() -> str:
    """Delete all statistics and active sessions (use with caution)."""
    tracker.records.clear()
    tracker.active_sessions.clear()
    tracker.save_data()
    return "所有数据已清除"


@mcp.resource("resource://usage")
def usage_guide() -> str:
    """Usage guide resource (served verbatim to clients)."""
    return """
## GitHub Copilot 响应时间统计 MCP 服务使用指南

### 基本使用流程:

1. **开始计时**:
   ```
   start_question_timing(session_id="test-1", model_name="copilot", question="如何创建Python函数?")
   ```

2. **结束计时**:
   ```
   end_question_timing(session_id="test-1")
   ```

3. **查看统计**:
   ```
   get_model_statistics()  # 所有模型
   get_model_statistics(model_name="copilot")  # 特定模型
   ```

4. **模型对比**:
   ```
   compare_all_models()
   ```

### 示例工作流程:
1. 准备向 Copilot 提问时,调用 start_question_timing
2. 向 Copilot 提问并等待回答
3. 收到完整回答后,立即调用 end_question_timing
4. 使用统计工具查看和对比结果

### 注意事项:
- session_id 必须唯一
- 确保及时调用 end_question_timing
- 数据自动保存到 response_stats.json 文件
"""


if __name__ == "__main__":
    print("启动 GitHub Copilot 响应时间统计 MCP 服务...")
    mcp.run()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/fishcoderman/MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server