Skip to main content
Glama

ArXiv MCP Server

by wr-web
mytest.py (8.44 kB)
"""Interactive command-line test harness for the arXiv MCP server tools.

Runs a simple menu loop that exercises ``handle_search`` and
``handle_list_papers`` from the in-repo ``arxiv_mcp_server`` package.
"""

import asyncio
import json
from pathlib import Path
import sys
from datetime import datetime, timedelta

# Make the in-repo package importable when this script is run directly.
project_root = Path(__file__).parent / "src"
sys.path.insert(0, str(project_root))

from arxiv_mcp_server.tools import handle_search, handle_download, handle_list_papers


def print_usage_examples():
    """Print examples of the supported search-rule syntax and arXiv categories."""
    print("\n=== 搜索规则示例 ===")
    print("1. 基本搜索: 'machine learning'")
    print("2. 日期过滤: 'deep learning after:2023-01-01'")
    print("3. 分类过滤: 'neural networks cat:cs.AI'")
    print("4. CCF A类: 'computer vision ccf:a'")
    print("5. 保存结果: 'transformer save:results.json'")
    print("6. 组合搜索: 'GAN after:2023-01-01 cat:cs.CV ccf:a save:gan_results.json'")
    print("7. 引用数过滤: 'attention mechanism citations>50' (暂不支持)")
    print("\n可用分类:")
    print("- cs.AI (人工智能)")
    print("- cs.CV (计算机视觉)")
    print("- cs.LG (机器学习)")
    print("- cs.CL (计算语言学)")
    print("- cs.NE (神经与进化计算)")
    print("- cs.IR (信息检索)")


def _read_int(prompt, default):
    """Prompt for an integer; return *default* on empty or non-numeric input.

    Robustness fix: the original called ``int()`` directly on user input and
    crashed with ``ValueError`` on anything non-numeric.
    """
    raw = input(prompt).strip()
    if not raw:
        return default
    try:
        return int(raw)
    except ValueError:
        return default


def get_search_parameters():
    """Interactively collect advanced search parameters.

    Returns:
        A dict suitable for ``handle_search`` (always containing ``query``
        and ``max_results``, plus any optional filters the user supplied),
        or ``None`` when no query was entered.
    """
    print("\n请输入搜索参数:")

    query = input("搜索关键词 (支持规则语法): ").strip()
    if not query:
        return None

    max_results = _read_int("最大结果数 (默认10): ", 10)

    date_from_input = input("开始日期 (YYYY-MM-DD, 回车跳过): ").strip()
    date_to_input = input("结束日期 (YYYY-MM-DD, 回车跳过): ").strip()

    categories_input = input("分类过滤 (逗号分隔, 如 cs.AI,cs.CV, 回车跳过): ").strip()
    categories = [cat.strip() for cat in categories_input.split(",")] if categories_input else None

    # NOTE: the scraped original had this prompt string broken across a
    # literal newline (a paste artifact); reconstructed as one string.
    ccf_a_input = input("仅显示CCF A类论文? (y/n, 默认n): ").strip().lower()
    ccf_a_only = ccf_a_input == 'y'

    save_file = input("保存到文件 (输入文件名, 回车跳过): ").strip()

    # Only include optional keys the user actually provided, so the server
    # side sees a minimal parameter dict.
    params = {
        "query": query,
        "max_results": max_results,
    }
    if date_from_input:
        params["date_from"] = date_from_input
    if date_to_input:
        params["date_to"] = date_to_input
    if categories:
        params["categories"] = categories
    if ccf_a_only:
        params["ccf_a_only"] = ccf_a_only
    if save_file:
        params["save_to_file"] = save_file
    return params


def format_paper_display(paper_data):
    """Format a single paper dict as a human-readable multi-line string.

    Args:
        paper_data: dict with keys such as ``title``, ``id``, ``authors``,
            ``published``, ``categories``, ``url``, ``abstract`` and the
            optional ``is_ccf_a`` / ``journal_ref`` — assumed to follow the
            shape emitted by ``handle_search`` (TODO: confirm against tools).

    Returns:
        The formatted block as one string.
    """
    paper = paper_data
    result = []
    result.append(f"标题: {paper.get('title', 'N/A')}")
    result.append(f"ID: {paper.get('id', 'N/A')}")
    result.append(f"作者: {', '.join(paper.get('authors', []))}")
    result.append(f"发表日期: {paper.get('published', 'N/A')[:10]}")
    result.append(f"分类: {', '.join(paper.get('categories', []))}")
    if paper.get('is_ccf_a'):
        result.append("🏆 CCF A类论文")
    if paper.get('journal_ref'):
        result.append(f"期刊: {paper['journal_ref']}")
    result.append(f"链接: {paper.get('url', 'N/A')}")
    result.append(f"摘要: {paper.get('abstract', 'N/A')[:200]}...")
    result.append("-" * 80)
    # BUG FIX: the original built ``result`` but never returned it, so every
    # ``print(format_paper_display(...))`` call printed ``None``.
    return "\n".join(result)


async def interactive_test():
    """Run the interactive menu loop until the user chooses to exit."""
    while True:
        print("\n=== arXiv MCP 服务器测试 ===")
        print("1. 基础搜索论文")
        print("2. 高级搜索论文 (支持更多选项)")
        print("3. 列出本地论文")
        print("4. 显示搜索规则示例")
        print("5. 退出")

        choice = input("请选择 (1-5): ").strip()

        if choice == "1":
            # Basic search: keyword + result count only.
            query = input("输入搜索关键词: ").strip()
            max_results = _read_int("最大结果数 (默认5): ", 5)

            print(f"\n正在搜索: {query}")
            try:
                results = await handle_search({
                    "query": query,
                    "max_results": max_results
                })
                # Each result carries a JSON payload in its ``text`` attribute.
                for result in results:
                    data = json.loads(result.text)
                    print(f"\n找到 {data['total_results']} 篇论文:\n")
                    for i, paper in enumerate(data['papers'], 1):
                        print(f"\n=== 论文 {i} ===")
                        print(format_paper_display(paper))
                    if data.get('saved_to'):
                        print(f"\n✅ 结果已保存到: {data['saved_to']}")
            except Exception as e:
                print(f"搜索失败: {e}")

        elif choice == "2":
            # Advanced search: full parameter collection incl. filters.
            params = get_search_parameters()
            if params:
                print(f"\n正在执行高级搜索...")
                print(f"参数: {json.dumps(params, indent=2, ensure_ascii=False)}")
                try:
                    results = await handle_search(params)
                    for result in results:
                        data = json.loads(result.text)
                        print(f"\n找到 {data['total_results']} 篇论文:")
                        # Echo back the filters the server actually applied.
                        if data.get('parsed_filters'):
                            filters = data['parsed_filters']
                            print(f"应用的过滤器:")
                            for key, value in filters.items():
                                if value:
                                    print(f"  {key}: {value}")
                        print("\n" + "=" * 80)
                        for i, paper in enumerate(data['papers'], 1):
                            print(f"\n=== 论文 {i} ===")
                            print(format_paper_display(paper))
                        if data.get('saved_to'):
                            print(f"\n✅ 结果已保存到: {data['saved_to']}")
                        if data.get('save_error'):
                            print(f"\n❌ 保存失败: {data['save_error']}")
                except Exception as e:
                    print(f"搜索失败: {e}")

        elif choice == "3":
            print("\n本地论文列表:")
            try:
                results = await handle_list_papers({})
                for result in results:
                    print(result.text)
            except Exception as e:
                print(f"列表获取失败: {e}")

        elif choice == "4":
            print_usage_examples()

        elif choice == "5":
            print("退出...")
            break

        else:
            print("无效选择")
    # BUG FIX: the original had a second, older copy of the menu loop after
    # this one, so choosing "退出" dropped the user into a leftover 3-option
    # menu instead of exiting. That dead legacy loop has been removed.


if __name__ == "__main__":
    asyncio.run(interactive_test())

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/wr-web/APR'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.