Skip to main content
Glama
fengin

Search MCP Server

by fengin

search

Perform web searches to find information and links across the internet. Filter results by time range, view detailed summaries, and retrieve paginated results for AI applications.

Instructions

执行网页搜索,从全网搜索任何网页信息和网页链接。结果准确、摘要完整,更适合AI使用。支持以下特性:

  • 时间范围过滤

  • 显示详细摘要

  • 分页获取

每次请求最多返回10个结果。(当前使用博查搜索API实现)

Input Schema

Table | JSON Schema

| Name | Required | Description | Default |
| --- | --- | --- | --- |
| query | Yes | 搜索查询内容 | — |
| count | No | 结果数量(1-10,默认10) | 10 |
| page | No | 页码,从1开始 | 1 |
| freshness | No | 时间范围(noLimit:不限, oneDay:一天内, oneWeek:一周内, oneMonth:一月内, oneYear:一年内) | noLimit |
| summary | No | 是否显示详细摘要 | false |

Implementation Reference

  • Registers the MCP tool handlers: list_tools returns the schema(s) from the selected search engine, call_tool delegates execution to the engine's handle_tool_call.
    @server.list_tools() async def handle_list_tools() -> list[types.Tool]: """列出可用的搜索工具""" return AVAILABLE_ENGINES[SEARCH_ENGINE]["tools"]() @server.call_tool() async def handle_call_tool( name: str, arguments: Optional[Dict[str, Any]] ) -> List[types.TextContent | types.ImageContent | types.EmbeddedResource]: """处理工具调用请求""" try: if not arguments: raise ValueError("缺少参数") result = await AVAILABLE_ENGINES[SEARCH_ENGINE]["handle_tool"](name, arguments) return [result] except Exception as e: return [types.TextContent( type="text", text=f"错误: {str(e)}" )]
  • Schema definition for the 'search' tool using Bocha Search API, including input parameters for query, count, page, freshness, and summary.
    def get_tool_descriptions() -> list[types.Tool]: """返回博查搜索工具的描述列表""" return [ types.Tool( name="search", description=( "执行网页搜索,从全网搜索任何网页信息和网页链接。" "结果准确、摘要完整,更适合AI使用。" "支持以下特性:\n" "- 时间范围过滤\n" "- 显示详细摘要\n" "- 分页获取\n" "每次请求最多返回10个结果。" "(当前使用博查搜索API实现)" ), inputSchema={ "type": "object", "properties": { "query": { "type": "string", "description": "搜索查询内容" }, "count": { "type": "number", "description": "结果数量(1-10,默认10)", "default": 10 }, "page": { "type": "number", "description": "页码,从1开始", "default": 1 }, "freshness": { "type": "string", "description": "时间范围(noLimit:不限, oneDay:一天内, oneWeek:一周内, oneMonth:一月内, oneYear:一年内)", "enum": list(FRESHNESS_RANGES.values()), "default": "noLimit" }, "summary": { "type": "boolean", "description": "是否显示详细摘要", "default": False } }, "required": ["query"] } ) ]
  • Handler for the 'search' tool: performs web search using BochaClient, processes response, formats results including stats, web pages, and images into markdown text.
    async def handle_tool_call(name: str, arguments: Dict[str, Any]) -> types.TextContent:
        """Handle a tool call against the Bocha search backend.

        Validates arguments, runs the web search, and renders statistics,
        web-page results, and image results into a single text block.
        Raises ValueError for missing arguments or an unsupported tool name.
        """
        if not arguments or "query" not in arguments:
            raise ValueError("缺少query参数")

        query = arguments["query"]
        client = BochaClient()
        try:
            if name != "search":
                raise ValueError(f"博查搜索不支持的工具: {name}")

            count = arguments.get("count", 10)
            page = arguments.get("page", 1)
            freshness = arguments.get("freshness", "noLimit")
            summary = arguments.get("summary", False)

            response = await client.web_search(
                query=query,
                count=count,
                page=page,
                freshness=freshness,
                summary=summary,
            )

            # Non-200 codes indicate a backend failure; report its message.
            if response.get("code") != 200:
                return types.TextContent(
                    type="text",
                    text=f"搜索失败: {response.get('msg', '未知错误')}",
                )

            data = response.get("data", {})
            if not data:
                return types.TextContent(type="text", text="未找到相关结果")

            sections = []

            # --- Statistics header -------------------------------------------
            stats = data.get("webPages", {})
            total_results = stats.get("totalEstimatedMatches", 0)
            current_results = len(stats.get("value", []))
            total_pages = (total_results + count - 1) // count if total_results > 0 else 0
            sections.extend([
                "搜索统计信息:",
                f"- 总结果数: {total_results:,} 条",
                f"- 当前页/总页数: {page}/{total_pages}",
                f"- 本页结果数: {current_results} 条",
                "",  # blank separator line
            ])

            # --- Web page results --------------------------------------------
            for item in data.get("webPages", {}).get("value", []):
                entry = [
                    f"标题: {item.get('name', '')}",
                    f"网址: {item.get('url', '')}",
                    f"来源: {item.get('siteName', '未知来源')}",
                ]
                # Prefer the detailed summary when it was requested and present.
                if summary and "summary" in item:
                    entry.append(f"摘要: {item['summary']}")
                else:
                    entry.append(f"描述: {item.get('snippet', '')}")
                if "dateLastCrawled" in item:
                    entry.append(f"发布时间: {item['dateLastCrawled']}")
                sections.append("\n".join(entry))

            # --- Image results -----------------------------------------------
            images = data.get("images", {}).get("value", [])
            if images:
                sections.append("\n相关图片:")
                for img in images:
                    img_lines = [
                        f"- 名称: {img.get('name', '未命名')}",
                        f"  尺寸: {img.get('width', '未知')}x{img.get('height', '未知')}",
                        f"  来源页面: {img.get('hostPageDisplayUrl', img.get('hostPageUrl', '未知'))}",
                        f"  原图URL: {img.get('contentUrl', '')}",
                        f"  缩略图URL: {img.get('thumbnailUrl', '')}",
                    ]
                    sections.append("\n".join(img_lines))

            return types.TextContent(
                type="text",
                text="\n\n".join(sections) if sections else "未找到相关结果",
            )
        except BochaException as e:
            return types.TextContent(type="text", text=f"搜索执行错误: {str(e)}")
  • Handler for the 'search' tool using Brave Search API: executes web_search, formats title, description, url for each result.
    async def handle_tool_call(name: str, arguments: Dict[str, Any]) -> types.TextContent: """统一处理工具调用""" if not arguments or "query" not in arguments: raise ValueError("缺少query参数") query = arguments["query"] client = BraveClient() try: if name == "search": count = arguments.get("count", 10) offset = arguments.get("offset", 0) results = await client.web_search(query, count, offset) # 格式化输出 formatted_results = [] for result in results: formatted_results.append( f"标题: {result['title']}\n" f"描述: {result['description']}\n" f"网址: {result['url']}" ) return types.TextContent( type="text", text="\n\n".join(formatted_results) ) elif name == "location_search": count = arguments.get("count", 5) results = await client.location_search(query, count) # 检查是否为网络搜索结果 if results and "url" in results[0]: # 如果是网络搜索结果,使用网络搜索格式 formatted_results = [] for result in results: formatted_results.append( f"标题: {result['title']}\n" f"描述: {result['description']}\n" f"网址: {result['url']}" ) else: # 如果是位置搜索结果,使用位置信息格式 formatted_results = [] for result in results: formatted_results.append( f"名称: {result['name']}\n" f"地址: {result['address']}\n" f"电话: {result['phone']}\n" f"评分: {result['rating']['value']} ({result['rating']['count']}条评论)\n" f"价格范围: {result['price_range']}\n" f"营业时间: {', '.join(result['opening_hours']) or '暂无'}\n" f"描述: {result['description']}" ) return types.TextContent( type="text", text="\n---\n".join(formatted_results) if formatted_results else "未找到相关结果" ) else: raise ValueError(f"Brave搜索不支持的工具: {name}") except BraveException as e: return types.TextContent( type="text", text=f"搜索执行错误: {str(e)}" )
  • Handler for the 'search' tool using Metaso Search API: calls perform_search with web mode, returns formatted content with references.
    async def handle_tool_call(name: str, arguments: Dict[str, Any]) -> types.TextContent: """统一处理工具调用""" if not arguments or "query" not in arguments: raise ValueError("缺少query参数") query = arguments["query"] mode = arguments.get("mode", DEFAULT_MODEL) if name == "search": results = await perform_search(query, mode, is_scholar=False) elif name == "scholar_search": results = await perform_search(query, mode, is_scholar=True) else: raise ValueError(f"Metaso搜索不支持的工具: {name}") return types.TextContent(type="text", text=results)
Install Server

Other Tools

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/fengin/search-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server