Documentation Search MCP Server

by gemini2026

get_docs

Search the official documentation of programming libraries and frameworks by query and version, and retrieve structured summaries with supporting metadata.

Instructions

Search documentation for a given query and one or more libraries.

Args:
    query: The query to search for (e.g., "Chroma DB")
    libraries: A single library or a list of libraries to search in (e.g., "langchain" or ["fastapi", "django"])
    version: Library version to search (e.g., "4.2", "stable", "latest"). Default: "latest"
    auto_detect_version: Automatically detect installed package version. Default: False

Returns:
    Dictionary with structured summaries and supporting metadata
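As an orientation, here is a minimal sketch of calling this tool from the official MCP Python SDK over stdio. The launch command (python main.py) and the client-side wiring are assumptions about your local setup; the tool name and arguments follow the schema below.

    import asyncio

    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client

    async def main() -> None:
        # Assumed launch command for this server; adjust to your installation.
        server = StdioServerParameters(command="python", args=["main.py"])
        async with stdio_client(server) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                # libraries may be a single name, a comma-separated string,
                # or a JSON list such as ["fastapi", "django"].
                result = await session.call_tool(
                    "get_docs",
                    arguments={
                        "query": "Chroma DB",
                        "libraries": "langchain",
                        "version": "latest",
                        "auto_detect_version": False,
                    },
                )
                print(result.content)

    asyncio.run(main())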

Input Schema

Name                 Required  Description                                                   Default
query                Yes       The query to search for (e.g., "Chroma DB")
libraries            Yes       A single library or a list of libraries to search in
version              No        Library version to search (e.g., "4.2", "stable", "latest")  latest
auto_detect_version  No        Automatically detect the installed package version           False

Implementation Reference

  • The core handler function for the 'get_docs' tool. It searches library documentation sites via Serper (with a sitemap search as fallback), fetches the matching pages, generates summaries, and returns structured results per library, including metadata such as code-snippet counts. A sketch of the returned structure follows this list.
    @mcp.tool()
    async def get_docs(
        query: str,
        libraries: LibrariesParam,
        version: str = "latest",
        auto_detect_version: bool = False,
    ):
        """
        Search documentation for a given query and one or more libraries.

        Args:
            query: The query to search for (e.g., "Chroma DB")
            libraries: A single library or a list of libraries to search in
                (e.g., "langchain" or ["fastapi", "django"])
            version: Library version to search (e.g., "4.2", "stable", "latest"). Default: "latest"
            auto_detect_version: Automatically detect installed package version. Default: False

        Returns:
            Dictionary with structured summaries and supporting metadata
        """
        await enforce_rate_limit("get_docs")

        if isinstance(libraries, str):
            libraries = [lib.strip() for lib in libraries.split(",") if lib.strip()]

        config_dict = config_model.model_dump()
        library_summaries: List[Dict[str, Any]] = []
        summary_sections: List[str] = []

        for library in libraries:
            # Resolve version (with auto-detection if enabled)
            resolved_version = await version_resolver.resolve_version(
                library=library,
                requested_version=version,
                auto_detect=auto_detect_version,
                project_path=".",
            )

            lib_entry: Dict[str, Any] = {
                "library": library,
                "requested_query": query,
                "status": "searched",
                "results": [],
            }

            lib_config = config_dict.get("docs_urls", {}).get(library, {})
            auto_approve = lib_config.get("auto_approve", True)
            if not auto_approve:
                print(
                    f"⚠️ Requesting approval to search {library} documentation...",
                    file=sys.stderr,
                )

            docs_root = docs_urls.get(library)
            if not docs_root:
                lib_entry.update(
                    {
                        "status": "unsupported",
                        "message": f"Library '{library}' not supported by this tool",
                    }
                )
                library_summaries.append(lib_entry)
                summary_sections.append(
                    f"### {library}\n- Unsupported library; no documentation root configured."
                )
                continue

            # Get version-specific URL
            versioned_url = get_versioned_docs_url(library, resolved_version, lib_config)

            # Build search query with version context
            search_query = f"site:{versioned_url} {query}"
            if resolved_version != "latest" and not lib_config.get("version_url_template"):
                # Add version to query if URL doesn't support versioning
                search_query += f" version {resolved_version}"

            search_results = await search_web(search_query, num_results=5)
            organic_results = (search_results.get("organic") or [])[:3]
            if not organic_results:
                lib_entry.update(
                    {
                        "status": "no_results",
                        "message": "No indexed documentation results returned",
                    }
                )
                library_summaries.append(lib_entry)
                summary_sections.append(f"### {library}\n- No results for query '{query}'.")
                continue

            fetch_tasks = [fetch_url(result["link"]) for result in organic_results]
            fetched_contents = await asyncio.gather(*fetch_tasks, return_exceptions=True)

            library_lines = [f"### {library}"]
            for result, content in zip(organic_results, fetched_contents):
                entry: Dict[str, Any] = {
                    "title": result.get("title") or result.get("link"),
                    "url": result.get("link"),
                    "source_snippet": result.get("snippet", ""),
                }
                if isinstance(content, Exception):
                    error_message = str(content)
                    entry["status"] = "error"
                    entry["error"] = error_message
                    library_lines.append(
                        f"- {entry['title']}: failed to fetch ({error_message})"
                    )
                else:
                    content_str = str(content)
                    summary = content_enhancer.generate_summary(content_str, query)
                    code_snippet_count = len(
                        content_enhancer.extract_code_snippets(content_str)
                    )
                    entry.update(
                        {
                            "status": "ok",
                            "summary": summary,
                            "code_snippet_count": code_snippet_count,
                        }
                    )
                    bullet_summary = summary if summary else "No summary extracted."
                    library_lines.append(
                        f"- {entry['title']}: {bullet_summary} (code snippets: {code_snippet_count})"
                    )
                lib_entry["results"].append(entry)

            lib_entry["total_results"] = len(lib_entry["results"])
            library_summaries.append(lib_entry)
            summary_sections.append("\n".join(library_lines))

        if cache:
            await cache.clear_expired()

        return {
            "query": query,
            "libraries": library_summaries,
            "summary_markdown": "\n\n".join(summary_sections),
        }
  • The @mcp.tool() decorator registers the get_docs function as an MCP tool.
    @mcp.tool()
  • Custom type annotation and validator for the 'libraries' parameter, allowing flexible input like strings, lists, or comma-separated values.
    def _normalize_libraries(value: Any) -> List[str]:
        if value is None:
            return []
        if isinstance(value, str):
            parts = [part.strip() for part in value.split(",")]
            return [part for part in parts if part]
        if isinstance(value, (list, tuple, set)):
            libraries: List[str] = []
            for item in value:
                if item is None:
                    continue
                item_str = str(item).strip()
                if item_str:
                    libraries.append(item_str)
            return libraries
        return [str(value).strip()]

    LibrariesParam = Annotated[List[str], BeforeValidator(_normalize_libraries)]
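To make the pieces above concrete, here is a small illustrative sketch of the dictionary shape that get_docs returns and of how _normalize_libraries treats its input. The field values are invented placeholders; only the keys and the normalization rules come from the code above.

    # Invented placeholder values; the keys mirror what get_docs assembles above.
    example_response = {
        "query": "Chroma DB",
        "libraries": [
            {
                "library": "langchain",
                "requested_query": "Chroma DB",
                "status": "searched",
                "results": [
                    {
                        "title": "Example result title",
                        "url": "https://example.com/docs/chroma",
                        "source_snippet": "...",
                        "status": "ok",
                        "summary": "Short extracted summary of the page.",
                        "code_snippet_count": 3,
                    }
                ],
                "total_results": 1,
            }
        ],
        "summary_markdown": "### langchain\n- Example result title: ... (code snippets: 3)",
    }

    # _normalize_libraries accepts a single name, a comma-separated string,
    # or a sequence, and always yields a list of stripped names:
    #   "langchain"             -> ["langchain"]
    #   "fastapi, django"       -> ["fastapi", "django"]
    #   ["fastapi", " django "] -> ["fastapi", "django"]
    print(example_response["summary_markdown"])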

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/gemini2026/documentation-search-mcp'
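The same request from Python, as a minimal stdlib-only sketch; whether the endpoint requires authentication and which fields the response carries are not documented here, so the snippet just prints whatever JSON comes back.

    import json
    import urllib.request

    URL = "https://glama.ai/api/mcp/v1/servers/gemini2026/documentation-search-mcp"

    # Mirrors the curl command above; add headers here if the API requires them.
    with urllib.request.urlopen(URL) as response:
        data = json.load(response)

    print(json.dumps(data, indent=2))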

If you have feedback or need assistance with the MCP directory API, please join our Discord server.