
search_documentation

Search across specific documentation sets and snippets using a query string. Specify docset identifiers, optionally include snippets, and cap the number of results to refine a search. The result list is automatically truncated if it would exceed 25,000 tokens.

Instructions

Search for documentation across docset identifiers and snippets.

Args:

  • query: The search query string
  • docset_identifiers: Comma-separated list of docset identifiers to search in (from list_installed_docsets)
  • search_snippets: Whether to include snippets in search results
  • max_results: Maximum number of results to return (1-1000)

Results are automatically truncated if they would exceed 25,000 tokens.

Input Schema

Name               | Required | Description                                                                            | Default
docset_identifiers | No       | Comma-separated list of docset identifiers to search in (from list_installed_docsets) |
max_results        | No       | Maximum number of results to return (1-1000)                                           | 100
query              | Yes      | The search query string                                                                |
search_snippets    | No       | Whether to include snippets in search results                                          | true
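
An MCP client would pass arguments like the following (a minimal sketch; the docset identifier shown is hypothetical and would come from list_installed_docsets):

    # Hypothetical arguments for a search_documentation tool call.
    arguments = {
        "query": "URLSession",                        # required
        "docset_identifiers": "apple_api_reference",  # made-up identifier; get real ones from list_installed_docsets
        "search_snippets": True,                      # default: True
        "max_results": 50,                            # must be between 1 and 1000
    }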

Implementation Reference

  • The core handler function for the 'search_documentation' tool. It performs the search via the Dash API, handles errors, applies a token limit using estimate_tokens, and returns structured results. Registered via the @mcp.tool() decorator.
    @mcp.tool()
    async def search_documentation(
        ctx: Context,
        query: str,
        docset_identifiers: str,
        search_snippets: bool = True,
        max_results: int = 100,
    ) -> SearchResults:
        """
        Search for documentation across docset identifiers and snippets.

        Args:
            query: The search query string
            docset_identifiers: Comma-separated list of docset identifiers to search in
                (from list_installed_docsets)
            search_snippets: Whether to include snippets in search results
            max_results: Maximum number of results to return (1-1000)

        Results are automatically truncated if they would exceed 25,000 tokens.
        """
        if not query.strip():
            await ctx.error("Query cannot be empty")
            return SearchResults(error="Query cannot be empty")
        if not docset_identifiers.strip():
            await ctx.error("docset_identifiers cannot be empty. Get the docset identifiers using list_installed_docsets")
            return SearchResults(error="docset_identifiers cannot be empty. Get the docset identifiers using list_installed_docsets")
        if max_results < 1 or max_results > 1000:
            await ctx.error("max_results must be between 1 and 1000")
            return SearchResults(error="max_results must be between 1 and 1000")

        try:
            base_url = await working_api_base_url(ctx)
            if base_url is None:
                return SearchResults(error="Failed to connect to Dash API Server. Please ensure Dash is running and the API server is enabled (in Dash Settings > Integration).")

            params = {
                "query": query,
                "docset_identifiers": docset_identifiers,
                "search_snippets": search_snippets,
                "max_results": max_results,
            }
            await ctx.debug(f"Searching Dash API with query: '{query}'")
            with httpx.Client(timeout=30.0) as client:
                response = client.get(f"{base_url}/search", params=params)
                response.raise_for_status()
                result = response.json()

            # Check for warning message in response
            warning_message = None
            if "message" in result:
                warning_message = result["message"]
                await ctx.warning(warning_message)

            results = result.get("results", [])
            await ctx.info(f"Found {len(results)} results")

            # Build result list with token limit checking
            token_limit = 25000
            current_tokens = 100  # Base overhead for response structure
            limited_results = []
            for item in results:
                search_result = SearchResult(
                    name=item["name"],
                    type=item["type"],
                    platform=item.get("platform"),
                    load_url=item["load_url"],
                    docset=item.get("docset"),
                    description=item.get("description"),
                    language=item.get("language"),
                    tags=item.get("tags"),
                )
                # Estimate tokens for this result
                result_tokens = estimate_tokens(search_result)
                if current_tokens + result_tokens > token_limit:
                    await ctx.warning(f"Token limit reached. Returning {len(limited_results)} of {len(results)} results to stay under 25k token limit.")
                    break
                limited_results.append(search_result)
                current_tokens += result_tokens

            if len(limited_results) < len(results):
                await ctx.info(f"Returned {len(limited_results)} results (truncated from {len(results)} due to token limit)")

            return SearchResults(results=limited_results, error=warning_message)
        except httpx.HTTPStatusError as e:
            if e.response.status_code == 400:
                error_text = e.response.text
                if "Docset with identifier" in error_text and "not found" in error_text:
                    await ctx.error("Invalid docset identifier. Run list_installed_docsets to see available docsets.")
                    return SearchResults(error="Invalid docset identifier. Run list_installed_docsets to see available docsets, then use the exact identifier from that list.")
                elif "No docsets found" in error_text:
                    await ctx.error("No valid docsets found for search.")
                    return SearchResults(error="No valid docsets found for search. Either provide valid docset identifiers from list_installed_docsets, or set search_snippets=true to search snippets only.")
                else:
                    await ctx.error(f"Bad request: {error_text}")
                    return SearchResults(error=f"Bad request: {error_text}. Please ensure Dash is running and the API server is enabled (in Dash Settings > Integration).")
            elif e.response.status_code == 403:
                error_text = e.response.text
                if "API access blocked due to Dash trial expiration" in error_text:
                    await ctx.error("Dash trial expired. Purchase Dash to continue using the API.")
                    return SearchResults(error="Your Dash trial has expired. Purchase Dash at https://kapeli.com/dash to continue using the API. During trial expiration, API access is blocked.")
                else:
                    await ctx.error(f"Forbidden: {error_text}")
                    return SearchResults(error=f"Forbidden: {error_text}. Please ensure Dash is running and the API server is enabled (in Dash Settings > Integration).")
            await ctx.error(f"HTTP error: {e}")
            return SearchResults(error=f"HTTP error: {e}. Please ensure Dash is running and the API server is enabled (in Dash Settings > Integration).")
        except Exception as e:
            await ctx.error(f"Search failed: {e}")
            return SearchResults(error=f"Search failed: {e}. Please ensure Dash is running and the API server is enabled (in Dash Settings > Integration).")
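
    The handler is essentially a thin wrapper around Dash's local HTTP API. As a rough sketch, the same /search endpoint can be queried directly once the API server is running; the port and the docset identifier below are made up (the real port is discovered via get_dash_api_port):

    # Minimal sketch of the underlying Dash API call. The port (12345) and the
    # docset identifier are assumptions for illustration only.
    import httpx

    params = {
        "query": "dispatch",
        "docset_identifiers": "apple_api_reference",  # hypothetical identifier
        "search_snippets": True,
        "max_results": 10,
    }
    with httpx.Client(timeout=30.0) as client:
        resp = client.get("http://127.0.0.1:12345/search", params=params)
        resp.raise_for_status()
        for item in resp.json().get("results", []):
            print(item["name"], item["type"], item["load_url"])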
  • Pydantic models defining the input/output schema for the search_documentation tool: SearchResult for individual results, SearchResults for the response container.
    class SearchResult(BaseModel):
        """A search result from documentation."""

        name: str = Field(description="Name of the documentation entry")
        type: str = Field(description="Type of result (Function, Class, etc.)")
        platform: Optional[str] = Field(description="Platform of the result", default=None)
        load_url: str = Field(description="URL to load the documentation")
        docset: Optional[str] = Field(description="Name of the docset", default=None)
        description: Optional[str] = Field(description="Additional description", default=None)
        language: Optional[str] = Field(description="Programming language (snippet results only)", default=None)
        tags: Optional[str] = Field(description="Tags (snippet results only)", default=None)

    class SearchResults(BaseModel):
        """Result from searching documentation."""

        results: list[SearchResult] = Field(description="List of search results", default_factory=list)
        error: Optional[str] = Field(description="Error message if there was an issue", default=None)
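
    As a quick sanity check, these models can be constructed and serialized directly; the field values here are invented placeholders:

    # Hypothetical example: build one result and serialize the response container.
    item = SearchResult(
        name="URLSession",
        type="Class",
        load_url="https://example.invalid/docs/urlsession",  # placeholder URL
    )
    print(SearchResults(results=[item]).model_dump_json(indent=2))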
  • Helper function used by search_documentation to estimate token counts, so results can be truncated before they exceed LLM context limits.
    def estimate_tokens(obj) -> int:
        """Estimate token count for a serialized object.

        Rough approximation: 1 token ≈ 4 characters.
        """
        if isinstance(obj, str):
            return max(1, len(obj) // 4)
        elif isinstance(obj, (list, tuple)):
            return sum(estimate_tokens(item) for item in obj)
        elif isinstance(obj, dict):
            return sum(estimate_tokens(k) + estimate_tokens(v) for k, v in obj.items())
        elif hasattr(obj, 'model_dump'):
            # Pydantic model: estimate on its serialized dict form
            return estimate_tokens(obj.model_dump())
        else:
            return max(1, len(str(obj)) // 4)
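
    The heuristic is easy to verify by hand. For a Pydantic model it recurses into model_dump() and sums the per-field estimates; for example:

    # 40 characters // 4 == 10 tokens
    print(estimate_tokens("a" * 40))                # 10
    # len("name") // 4 == 1, len("URLSession") // 4 == 2, so 1 + 2 == 3
    print(estimate_tokens({"name": "URLSession"}))  # 3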
  • Helper function that returns a working Dash API base URL, ensuring Dash is running and the API server is enabled (and attempting to enable it automatically if it is not). Called by search_documentation.
    async def working_api_base_url(ctx: Context) -> Optional[str]:
        dash_running = await ensure_dash_running(ctx)
        if not dash_running:
            return None
        port = await get_dash_api_port(ctx)
        if port is None:
            # Try to automatically enable the Dash API Server
            await ctx.info("The Dash API Server is not enabled. Attempting to enable it automatically...")
            try:
                subprocess.run(
                    ["defaults", "write", "com.kapeli.dashdoc", "DHAPIServerEnabled", "YES"],
                    check=True,
                    timeout=10,
                )
                subprocess.run(
                    ["defaults", "write", "com.kapeli.dash-setapp", "DHAPIServerEnabled", "YES"],
                    check=True,
                    timeout=10,
                )
                # Wait a moment for Dash to pick up the change
                import time
                time.sleep(2)
                # Try to get the port again
                port = await get_dash_api_port(ctx)
                if port is None:
                    await ctx.error("Failed to enable Dash API Server automatically. Please enable it manually in Dash Settings > Integration")
                    return None
                else:
                    await ctx.info("Successfully enabled Dash API Server")
            except Exception as e:
                await ctx.error("Failed to enable Dash API Server automatically. Please enable it manually in Dash Settings > Integration")
                return None
        return f"http://127.0.0.1:{port}"
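
    The auto-enable path writes the DHAPIServerEnabled preference with the macOS defaults tool. A sketch of checking the current value by hand (macOS only; only the com.kapeli.dashdoc domain from the code above is assumed):

    import subprocess

    # `defaults read` exits nonzero if the key has never been set; depending on
    # how the preference was written, it may read back as "1" or "YES".
    out = subprocess.run(
        ["defaults", "read", "com.kapeli.dashdoc", "DHAPIServerEnabled"],
        capture_output=True, text=True, timeout=10,
    )
    print("enabled" if out.stdout.strip() in ("1", "YES") else "not enabled or unset")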

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Kapeli/dash-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.