
URL Text Fetcher MCP Server

by billallison

brave_search_and_fetch

Search the web with Brave Search and retrieve text content from the top results, gathering information from multiple sources in one operation.

Instructions

Search the web using Brave Search and automatically fetch content from the top results.

Args:
  • query: The search query
  • max_results: Maximum number of results to fetch content for (default: 3, max: 10)

Input Schema

Name        | Required | Description                                     | Default
query       | Yes      | The search query                                | —
max_results | No       | Maximum number of results to fetch content for | 3
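
For reference, this is one way a client might invoke the tool over stdio using the official MCP Python SDK. The `python server.py` launch command is an assumption; adjust it to however this server is actually started.

    import asyncio

    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client

    async def main() -> None:
        # Hypothetical launch command for the server; adjust as needed.
        params = StdioServerParameters(command="python", args=["server.py"])
        async with stdio_client(params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                result = await session.call_tool(
                    "brave_search_and_fetch",
                    arguments={"query": "model context protocol", "max_results": 3},
                )
                # The tool returns a single text content block on success.
                print(result.content[0].text)

    asyncio.run(main())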

Implementation Reference

  • Main handler function for the brave_search_and_fetch tool, registered with the @mcp.tool() decorator. It sanitizes input, performs the Brave search, fetches content from the top results, and handles errors and length limits.
    @mcp.tool()
    async def brave_search_and_fetch(query: str, max_results: int = 3) -> str:
        """Search the web using Brave Search and automatically fetch content from the top results.

        Args:
            query: The search query
            max_results: Maximum number of results to fetch content for (default: 3, max: 10)
        """
        # Sanitize query input
        query = sanitize_query(query)
        if not query:
            return "Error: Invalid or empty search query"

        max_results = max(1, min(10, max_results))  # Clamp between 1-10

        try:
            logger.info(f"Performing Brave search: {query}")
            search_results = brave_search(query, count=max_results * 2)

            if not search_results:
                return f"No search results found for query: {query}"

            # Build response with search results and content
            response_parts = [f"Search Results for: {query}", "=" * 50, ""]
            fetched_count = 0

            for result in search_results:
                if fetched_count >= max_results:
                    break

                title = result.get('title', 'No title')
                url = result.get('url', '')
                description = result.get('description', 'No description')

                response_parts.append(f"{fetched_count + 1}. {title}")
                response_parts.append(f" URL: {url}")
                response_parts.append(f" Description: {description}")

                # Fetch content from this URL
                if url:
                    content = fetch_url_content(url)
                    # Limit content per result
                    max_content_per_result = CONTENT_LENGTH_LIMIT // max_results
                    if len(content) > max_content_per_result:
                        content = content[:max_content_per_result] + "... [Truncated]"
                    response_parts.append(f" Content: {content}")
                    fetched_count += 1
                else:
                    response_parts.append(" Content: No URL available")

                response_parts.append("")  # Add spacing

            final_response = "\n".join(response_parts)

            # Final length check
            if len(final_response) > CONTENT_LENGTH_LIMIT:
                final_response = final_response[:CONTENT_LENGTH_LIMIT] + "... [Response truncated]"

            return final_response

        except Exception as e:
            error_msg = str(e)
            logger.error(f"Search operation failed: {error_msg}", exc_info=True)
            # Return more detailed error information for debugging
            return f"Error: Search operation failed - {error_msg}"
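
    The sanitize_query helper is referenced above but not shown on this page. A minimal sketch of what it might do, assuming fairly standard input hygiene (the exact rules and the 400-character cap are illustrative, not the project's actual values):

        import re

        def sanitize_query(query: str) -> str:
            """Hypothetical sketch: strip control characters, collapse whitespace,
            and cap the query length. The real implementation may differ."""
            if not isinstance(query, str):
                return ""
            query = re.sub(r"[\x00-\x1f\x7f]", "", query)  # drop control characters
            query = re.sub(r"\s+", " ", query).strip()     # collapse whitespace
            return query[:400]                             # defensive length cap (illustrative)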
  • Alternative, more modern handler for the brave_search_and_fetch tool with Context support, Pydantic Fields for schema definition, and progress reporting.
    @mcp.tool()
    async def brave_search_and_fetch(
        ctx: Context,
        query: str = Field(description="The search query"),
        max_results: int = Field(
            default=3,
            description="Maximum number of results to fetch content for",
            ge=1,
            le=10
        )
    ) -> str:
        """Search the web using Brave Search and automatically fetch content from the top results"""
        # Use context for logging
        await ctx.info(f"Starting search for: {query}")

        # Sanitize query input
        query = sanitize_query(query)
        if not query:
            return "Error: Invalid or empty search query"

        max_results = max(1, min(10, max_results))  # Clamp between 1-10

        try:
            await ctx.info(f"Performing Brave search: {query}")
            search_results = brave_search(query, count=max_results * 2)

            if not search_results:
                return f"No search results found for query: {query}"

            # Build response with search results and content
            response_parts = [f"Search Results for: {query}", "=" * 50, ""]
            fetched_count = 0

            for result in search_results:
                if fetched_count >= max_results:
                    break

                title = result.get('title', 'No title')
                url = result.get('url', '')
                description = result.get('description', 'No description')

                response_parts.append(f"{fetched_count + 1}. {title}")
                response_parts.append(f" URL: {url}")
                response_parts.append(f" Description: {description}")

                # Report progress
                progress = (fetched_count + 1) / max_results
                await ctx.report_progress(
                    progress=progress,
                    total=1.0,
                    message=f"Fetching content from result {fetched_count + 1} of {max_results}"
                )

                # Fetch content from this URL
                if url:
                    content = fetch_url_content(url)
                    # Limit content per result
                    max_content_per_result = CONTENT_LENGTH_LIMIT // max_results
                    if len(content) > max_content_per_result:
                        content = content[:max_content_per_result] + "... [Truncated]"
                    response_parts.append(f" Content: {content}")
                    fetched_count += 1
                else:
                    response_parts.append(" Content: No URL available")

                response_parts.append("")  # Add spacing

            final_response = "\n".join(response_parts)

            # Final length check
            if len(final_response) > CONTENT_LENGTH_LIMIT:
                final_response = final_response[:CONTENT_LENGTH_LIMIT] + "... [Response truncated]"

            await ctx.info(f"Search completed successfully: {fetched_count} results fetched")
            return final_response

        except Exception as e:
            await ctx.error(f"Search operation failed: {str(e)}")
            logger.error(f"Search operation failed: {e}", exc_info=True)
            # Don't leak internal error details
            return "Error: Search operation failed"
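
    Both handlers assume module-level setup that does not appear on this page. A plausible sketch of those shared definitions, with names taken from the code above; the specific values (timeouts, limits, server name) are assumptions:

        import os
        import time
        import logging
        import threading
        from typing import List

        import requests
        from bs4 import BeautifulSoup
        from mcp.server.fastmcp import FastMCP, Context
        from pydantic import Field

        mcp = FastMCP("url-text-fetcher")  # server name is an assumption
        logger = logging.getLogger(__name__)

        # Configuration referenced by the handlers; exact values are illustrative.
        BRAVE_API_KEY = os.environ.get("BRAVE_API_KEY", "")
        REQUEST_TIMEOUT = 15             # seconds
        MAX_RESPONSE_SIZE = 5_000_000    # bytes
        CONTENT_LENGTH_LIMIT = 20_000    # characters in the final tool response
        BRAVE_RATE_LIMIT_RPS = 1         # Brave's free tier allows roughly 1 request/second
        MIN_REQUEST_INTERVAL = 1.0 / BRAVE_RATE_LIMIT_RPS

        # Shared state for thread-safe rate limiting in brave_search()
        rate_limit_lock = threading.Lock()
        last_brave_request = [0.0]       # one-element list so it can be mutated in place

        HEADERS = {"User-Agent": "Mozilla/5.0 (compatible; MCP-URL-Fetcher/1.0)"}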
  • Core helper function that performs the actual Brave Search API call, handles rate limiting and authentication, and parses results into title/url/description entries.
    def brave_search(query: str, count: int = 10) -> List[dict]:
        """Perform a Brave search and return results with thread-safe rate limiting."""
        if not BRAVE_API_KEY:
            logger.error("Brave Search API key not configured")
            raise ValueError("BRAVE_API_KEY environment variable is required")

        # Thread-safe rate limiting: ensure minimum interval between requests
        with rate_limit_lock:
            current_time = time.time()
            time_since_last_request = current_time - last_brave_request[0]
            if time_since_last_request < MIN_REQUEST_INTERVAL:
                sleep_time = MIN_REQUEST_INTERVAL - time_since_last_request
                logger.info(f"Rate limiting: sleeping for {sleep_time:.3f} seconds (limit: {BRAVE_RATE_LIMIT_RPS} req/s)")
                time.sleep(sleep_time)
            last_brave_request[0] = time.time()

        url = "https://api.search.brave.com/res/v1/web/search"
        headers = {
            'User-Agent': 'Mozilla/5.0 (compatible; MCP-URL-Fetcher/1.0)',
            'Accept': 'application/json',  # Brave API requires application/json or */*
            "X-Subscription-Token": BRAVE_API_KEY
        }
        params = {
            "q": query,
            "count": count,
            "search_lang": "en",
            "country": "US",
            "safesearch": "moderate"
        }

        try:
            logger.info(f"SEARCH_REQUEST: Making Brave Search for '{query}' (count={count})")
            response = requests.get(url, headers=headers, params=params, timeout=REQUEST_TIMEOUT)

            # Log response details for debugging
            logger.info(f"SEARCH_RESPONSE: Status {response.status_code}, Content-Type: {response.headers.get('Content-Type', 'unknown')}")

            response.raise_for_status()
            data = response.json()

            results = []
            if 'web' in data and 'results' in data['web']:
                for result in data['web']['results']:
                    results.append({
                        'title': result.get('title', ''),
                        'url': result.get('url', ''),
                        'description': result.get('description', ''),
                    })
            else:
                logger.warning(f"Unexpected response structure: {data}")

            logger.info(f"SEARCH_SUCCESS: Found {len(results)} results for '{query}'")
            return results

        except requests.HTTPError as e:
            logger.error(f"Brave Search API error: {e.response.status_code}")
            logger.error(f"Response headers: {dict(e.response.headers)}")
            logger.error(f"Response body: {e.response.text}")
            if e.response.status_code == 401:
                raise Exception("Invalid or missing API key - check your BRAVE_API_KEY")
            elif e.response.status_code == 403:
                raise Exception("API key does not have permission for this endpoint")
            elif e.response.status_code == 422:
                raise Exception("Search request was rejected - please check your query")
            elif e.response.status_code == 429:
                raise Exception("Rate limit exceeded - please wait before making another request")
            else:
                raise Exception(f"Search service error: {e.response.status_code} - {e.response.text}")
        except requests.RequestException as e:
            logger.error(f"Network error during search: {e}")
            raise Exception("Network error occurred during search")
        except Exception as e:
            logger.error(f"Unexpected error in brave_search: {e}", exc_info=True)
            raise Exception("An unexpected error occurred during search")
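
    The parsing logic above expects the standard Brave Web Search response envelope, roughly of this shape, abbreviated to the fields consumed here (real responses carry many more):

        # Abbreviated shape of a Brave Web Search API response as consumed above.
        example_response = {
            "web": {
                "results": [
                    {
                        "title": "Example Domain",
                        "url": "https://example.com/",
                        "description": "This domain is for use in examples...",
                    },
                    # ...up to `count` entries...
                ]
            }
        }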
  • Helper function to safely fetch and extract text content from URLs, used by the search tool. Includes SSRF protection, size limits, and HTML parsing.
    def fetch_url_content(url: str) -> str:
        """Helper function to fetch text content from a URL with safety checks."""
        # Validate URL safety first
        if not is_safe_url(url):
            logger.warning(f"SECURITY: Blocked unsafe URL: {url}")
            return "Error: URL not allowed for security reasons"

        try:
            # Log request for monitoring
            logger.info(f"REQUEST: Fetching content from {url}")

            # Make request with streaming to check size
            resp = requests.get(url, headers=HEADERS, timeout=REQUEST_TIMEOUT, stream=True)
            resp.raise_for_status()

            # Log response details
            logger.info(f"RESPONSE: {resp.status_code} from {url}, Content-Type: {resp.headers.get('Content-Type', 'unknown')}")

            # Check content length header
            content_length = resp.headers.get('Content-Length')
            if content_length and int(content_length) > MAX_RESPONSE_SIZE:
                logger.warning(f"SECURITY: Content too large: {content_length} bytes for {url}")
                return f"Error: Content too large ({content_length} bytes, max {MAX_RESPONSE_SIZE})"

            # Read content with size limit
            content_chunks = []
            total_size = 0
            try:
                for chunk in resp.iter_content(chunk_size=8192, decode_unicode=True):
                    if chunk:  # filter out keep-alive new chunks
                        total_size += len(chunk)
                        if total_size > MAX_RESPONSE_SIZE:
                            logger.warning(f"SECURITY: Content exceeded size limit for {url}")
                            return f"Error: Content exceeded size limit ({MAX_RESPONSE_SIZE} bytes)"
                        content_chunks.append(chunk)
            except UnicodeDecodeError:
                # If we can't decode as text, it's probably binary content
                logger.warning(f"CONTENT: Unable to decode content as text from {url}")
                return "Error: Unable to decode content as text"

            html_content = ''.join(content_chunks)

            # Parse with BeautifulSoup
            soup = BeautifulSoup(html_content, "html.parser")

            # Remove script and style elements
            for script in soup(["script", "style"]):
                script.decompose()

            text_content = soup.get_text(separator="\n", strip=True)

            # Limit final content length
            if len(text_content) > CONTENT_LENGTH_LIMIT:
                logger.info(f"CONTENT: Truncating content from {url} ({len(text_content)} -> {CONTENT_LENGTH_LIMIT} chars)")
                text_content = text_content[:CONTENT_LENGTH_LIMIT] + "... [Content truncated]"

            logger.info(f"SUCCESS: Fetched {len(text_content)} characters from {url}")
            return text_content

        except requests.RequestException as e:
            logger.error(f"REQUEST_ERROR: Failed to fetch {url}: {e}")
            return "Error: Unable to fetch URL content"
        except Exception as e:
            logger.error(f"UNEXPECTED_ERROR: Processing {url}: {e}", exc_info=True)
            return "Error: An unexpected error occurred while processing the URL"
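
    The is_safe_url guard is referenced above but not shown on this page. A minimal SSRF-protection sketch along the lines the code implies (scheme allow-list plus rejection of private, loopback, and otherwise non-public addresses); the project's actual check may be stricter:

        import socket
        import ipaddress
        from urllib.parse import urlparse

        def is_safe_url(url: str) -> bool:
            """Hypothetical sketch of an SSRF guard: allow only http(s) URLs whose
            host resolves exclusively to public IP addresses."""
            try:
                parsed = urlparse(url)
                if parsed.scheme not in ("http", "https") or not parsed.hostname:
                    return False
                # Reject hosts that resolve to private, loopback, link-local,
                # reserved, or multicast addresses.
                for info in socket.getaddrinfo(parsed.hostname, None):
                    ip = ipaddress.ip_address(info[4][0])
                    if (ip.is_private or ip.is_loopback or ip.is_link_local
                            or ip.is_reserved or ip.is_multicast):
                        return False
                return True
            except (ValueError, OSError):
                return False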

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/billallison/brsearch-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.