invoke_firecrawl_crawlhtml
Start a web crawl job to extract HTML content from URLs and upload results to S3 storage for processing.
Instructions
Start an asynchronous web crawl job using Firecrawl to retrieve HTML content.
Args:
url: URL to crawl
s3_uri: S3 URI where results will be uploaded
limit: Maximum number of pages to crawl (default: 100)
Returns:
Dictionary with crawl job information including the job ID
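On success this mirrors the response assembled in `_invoke_firecrawl_job` (see Implementation Reference below); a sketch of its shape, with made-up values:

```python
# Illustrative values only; the field names come from the handler's response dictionary
{
    "id": "9c7f3a2e-0b1d-4c5e-8f6a-1234567890ab",
    "status": "started",
    "s3_uri": "s3://my-bucket/firecrawl-output/9c7f3a2e-0b1d-4c5e-8f6a-1234567890ab/",
    "message": "Firecrawl crawlhtml job started and will be auto-processed when complete",
}
```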
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| url | Yes | URL to crawl | |
| s3_uri | Yes | S3 URI where results will be uploaded | |
| limit | No | Maximum number of pages to crawl | 100 |
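For illustration, an input that satisfies this schema (the URL and bucket are placeholders; `limit` may be omitted to accept the default of 100):

```python
# Example tool arguments; values are placeholders
{
    "url": "https://docs.example.com",
    "s3_uri": "s3://my-bucket/firecrawl-output/",
    "limit": 50,
}
```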
Implementation Reference
- The primary handler function for the `invoke_firecrawl_crawlhtml` tool. It prepares parameters for an HTML crawl and delegates to the generic job invoker (a standalone usage sketch follows this list).

  ```python
  async def invoke_firecrawl_crawlhtml(
      url: str,
      s3_uri: str,
      limit: int = 100,
  ) -> Dict[str, Any]:
      """Start an asynchronous web crawl job using Firecrawl to retrieve HTML content.

      Args:
          url: URL to crawl
          s3_uri: S3 URI where results will be uploaded
          limit: Maximum number of pages to crawl (default: 100)

      Returns:
          Dictionary with crawl job information including the job ID
      """
      # Call the generic invoke function with crawl-specific parameters
      params = {
          "limit": limit,
          "scrapeOptions": {
              "formats": ["html"],  # Only use HTML format TODO: Bring in other features of this API
          },
      }

      return await _invoke_firecrawl_job(
          url=url,
          s3_uri=s3_uri,
          job_type="crawlhtml",
          job_params=params,
      )
  ```
- uns_mcp/connectors/external/__init__.py:12-20 (registration): Registration of the `invoke_firecrawl_crawlhtml` tool (and related tools) with the MCP server using the `mcp.tool()` decorator (a sketch of the surrounding server setup follows this list).

  ```python
  from .firecrawl import (
      cancel_crawlhtml_job,
      check_crawlhtml_status,
      check_llmtxt_status,
      invoke_firecrawl_crawlhtml,
      invoke_firecrawl_llmtxt,
  )

  mcp.tool()(invoke_firecrawl_crawlhtml)
  ```
- Core helper that performs the actual Firecrawl job invocation: it validates and normalizes the S3 URI, initializes the client, starts the job, and kicks off a background completion task. The `_ensure_valid_s3_uri` helper it calls is not reproduced in this section; a hypothetical sketch of its contract follows this list.

  ```python
  async def _invoke_firecrawl_job(
      url: str,
      s3_uri: str,
      job_type: Firecrawl_JobType,
      job_params: Dict[str, Any],
  ) -> Dict[str, Any]:
      """Generic function to start a Firecrawl job (either HTML crawl or llmfull.txt generation).

      Args:
          url: URL to process
          s3_uri: S3 URI where results will be uploaded
          job_type: Type of job ('crawlhtml' or 'llmtxt')
          job_params: Parameters specific to the job type

      Returns:
          Dictionary with job information including the job ID
      """
      # Get configuration with API key
      config = _prepare_firecrawl_config()

      # Check if config contains an error
      if "error" in config:
          return {"error": config["error"]}

      # Validate and normalize S3 URI first -
      # doing this outside the try block to handle validation errors specifically
      try:
          validated_s3_uri = _ensure_valid_s3_uri(s3_uri)
      except ValueError as ve:
          return {"error": f"Invalid S3 URI: {str(ve)}"}

      try:
          # Initialize the Firecrawl client
          firecrawl = FirecrawlApp(api_key=config["api_key"])

          # Start the job based on job_type
          if job_type == "crawlhtml":
              job_status = firecrawl.async_crawl_url(url, params=job_params)
          elif job_type == "llmfulltxt":
              job_status = firecrawl.async_generate_llms_text(url, params=job_params)
          else:
              return {"error": f"Unknown job type: {job_type}"}

          # Handle the response
          if "id" in job_status:
              job_id = job_status["id"]

              # Start background task without waiting for it
              asyncio.create_task(wait_for_job_completion(job_id, validated_s3_uri, job_type))

              # Prepare and return the response
              response = {
                  "id": job_id,
                  "status": job_status.get("status", "started"),
                  "s3_uri": f"{validated_s3_uri}{job_id}/",
                  "message": f"Firecrawl {job_type} job started "
                  f"and will be auto-processed when complete",
              }

              return response
          else:
              return {"error": f"Failed to start Firecrawl {job_type} job", "details": job_status}

      except Exception as e:
          return {"error": f"Error starting Firecrawl {job_type} job: {str(e)}"}
  ```
- Background helper task invoked by the handler to poll for job completion, process results, and upload to S3. The `_process_crawlhtml_results`, `_process_llmtxt_results`, and `_upload_directory_to_s3` helpers it calls are not reproduced here; an illustrative sketch of the upload stats contract follows this list.

  ```python
  async def wait_for_job_completion(
      job_id: str,
      s3_uri: str,
      job_type: Firecrawl_JobType,
      poll_interval: int = 30,
      timeout: int = 3600,
  ) -> Dict[str, Any]:
      """Poll a Firecrawl job until completion and upload results to S3.

      Args:
          job_id: ID of the job to monitor
          s3_uri: S3 URI where results will be uploaded (already validated)
          job_type: Type of job ('crawlhtml' or 'llmtxt')
          poll_interval: How often to check job status in seconds (default: 30)
          timeout: Maximum time to wait in seconds (default: 1 hour)

      Returns:
          Dictionary with information about the completed job and S3 URI
      """
      # Get configuration with API key
      config = _prepare_firecrawl_config()

      # Check if config contains an error
      if "error" in config:
          return {"error": config["error"]}

      try:
          # Initialize the Firecrawl client
          firecrawl = FirecrawlApp(api_key=config["api_key"])

          start_time = time.time()

          # Poll until completion or timeout
          while True:
              # Check status based on job type
              if job_type == "crawlhtml":
                  result = firecrawl.check_crawl_status(job_id)
              elif job_type == "llmfulltxt":
                  result = firecrawl.check_generate_llms_text_status(job_id)
              else:
                  return {"error": f"Unknown job type: {job_type}", "id": job_id}

              # Check if job is completed
              if result.get("status") == "completed":
                  break

              # Check for timeout
              if time.time() - start_time > timeout:
                  return {
                      "id": job_id,
                      "status": "timeout",
                      "error": f"Timeout waiting for {job_type} job {job_id} to complete",
                      "elapsed_time": time.time() - start_time,
                  }

              # Wait before polling again
              await asyncio.sleep(poll_interval)

          # Job completed - process results based on job type
          with tempfile.TemporaryDirectory() as temp_dir:
              # Create a job-specific subdirectory
              job_dir = os.path.join(temp_dir, job_id)
              os.makedirs(job_dir, exist_ok=True)

              # Process results based on job type
              if job_type == "crawlhtml":
                  file_count = await _process_crawlhtml_results(result, job_dir)
              elif job_type == "llmfulltxt":
                  file_count = _process_llmtxt_results(result, job_dir)
              else:
                  return {"error": f"Unknown job type: {job_type}", "id": job_id}

              # Upload to S3
              final_s3_uri = f"{s3_uri}{job_id}/"
              upload_stats = _upload_directory_to_s3(job_dir, final_s3_uri)

              # Return combined results
              response = {
                  "id": job_id,
                  "status": "completed",
                  "s3_uri": final_s3_uri,
                  "file_count": file_count,
                  "uploaded_files": upload_stats["uploaded_files"],
                  "failed_uploads": upload_stats["failed_files"],
                  "upload_size_bytes": upload_stats["total_bytes"],
                  "elapsed_time": time.time() - start_time,
              }

              # Add job-type specific information
              if job_type == "crawlhtml":
                  response.update(
                      {
                          "completed_urls": result.get("completed", 0),
                          "total_urls": result.get("total", 0),
                      },
                  )
              elif job_type == "llmfulltxt" and "data" in result:
                  response.update(
                      {
                          "processed_urls_count": len(result["data"].get("processedUrls", [])),
                      },
                  )

              return response

      except Exception as e:
          return {"error": f"Error in wait_for_{job_type}_completion: {str(e)}", "id": job_id}
  ```
- Helper to retrieve and validate the Firecrawl API key from environment variables.

  ```python
  def _prepare_firecrawl_config() -> Dict[str, str]:
      """Prepare the Firecrawl configuration by retrieving and validating the API key.

      Returns:
          A dictionary containing either an API key or an error message
      """
      api_key = os.getenv("FIRECRAWL_API_KEY")
      if not api_key:
          return {
              "error": "Firecrawl API key is required. Set FIRECRAWL_API_KEY environment variable.",
          }

      return {"api_key": api_key}
  ```
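Beyond the excerpts above, a minimal sketch of exercising the handler outside the MCP server, assuming `FIRECRAWL_API_KEY` is set and the destination bucket exists; the URL, bucket, and limit are placeholders. Inside the MCP server the long-lived event loop lets the background completion task finish, whereas a one-shot script like this exits before the crawl completes.

```python
import asyncio

async def main() -> None:
    # Placeholder arguments; limit kept small for a quick smoke test
    result = await invoke_firecrawl_crawlhtml(
        url="https://docs.example.com",
        s3_uri="s3://my-bucket/firecrawl-output/",
        limit=10,
    )
    if "error" in result:
        print("failed to start:", result["error"])
    else:
        # The crawl itself continues in the background task started by the handler
        print("job started:", result["id"], "->", result["s3_uri"])

asyncio.run(main())
```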
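The registration snippet assumes an `mcp` server instance defined elsewhere in the package. As a point of reference only, a FastMCP instance from the MCP Python SDK is typically created and run as below; the server name is a placeholder and this is not the project's actual setup code.

```python
from mcp.server.fastmcp import FastMCP

# Hypothetical server instance; the real project defines and configures its own
mcp = FastMCP("uns-mcp-example")

# Tools can be registered with the decorator form, or by calling the decorator
# directly as the registration snippet above does: mcp.tool()(invoke_firecrawl_crawlhtml)

if __name__ == "__main__":
    mcp.run()
```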
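`_ensure_valid_s3_uri` is referenced by `_invoke_firecrawl_job` but not shown in this section. A hypothetical sketch of the contract implied by its call site (raise `ValueError` on bad input, return a prefix that job IDs can be appended to) might look like this; it is an assumption, not the project's implementation:

```python
def _ensure_valid_s3_uri(s3_uri: str) -> str:
    """Hypothetical sketch: validate an S3 URI and normalize it to end with '/'.

    Assumed behavior only, inferred from the call site in _invoke_firecrawl_job.
    """
    if not s3_uri or not s3_uri.startswith("s3://"):
        raise ValueError("S3 URI must start with 's3://'")
    if not s3_uri.endswith("/"):
        s3_uri += "/"
    return s3_uri
```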
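`_upload_directory_to_s3` is likewise not reproduced here. The sketch below only illustrates the stats dictionary (`uploaded_files`, `failed_files`, `total_bytes`) that `wait_for_job_completion` consumes; the boto3 usage and error handling are assumptions, not the project's code.

```python
import os
import boto3

def _upload_directory_to_s3(local_dir: str, s3_uri: str) -> dict:
    """Hypothetical sketch: upload a directory tree to S3 and report basic stats."""
    bucket, _, prefix = s3_uri.removeprefix("s3://").partition("/")
    client = boto3.client("s3")
    stats = {"uploaded_files": 0, "failed_files": 0, "total_bytes": 0}
    for root, _dirs, files in os.walk(local_dir):
        for name in files:
            path = os.path.join(root, name)
            key = prefix + os.path.relpath(path, local_dir).replace(os.sep, "/")
            try:
                client.upload_file(path, bucket, key)
                stats["uploaded_files"] += 1
                stats["total_bytes"] += os.path.getsize(path)
            except Exception:
                stats["failed_files"] += 1
    return stats
```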