
Crawl4AI MCP Server

pyproject.toml
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "crawl4ai-mcp-server"
version = "1.0.0"
description = "MCP server for Crawl4AI web scraping integration"
readme = "README.md"
license = {text = "MIT"}
authors = [
    {name = "Claude", email = "noreply@anthropic.com"}
]
keywords = ["mcp", "crawl4ai", "web-scraping", "fastmcp"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
]
requires-python = ">=3.10"
dependencies = [
    "fastmcp[all]>=0.3.0",
    "crawl4ai>=0.3.74",
    "Pillow>=10.0.0",
    "httpx>=0.24.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-asyncio>=0.21.0",
]

[project.scripts]
crawl4ai-mcp = "crawl4ai_mcp_server:main"

[tool.setuptools.packages.find]
where = ["."]
include = ["crawl4ai_mcp_server*"]

[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
python_files = "test_*.py"
python_classes = "Test*"
python_functions = "test_*"

[tool.ruff]
# Exclude JSON files and other non-Python files from linting
exclude = [
    "*.json",
    "*.md",
    "*.txt",
    "*.log",
    "venv/",
    "node_modules/",
    ".git/",
    "logs/",
    "*.min.js",
    "dist/",
    "build/"
]
# Only lint Python files
include = ["*.py", "*.pyi"]
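The [project.scripts] table installs a crawl4ai-mcp console command that calls a main function in the crawl4ai_mcp_server package. The actual implementation is not shown on this page; the following is only a minimal sketch of what such an entry point could look like given the declared dependencies (FastMCP and Crawl4AI). The tool name scrape_url and its behavior are illustrative assumptions, not the server's real API.

# Hypothetical sketch of crawl4ai_mcp_server:main, not the project's actual code.
from crawl4ai import AsyncWebCrawler
from fastmcp import FastMCP

mcp = FastMCP("crawl4ai-mcp-server")

@mcp.tool()
async def scrape_url(url: str) -> str:
    """Fetch a page with Crawl4AI and return its content as Markdown (assumed tool)."""
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url=url)
        return result.markdown

def main() -> None:
    # Matches the [project.scripts] entry point: crawl4ai-mcp = "crawl4ai_mcp_server:main"
    mcp.run()

if __name__ == "__main__":
    main()

With the package installed (for example via pip install .), the server would then be started simply by running the crawl4ai-mcp command.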

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Nexus-Digital-Automations/crawl4ai-mcp'
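The same endpoint can be queried from Python. Below is a small sketch using httpx (already a dependency of this project); the shape of the JSON response is not documented on this page, so the result is simply printed for inspection.

# Query the Glama MCP directory API for this server's listing.
import httpx

url = "https://glama.ai/api/mcp/v1/servers/Nexus-Digital-Automations/crawl4ai-mcp"
response = httpx.get(url, timeout=10.0)
response.raise_for_status()
print(response.json())  # response schema not shown here; inspect as needed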

If you have feedback or need assistance with the MCP directory API, please join our Discord server.