Code Knowledge MCP Server

Directory structure: └── modelcontextprotocol-python-sdk/ ├── README.md ├── CLAUDE.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── RELEASE.md ├── SECURITY.md ├── pyproject.toml ├── uv.lock ├── .git-blame-ignore-revs ├── .pre-commit-config.yaml ├── .python-version ├── examples/ │ ├── README.md │ ├── clients/ │ │ └── simple-chatbot/ │ │ ├── README.MD │ │ ├── pyproject.toml │ │ ├── uv.lock │ │ ├── .python-version │ │ └── mcp_simple_chatbot/ │ │ ├── main.py │ │ ├── requirements.txt │ │ ├── servers_config.json │ │ ├── test.db │ │ └── .env.example │ ├── fastmcp/ │ │ ├── complex_inputs.py │ │ ├── desktop.py │ │ ├── echo.py │ │ ├── memory.py │ │ ├── parameter_descriptions.py │ │ ├── readme-quickstart.py │ │ ├── screenshot.py │ │ ├── simple_echo.py │ │ ├── text_me.py │ │ └── unicode_example.py │ └── servers/ │ ├── simple-prompt/ │ │ ├── README.md │ │ ├── pyproject.toml │ │ ├── .python-version │ │ └── mcp_simple_prompt/ │ │ ├── __init__.py │ │ ├── __main__.py │ │ └── server.py │ ├── simple-resource/ │ │ ├── README.md │ │ ├── pyproject.toml │ │ ├── .python-version │ │ └── mcp_simple_resource/ │ │ ├── __init__.py │ │ ├── __main__.py │ │ └── server.py │ └── simple-tool/ │ ├── README.md │ ├── pyproject.toml │ ├── .python-version │ └── mcp_simple_tool/ │ ├── __init__.py │ ├── __main__.py │ └── server.py ├── src/ │ └── mcp/ │ ├── __init__.py │ ├── py.typed │ ├── types.py │ ├── cli/ │ │ ├── __init__.py │ │ ├── claude.py │ │ └── cli.py │ ├── client/ │ │ ├── __init__.py │ │ ├── __main__.py │ │ ├── session.py │ │ ├── sse.py │ │ └── stdio.py │ ├── server/ │ │ ├── __init__.py │ │ ├── __main__.py │ │ ├── models.py │ │ ├── session.py │ │ ├── sse.py │ │ ├── stdio.py │ │ ├── websocket.py │ │ ├── fastmcp/ │ │ │ ├── __init__.py │ │ │ ├── exceptions.py │ │ │ ├── server.py │ │ │ ├── prompts/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── manager.py │ │ │ │ └── prompt_manager.py │ │ │ ├── resources/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── 
resource_manager.py │ │ │ │ ├── templates.py │ │ │ │ └── types.py │ │ │ ├── tools/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ └── tool_manager.py │ │ │ └── utilities/ │ │ │ ├── __init__.py │ │ │ ├── func_metadata.py │ │ │ ├── logging.py │ │ │ └── types.py │ │ └── lowlevel/ │ │ ├── __init__.py │ │ ├── helper_types.py │ │ └── server.py │ └── shared/ │ ├── __init__.py │ ├── context.py │ ├── exceptions.py │ ├── memory.py │ ├── progress.py │ ├── session.py │ └── version.py ├── tests/ │ ├── __init__.py │ ├── conftest.py │ ├── test_examples.py │ ├── test_types.py │ ├── client/ │ │ ├── __init__.py │ │ ├── test_config.py │ │ ├── test_session.py │ │ └── test_stdio.py │ ├── issues/ │ │ ├── test_100_tool_listing.py │ │ ├── test_129_resource_templates.py │ │ ├── test_141_resource_templates.py │ │ ├── test_152_resource_mime_type.py │ │ ├── test_176_progress_token.py │ │ ├── test_188_concurrency.py │ │ ├── test_192_request_id.py │ │ └── test_88_random_error.py │ ├── server/ │ │ ├── __init__.py │ │ ├── test_lifespan.py │ │ ├── test_read_resource.py │ │ ├── test_session.py │ │ ├── test_stdio.py │ │ └── fastmcp/ │ │ ├── __init__.py │ │ ├── test_func_metadata.py │ │ ├── test_parameter_descriptions.py │ │ ├── test_server.py │ │ ├── test_tool_manager.py │ │ ├── prompts/ │ │ │ ├── __init__.py │ │ │ ├── test_base.py │ │ │ └── test_manager.py │ │ ├── resources/ │ │ │ ├── __init__.py │ │ │ ├── test_file_resources.py │ │ │ ├── test_function_resources.py │ │ │ ├── test_resource_manager.py │ │ │ ├── test_resource_template.py │ │ │ └── test_resources.py │ │ └── servers/ │ │ ├── __init__.py │ │ └── test_file_server.py │ └── shared/ │ ├── test_memory.py │ ├── test_session.py │ └── test_sse.py └── .github/ ├── ISSUE_TEMPLATE/ │ ├── bug_report.md │ └── feature_request.md └── workflows/ ├── check-lock.yml ├── main-checks.yml ├── publish-pypi.yml ├── pull-request-checks.yml └── shared.yml ================================================ File: README.md 
================================================ # MCP Python SDK <div align="center"> <strong>Python implementation of the Model Context Protocol (MCP)</strong> [![PyPI][pypi-badge]][pypi-url] [![MIT licensed][mit-badge]][mit-url] [![Python Version][python-badge]][python-url] [![Documentation][docs-badge]][docs-url] [![Specification][spec-badge]][spec-url] [![GitHub Discussions][discussions-badge]][discussions-url] </div> <!-- omit in toc --> ## Table of Contents - [Overview](#overview) - [Installation](#installation) - [Quickstart](#quickstart) - [What is MCP?](#what-is-mcp) - [Core Concepts](#core-concepts) - [Server](#server) - [Resources](#resources) - [Tools](#tools) - [Prompts](#prompts) - [Images](#images) - [Context](#context) - [Running Your Server](#running-your-server) - [Development Mode](#development-mode) - [Claude Desktop Integration](#claude-desktop-integration) - [Direct Execution](#direct-execution) - [Examples](#examples) - [Echo Server](#echo-server) - [SQLite Explorer](#sqlite-explorer) - [Advanced Usage](#advanced-usage) - [Low-Level Server](#low-level-server) - [Writing MCP Clients](#writing-mcp-clients) - [MCP Primitives](#mcp-primitives) - [Server Capabilities](#server-capabilities) - [Documentation](#documentation) - [Contributing](#contributing) - [License](#license) [pypi-badge]: https://img.shields.io/pypi/v/mcp.svg [pypi-url]: https://pypi.org/project/mcp/ [mit-badge]: https://img.shields.io/pypi/l/mcp.svg [mit-url]: https://github.com/modelcontextprotocol/python-sdk/blob/main/LICENSE [python-badge]: https://img.shields.io/pypi/pyversions/mcp.svg [python-url]: https://www.python.org/downloads/ [docs-badge]: https://img.shields.io/badge/docs-modelcontextprotocol.io-blue.svg [docs-url]: https://modelcontextprotocol.io [spec-badge]: https://img.shields.io/badge/spec-spec.modelcontextprotocol.io-blue.svg [spec-url]: https://spec.modelcontextprotocol.io [discussions-badge]: 
https://img.shields.io/github/discussions/modelcontextprotocol/python-sdk [discussions-url]: https://github.com/modelcontextprotocol/python-sdk/discussions ## Overview The Model Context Protocol allows applications to provide context for LLMs in a standardized way, separating the concerns of providing context from the actual LLM interaction. This Python SDK implements the full MCP specification, making it easy to: - Build MCP clients that can connect to any MCP server - Create MCP servers that expose resources, prompts and tools - Use standard transports like stdio and SSE - Handle all MCP protocol messages and lifecycle events ## Installation We recommend using [uv](https://docs.astral.sh/uv/) to manage your Python projects: ```bash uv add "mcp[cli]" ``` Alternatively: ```bash pip install mcp ``` ## Quickstart Let's create a simple MCP server that exposes a calculator tool and some data: ```python # server.py from mcp.server.fastmcp import FastMCP # Create an MCP server mcp = FastMCP("Demo") # Add an addition tool @mcp.tool() def add(a: int, b: int) -> int: """Add two numbers""" return a + b # Add a dynamic greeting resource @mcp.resource("greeting://{name}") def get_greeting(name: str) -> str: """Get a personalized greeting""" return f"Hello, {name}!" ``` You can install this server in [Claude Desktop](https://claude.ai/download) and interact with it right away by running: ```bash mcp install server.py ``` Alternatively, you can test it with the MCP Inspector: ```bash mcp dev server.py ``` ## What is MCP? The [Model Context Protocol (MCP)](https://modelcontextprotocol.io) lets you build servers that expose data and functionality to LLM applications in a secure, standardized way. Think of it like a web API, but specifically designed for LLM interactions. 
MCP servers can: - Expose data through **Resources** (think of these sort of like GET endpoints; they are used to load information into the LLM's context) - Provide functionality through **Tools** (sort of like POST endpoints; they are used to execute code or otherwise produce a side effect) - Define interaction patterns through **Prompts** (reusable templates for LLM interactions) - And more! ## Core Concepts ### Server The FastMCP server is your core interface to the MCP protocol. It handles connection management, protocol compliance, and message routing: ```python # Add lifespan support for startup/shutdown with strong typing from contextlib import asynccontextmanager from dataclasses import dataclass from typing import AsyncIterator from mcp.server.fastmcp import FastMCP # Create a named server mcp = FastMCP("My App") # Specify dependencies for deployment and development mcp = FastMCP("My App", dependencies=["pandas", "numpy"]) @dataclass class AppContext: db: Database # Replace with your actual DB type @asynccontextmanager async def app_lifespan(server: FastMCP) -> AsyncIterator[AppContext]: """Manage application lifecycle with type-safe context""" try: # Initialize on startup await db.connect() yield AppContext(db=db) finally: # Cleanup on shutdown await db.disconnect() # Pass lifespan to server mcp = FastMCP("My App", lifespan=app_lifespan) # Access type-safe lifespan context in tools @mcp.tool() def query_db(ctx: Context) -> str: """Tool that uses initialized resources""" db = ctx.request_context.lifespan_context["db"] return db.query() ``` ### Resources Resources are how you expose data to LLMs. 
They're similar to GET endpoints in a REST API - they provide data but shouldn't perform significant computation or have side effects: ```python @mcp.resource("config://app") def get_config() -> str: """Static configuration data""" return "App configuration here" @mcp.resource("users://{user_id}/profile") def get_user_profile(user_id: str) -> str: """Dynamic user data""" return f"Profile data for user {user_id}" ``` ### Tools Tools let LLMs take actions through your server. Unlike resources, tools are expected to perform computation and have side effects: ```python @mcp.tool() def calculate_bmi(weight_kg: float, height_m: float) -> float: """Calculate BMI given weight in kg and height in meters""" return weight_kg / (height_m ** 2) @mcp.tool() async def fetch_weather(city: str) -> str: """Fetch current weather for a city""" async with httpx.AsyncClient() as client: response = await client.get(f"https://api.weather.com/{city}") return response.text ``` ### Prompts Prompts are reusable templates that help LLMs interact with your server effectively: ```python @mcp.prompt() def review_code(code: str) -> str: return f"Please review this code:\n\n{code}" @mcp.prompt() def debug_error(error: str) -> list[Message]: return [ UserMessage("I'm seeing this error:"), UserMessage(error), AssistantMessage("I'll help debug that. 
What have you tried so far?") ] ``` ### Images FastMCP provides an `Image` class that automatically handles image data: ```python from mcp.server.fastmcp import FastMCP, Image from PIL import Image as PILImage @mcp.tool() def create_thumbnail(image_path: str) -> Image: """Create a thumbnail from an image""" img = PILImage.open(image_path) img.thumbnail((100, 100)) return Image(data=img.tobytes(), format="png") ``` ### Context The Context object gives your tools and resources access to MCP capabilities: ```python from mcp.server.fastmcp import FastMCP, Context @mcp.tool() async def long_task(files: list[str], ctx: Context) -> str: """Process multiple files with progress tracking""" for i, file in enumerate(files): ctx.info(f"Processing {file}") await ctx.report_progress(i, len(files)) data, mime_type = await ctx.read_resource(f"file://{file}") return "Processing complete" ``` ## Running Your Server ### Development Mode The fastest way to test and debug your server is with the MCP Inspector: ```bash mcp dev server.py # Add dependencies mcp dev server.py --with pandas --with numpy # Mount local code mcp dev server.py --with-editable . ``` ### Claude Desktop Integration Once your server is ready, install it in Claude Desktop: ```bash mcp install server.py # Custom name mcp install server.py --name "My Analytics Server" # Environment variables mcp install server.py -v API_KEY=abc123 -v DB_URL=postgres://... 
mcp install server.py -f .env ``` ### Direct Execution For advanced scenarios like custom deployments: ```python from mcp.server.fastmcp import FastMCP mcp = FastMCP("My App") if __name__ == "__main__": mcp.run() ``` Run it with: ```bash python server.py # or mcp run server.py ``` ## Examples ### Echo Server A simple server demonstrating resources, tools, and prompts: ```python from mcp.server.fastmcp import FastMCP mcp = FastMCP("Echo") @mcp.resource("echo://{message}") def echo_resource(message: str) -> str: """Echo a message as a resource""" return f"Resource echo: {message}" @mcp.tool() def echo_tool(message: str) -> str: """Echo a message as a tool""" return f"Tool echo: {message}" @mcp.prompt() def echo_prompt(message: str) -> str: """Create an echo prompt""" return f"Please process this message: {message}" ``` ### SQLite Explorer A more complex example showing database integration: ```python from mcp.server.fastmcp import FastMCP import sqlite3 mcp = FastMCP("SQLite Explorer") @mcp.resource("schema://main") def get_schema() -> str: """Provide the database schema as a resource""" conn = sqlite3.connect("database.db") schema = conn.execute( "SELECT sql FROM sqlite_master WHERE type='table'" ).fetchall() return "\n".join(sql[0] for sql in schema if sql[0]) @mcp.tool() def query_data(sql: str) -> str: """Execute SQL queries safely""" conn = sqlite3.connect("database.db") try: result = conn.execute(sql).fetchall() return "\n".join(str(row) for row in result) except Exception as e: return f"Error: {str(e)}" ``` ## Advanced Usage ### Low-Level Server For more control, you can use the low-level server implementation directly. 
This gives you full access to the protocol and allows you to customize every aspect of your server, including lifecycle management through the lifespan API: ```python from contextlib import asynccontextmanager from typing import AsyncIterator @asynccontextmanager async def server_lifespan(server: Server) -> AsyncIterator[dict]: """Manage server startup and shutdown lifecycle.""" try: # Initialize resources on startup await db.connect() yield {"db": db} finally: # Clean up on shutdown await db.disconnect() # Pass lifespan to server server = Server("example-server", lifespan=server_lifespan) # Access lifespan context in handlers @server.call_tool() async def query_db(name: str, arguments: dict) -> list: ctx = server.request_context db = ctx.lifespan_context["db"] return await db.query(arguments["query"]) ``` The lifespan API provides: - A way to initialize resources when the server starts and clean them up when it stops - Access to initialized resources through the request context in handlers - Type-safe context passing between lifespan and request handlers ```python from mcp.server.lowlevel import Server, NotificationOptions from mcp.server.models import InitializationOptions import mcp.server.stdio import mcp.types as types # Create a server instance server = Server("example-server") @server.list_prompts() async def handle_list_prompts() -> list[types.Prompt]: return [ types.Prompt( name="example-prompt", description="An example prompt template", arguments=[ types.PromptArgument( name="arg1", description="Example argument", required=True ) ] ) ] @server.get_prompt() async def handle_get_prompt( name: str, arguments: dict[str, str] | None ) -> types.GetPromptResult: if name != "example-prompt": raise ValueError(f"Unknown prompt: {name}") return types.GetPromptResult( description="Example prompt", messages=[ types.PromptMessage( role="user", content=types.TextContent( type="text", text="Example prompt text" ) ) ] ) async def run(): async with 
mcp.server.stdio.stdio_server() as (read_stream, write_stream): await server.run( read_stream, write_stream, InitializationOptions( server_name="example", server_version="0.1.0", capabilities=server.get_capabilities( notification_options=NotificationOptions(), experimental_capabilities={}, ) ) ) if __name__ == "__main__": import asyncio asyncio.run(run()) ``` ### Writing MCP Clients The SDK provides a high-level client interface for connecting to MCP servers: ```python from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client # Create server parameters for stdio connection server_params = StdioServerParameters( command="python", # Executable args=["example_server.py"], # Optional command line arguments env=None # Optional environment variables ) async def run(): async with stdio_client(server_params) as (read, write): async with ClientSession(read, write) as session: # Initialize the connection await session.initialize() # List available prompts prompts = await session.list_prompts() # Get a prompt prompt = await session.get_prompt("example-prompt", arguments={"arg1": "value"}) # List available resources resources = await session.list_resources() # List available tools tools = await session.list_tools() # Read a resource content, mime_type = await session.read_resource("file://some/path") # Call a tool result = await session.call_tool("tool-name", arguments={"arg1": "value"}) if __name__ == "__main__": import asyncio asyncio.run(run()) ``` ### MCP Primitives The MCP protocol defines three core primitives that servers can implement: | Primitive | Control | Description | Example Use | |-----------|-----------------------|-----------------------------------------------------|------------------------------| | Prompts | User-controlled | Interactive templates invoked by user choice | Slash commands, menu options | | Resources | Application-controlled| Contextual data managed by the client application | File contents, API responses | 
| Tools | Model-controlled | Functions exposed to the LLM to take actions | API calls, data updates | ### Server Capabilities MCP servers declare capabilities during initialization: | Capability | Feature Flag | Description | |-------------|------------------------------|------------------------------------| | `prompts` | `listChanged` | Prompt template management | | `resources` | `subscribe`<br/>`listChanged`| Resource exposure and updates | | `tools` | `listChanged` | Tool discovery and execution | | `logging` | - | Server logging configuration | | `completion`| - | Argument completion suggestions | ## Documentation - [Model Context Protocol documentation](https://modelcontextprotocol.io) - [Model Context Protocol specification](https://spec.modelcontextprotocol.io) - [Officially supported servers](https://github.com/modelcontextprotocol/servers) ## Contributing We are passionate about supporting contributors of all levels of experience and would love to see you get involved in the project. See the [contributing guide](CONTRIBUTING.md) to get started. ## License This project is licensed under the MIT License - see the LICENSE file for details. ================================================ File: CLAUDE.md ================================================ # Development Guidelines This document contains critical information about working with this codebase. Follow these guidelines precisely. ## Core Development Rules 1. Package Management - ONLY use uv, NEVER pip - Installation: `uv add package` - Running tools: `uv run tool` - Upgrading: `uv add --dev package --upgrade-package package` - FORBIDDEN: `uv pip install`, `@latest` syntax 2. Code Quality - Type hints required for all code - Public APIs must have docstrings - Functions must be focused and small - Follow existing patterns exactly - Line length: 88 chars maximum 3. 
Testing Requirements - Framework: `uv run pytest` - Async testing: use anyio, not asyncio - Coverage: test edge cases and errors - New features require tests - Bug fixes require regression tests - For commits fixing bugs or adding features based on user reports add: ```bash git commit --trailer "Reported-by:<name>" ``` Where `<name>` is the name of the user. - For commits related to a Github issue, add ```bash git commit --trailer "Github-Issue:#<number>" ``` - NEVER ever mention a `co-authored-by` or similar aspects. In particular, never mention the tool used to create the commit message or PR. ## Pull Requests - Create a detailed message of what changed. Focus on the high level description of the problem it tries to solve, and how it is solved. Don't go into the specifics of the code unless it adds clarity. - Always add `jerome3o-anthropic` and `jspahrsummers` as reviewer. - NEVER ever mention a `co-authored-by` or similar aspects. In particular, never mention the tool used to create the commit message or PR. ## Python Tools ## Code Formatting 1. Ruff - Format: `uv run ruff format .` - Check: `uv run ruff check .` - Fix: `uv run ruff check . --fix` - Critical issues: - Line length (88 chars) - Import sorting (I001) - Unused imports - Line wrapping: - Strings: use parentheses - Function calls: multi-line with proper indent - Imports: split into multiple lines 2. Type Checking - Tool: `uv run pyright` - Requirements: - Explicit None checks for Optional - Type narrowing for strings - Version warnings can be ignored if checks pass 3. Pre-commit - Config: `.pre-commit-config.yaml` - Runs: on git commit - Tools: Prettier (YAML/JSON), Ruff (Python) - Ruff updates: - Check PyPI versions - Update config rev - Commit config first ## Error Resolution 1. CI Failures - Fix order: 1. Formatting 2. Type errors 3. Linting - Type errors: - Get full line context - Check Optional types - Add type narrowing - Verify function signatures 2. 
Common Issues - Line length: - Break strings with parentheses - Multi-line function calls - Split imports - Types: - Add None checks - Narrow string types - Match existing patterns 3. Best Practices - Check git status before commits - Run formatters before type checks - Keep changes minimal - Follow existing patterns - Document public APIs - Test thoroughly ================================================ File: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at mcp-coc@anthropic.com. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. 
Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. ================================================ File: CONTRIBUTING.md ================================================ # Contributing Thank you for your interest in contributing to the MCP Python SDK! This document provides guidelines and instructions for contributing. ## Development Setup 1. Make sure you have Python 3.10+ installed 2. Install [uv](https://docs.astral.sh/uv/getting-started/installation/) 3. Fork the repository 4. Clone your fork: `git clone https://github.com/YOUR-USERNAME/python-sdk.git` 5. Install dependencies: ```bash uv sync --frozen --all-extras --dev ``` ## Development Workflow 1. Choose the correct branch for your changes: - For bug fixes to a released version: use the latest release branch (e.g. v1.1.x for 1.1.3) - For new features: use the main branch (which will become the next minor/major version) - If unsure, ask in an issue first 2. Create a new branch from your chosen base branch 3. Make your changes 4. Ensure tests pass: ```bash uv run pytest ``` 5. Run type checking: ```bash uv run pyright ``` 6. Run linting: ```bash uv run ruff check . uv run ruff format . 
``` 7. Submit a pull request to the same branch you branched from ## Code Style - We use `ruff` for linting and formatting - Follow PEP 8 style guidelines - Add type hints to all functions - Include docstrings for public APIs ## Pull Request Process 1. Update documentation as needed 2. Add tests for new functionality 3. Ensure CI passes 4. Maintainers will review your code 5. Address review feedback ## Code of Conduct Please note that this project is released with a [Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## License By contributing, you agree that your contributions will be licensed under the MIT License. ================================================ File: LICENSE ================================================ MIT License Copyright (c) 2024 Anthropic, PBC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ File: RELEASE.md ================================================ # Release Process ## Bumping Dependencies 1. Change dependency 2. Upgrade lock with `uv lock --resolution lowest-direct` ## Major or Minor Release 1. Create a release branch named `vX.Y.Z` where `X.Y.Z` is the version. 2. Bump version number on release branch. 3. Create an annotated, signed tag: `git tag -s -a vX.Y.Z` 4. Create a GitHub release using `gh release create` and publish it. 5. Have the release flow reviewed. 6. Bump version number on `main` to the next version followed by `.dev`, e.g. `v0.4.0.dev`. ================================================ File: SECURITY.md ================================================ # Security Policy Thank you for helping us keep the SDKs and systems they interact with secure. ## Reporting Security Issues This SDK is maintained by [Anthropic](https://www.anthropic.com/) as part of the Model Context Protocol project. The security of our systems and user data is Anthropic’s top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities. Our security program is managed on HackerOne and we ask that any validated vulnerability in this functionality be reported through their [submission form](https://hackerone.com/anthropic-vdp/reports/new?type=team&report_type=vulnerability). ## Vulnerability Disclosure Program Our Vulnerability Program Guidelines are defined on our [HackerOne program page](https://hackerone.com/anthropic-vdp). ================================================ File: pyproject.toml ================================================ [project] name = "mcp" version = "1.3.0.dev0" description = "Model Context Protocol SDK" readme = "README.md" requires-python = ">=3.10" authors = [{ name = "Anthropic, PBC." 
}] maintainers = [ { name = "David Soria Parra", email = "davidsp@anthropic.com" }, { name = "Justin Spahr-Summers", email = "justin@anthropic.com" }, ] keywords = ["git", "mcp", "llm", "automation"] license = { text = "MIT" } classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "anyio>=4.5", "httpx>=0.27", "httpx-sse>=0.4", "pydantic>=2.7.2,<3.0.0", "starlette>=0.27", "sse-starlette>=1.6.1", "pydantic-settings>=2.5.2", "uvicorn>=0.23.1", ] [project.optional-dependencies] rich = ["rich>=13.9.4"] cli = ["typer>=0.12.4", "python-dotenv>=1.0.0"] [project.scripts] mcp = "mcp.cli:app [cli]" [tool.uv] resolution = "lowest-direct" dev-dependencies = [ "pyright>=1.1.391", "pytest>=8.3.4", "ruff>=0.8.5", "trio>=0.26.2", "pytest-flakefinder>=1.1.0", "pytest-xdist>=3.6.1", ] [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project.urls] Homepage = "https://modelcontextprotocol.io" Repository = "https://github.com/modelcontextprotocol/python-sdk" Issues = "https://github.com/modelcontextprotocol/python-sdk/issues" [tool.hatch.build.targets.wheel] packages = ["src/mcp"] [tool.pyright] include = ["src/mcp", "tests"] venvPath = "." 
venv = ".venv" [tool.ruff.lint] select = ["E", "F", "I"] ignore = [] [tool.ruff] line-length = 88 target-version = "py310" [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401"] "tests/server/fastmcp/test_func_metadata.py" = ["E501"] [tool.uv.workspace] members = ["examples/servers/*"] [tool.uv.sources] mcp = { workspace = true } ================================================ File: .pre-commit-config.yaml ================================================ fail_fast: true repos: - repo: https://github.com/pre-commit/mirrors-prettier rev: v3.1.0 hooks: - id: prettier types_or: [yaml, json5] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.8.1 hooks: - id: ruff-format - id: ruff args: [--fix, --exit-non-zero-on-fix] - repo: local hooks: - id: uv-lock-check name: Check uv.lock is up to date entry: uv lock --check language: system files: ^(pyproject\.toml|uv\.lock)$ pass_filenames: false ================================================ File: .python-version ================================================ 3.10 ================================================ File: examples/README.md ================================================ # Python SDK Examples This folder aims to provide simple examples of using the Python SDK. Please refer to the [servers repository](https://github.com/modelcontextprotocol/servers) for real-world servers. ================================================ File: examples/clients/simple-chatbot/README.MD ================================================ # MCP Simple Chatbot This example demonstrates how to integrate the Model Context Protocol (MCP) into a simple CLI chatbot. The implementation showcases MCP's flexibility by supporting multiple tools through MCP servers and is compatible with any LLM provider that follows OpenAI API standards. ## Requirements - Python 3.10 - `python-dotenv` - `requests` - `mcp` - `uvicorn` ## Installation 1. **Install the dependencies:** ```bash pip install -r requirements.txt ``` 2. 
**Set up environment variables:** Create a `.env` file in the root directory and add your API key (the client reads `GROQ_API_KEY`, as shown in `.env.example`): ```plaintext GROQ_API_KEY=your_api_key_here ``` 3. **Configure servers:** The `servers_config.json` follows the same structure as Claude Desktop, allowing for easy integration of multiple servers. Here's an example: ```json { "mcpServers": { "sqlite": { "command": "uvx", "args": ["mcp-server-sqlite", "--db-path", "./test.db"] }, "puppeteer": { "command": "npx", "args": ["-y", "@modelcontextprotocol/server-puppeteer"] } } } ``` Environment variables are supported as well. Pass them as you would with the Claude Desktop App. Example: ```json { "mcpServers": { "server_name": { "command": "uvx", "args": ["mcp-server-name", "--additional-args"], "env": { "API_KEY": "your_api_key_here" } } } } ``` ## Usage 1. **Run the client:** ```bash python main.py ``` 2. **Interact with the assistant:** The assistant will automatically detect available tools and can respond to queries based on the tools provided by the configured servers. 3. **Exit the session:** Type `quit` or `exit` to end the session. ## Architecture - **Tool Discovery**: Tools are automatically discovered from configured servers. - **System Prompt**: Tools are dynamically included in the system prompt, allowing the LLM to understand available capabilities. - **Server Integration**: Supports any MCP-compatible server, tested with various server implementations including Uvicorn and Node.js. ### Class Structure - **Configuration**: Manages environment variables and server configurations - **Server**: Handles MCP server initialization, tool discovery, and execution - **Tool**: Represents individual tools with their properties and formatting - **LLMClient**: Manages communication with the LLM provider - **ChatSession**: Orchestrates the interaction between user, LLM, and tools ### Logic Flow 1. 
**Tool Integration**: - Tools are dynamically discovered from MCP servers - Tool descriptions are automatically included in system prompt - Tool execution is handled through standardized MCP protocol 2. **Runtime Flow**: - User input is received - Input is sent to LLM with context of available tools - LLM response is parsed: - If it's a tool call → execute tool and return result - If it's a direct response → return to user - Tool results are sent back to LLM for interpretation - Final response is presented to user ================================================ File: examples/clients/simple-chatbot/pyproject.toml ================================================ [project] name = "mcp-simple-chatbot" version = "0.1.0" description = "A simple CLI chatbot using the Model Context Protocol (MCP)" readme = "README.md" requires-python = ">=3.10" authors = [{ name = "Edoardo Cilia" }] keywords = ["mcp", "llm", "chatbot", "cli"] license = { text = "MIT" } classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", ] dependencies = [ "python-dotenv>=1.0.0", "requests>=2.31.0", "mcp>=1.0.0", "uvicorn>=0.32.1" ] [project.scripts] mcp-simple-chatbot = "mcp_simple_chatbot.client:main" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] packages = ["mcp_simple_chatbot"] [tool.pyright] include = ["mcp_simple_chatbot"] venvPath = "." 
venv = ".venv" [tool.ruff.lint] select = ["E", "F", "I"] ignore = [] [tool.ruff] line-length = 88 target-version = "py310" [tool.uv] dev-dependencies = ["pyright>=1.1.379", "pytest>=8.3.3", "ruff>=0.6.9"] ================================================ File: examples/clients/simple-chatbot/.python-version ================================================ 3.10 ================================================ File: examples/clients/simple-chatbot/mcp_simple_chatbot/main.py ================================================ import asyncio import json import logging import os import shutil from contextlib import AsyncExitStack from typing import Any import httpx from dotenv import load_dotenv from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client # Configure logging logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" ) class Configuration: """Manages configuration and environment variables for the MCP client.""" def __init__(self) -> None: """Initialize configuration with environment variables.""" self.load_env() self.api_key = os.getenv("GROQ_API_KEY") @staticmethod def load_env() -> None: """Load environment variables from .env file.""" load_dotenv() @staticmethod def load_config(file_path: str) -> dict[str, Any]: """Load server configuration from JSON file. Args: file_path: Path to the JSON configuration file. Returns: Dict containing server configuration. Raises: FileNotFoundError: If configuration file doesn't exist. JSONDecodeError: If configuration file is invalid JSON. """ with open(file_path, "r") as f: return json.load(f) @property def llm_api_key(self) -> str: """Get the LLM API key. Returns: The API key as a string. Raises: ValueError: If the API key is not found in environment variables. 
""" if not self.api_key: raise ValueError("LLM_API_KEY not found in environment variables") return self.api_key class Server: """Manages MCP server connections and tool execution.""" def __init__(self, name: str, config: dict[str, Any]) -> None: self.name: str = name self.config: dict[str, Any] = config self.stdio_context: Any | None = None self.session: ClientSession | None = None self._cleanup_lock: asyncio.Lock = asyncio.Lock() self.exit_stack: AsyncExitStack = AsyncExitStack() async def initialize(self) -> None: """Initialize the server connection.""" command = ( shutil.which("npx") if self.config["command"] == "npx" else self.config["command"] ) if command is None: raise ValueError("The command must be a valid string and cannot be None.") server_params = StdioServerParameters( command=command, args=self.config["args"], env={**os.environ, **self.config["env"]} if self.config.get("env") else None, ) try: stdio_transport = await self.exit_stack.enter_async_context( stdio_client(server_params) ) read, write = stdio_transport session = await self.exit_stack.enter_async_context( ClientSession(read, write) ) await session.initialize() self.session = session except Exception as e: logging.error(f"Error initializing server {self.name}: {e}") await self.cleanup() raise async def list_tools(self) -> list[Any]: """List available tools from the server. Returns: A list of available tools. Raises: RuntimeError: If the server is not initialized. """ if not self.session: raise RuntimeError(f"Server {self.name} not initialized") tools_response = await self.session.list_tools() tools = [] for item in tools_response: if isinstance(item, tuple) and item[0] == "tools": for tool in item[1]: tools.append(Tool(tool.name, tool.description, tool.inputSchema)) return tools async def execute_tool( self, tool_name: str, arguments: dict[str, Any], retries: int = 2, delay: float = 1.0, ) -> Any: """Execute a tool with retry mechanism. Args: tool_name: Name of the tool to execute. 
arguments: Tool arguments. retries: Number of retry attempts. delay: Delay between retries in seconds. Returns: Tool execution result. Raises: RuntimeError: If server is not initialized. Exception: If tool execution fails after all retries. """ if not self.session: raise RuntimeError(f"Server {self.name} not initialized") attempt = 0 while attempt < retries: try: logging.info(f"Executing {tool_name}...") result = await self.session.call_tool(tool_name, arguments) return result except Exception as e: attempt += 1 logging.warning( f"Error executing tool: {e}. Attempt {attempt} of {retries}." ) if attempt < retries: logging.info(f"Retrying in {delay} seconds...") await asyncio.sleep(delay) else: logging.error("Max retries reached. Failing.") raise async def cleanup(self) -> None: """Clean up server resources.""" async with self._cleanup_lock: try: await self.exit_stack.aclose() self.session = None self.stdio_context = None except Exception as e: logging.error(f"Error during cleanup of server {self.name}: {e}") class Tool: """Represents a tool with its properties and formatting.""" def __init__( self, name: str, description: str, input_schema: dict[str, Any] ) -> None: self.name: str = name self.description: str = description self.input_schema: dict[str, Any] = input_schema def format_for_llm(self) -> str: """Format tool information for LLM. Returns: A formatted string describing the tool. 
""" args_desc = [] if "properties" in self.input_schema: for param_name, param_info in self.input_schema["properties"].items(): arg_desc = ( f"- {param_name}: {param_info.get('description', 'No description')}" ) if param_name in self.input_schema.get("required", []): arg_desc += " (required)" args_desc.append(arg_desc) return f""" Tool: {self.name} Description: {self.description} Arguments: {chr(10).join(args_desc)} """ class LLMClient: """Manages communication with the LLM provider.""" def __init__(self, api_key: str) -> None: self.api_key: str = api_key def get_response(self, messages: list[dict[str, str]]) -> str: """Get a response from the LLM. Args: messages: A list of message dictionaries. Returns: The LLM's response as a string. Raises: httpx.RequestError: If the request to the LLM fails. """ url = "https://api.groq.com/openai/v1/chat/completions" headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", } payload = { "messages": messages, "model": "llama-3.2-90b-vision-preview", "temperature": 0.7, "max_tokens": 4096, "top_p": 1, "stream": False, "stop": None, } try: with httpx.Client() as client: response = client.post(url, headers=headers, json=payload) response.raise_for_status() data = response.json() return data["choices"][0]["message"]["content"] except httpx.RequestError as e: error_message = f"Error getting LLM response: {str(e)}" logging.error(error_message) if isinstance(e, httpx.HTTPStatusError): status_code = e.response.status_code logging.error(f"Status code: {status_code}") logging.error(f"Response details: {e.response.text}") return ( f"I encountered an error: {error_message}. " "Please try again or rephrase your request." 
) class ChatSession: """Orchestrates the interaction between user, LLM, and tools.""" def __init__(self, servers: list[Server], llm_client: LLMClient) -> None: self.servers: list[Server] = servers self.llm_client: LLMClient = llm_client async def cleanup_servers(self) -> None: """Clean up all servers properly.""" cleanup_tasks = [] for server in self.servers: cleanup_tasks.append(asyncio.create_task(server.cleanup())) if cleanup_tasks: try: await asyncio.gather(*cleanup_tasks, return_exceptions=True) except Exception as e: logging.warning(f"Warning during final cleanup: {e}") async def process_llm_response(self, llm_response: str) -> str: """Process the LLM response and execute tools if needed. Args: llm_response: The response from the LLM. Returns: The result of tool execution or the original response. """ import json try: tool_call = json.loads(llm_response) if "tool" in tool_call and "arguments" in tool_call: logging.info(f"Executing tool: {tool_call['tool']}") logging.info(f"With arguments: {tool_call['arguments']}") for server in self.servers: tools = await server.list_tools() if any(tool.name == tool_call["tool"] for tool in tools): try: result = await server.execute_tool( tool_call["tool"], tool_call["arguments"] ) if isinstance(result, dict) and "progress" in result: progress = result["progress"] total = result["total"] percentage = (progress / total) * 100 logging.info( f"Progress: {progress}/{total} " f"({percentage:.1f}%)" ) return f"Tool execution result: {result}" except Exception as e: error_msg = f"Error executing tool: {str(e)}" logging.error(error_msg) return error_msg return f"No server found with tool: {tool_call['tool']}" return llm_response except json.JSONDecodeError: return llm_response async def start(self) -> None: """Main chat session handler.""" try: for server in self.servers: try: await server.initialize() except Exception as e: logging.error(f"Failed to initialize server: {e}") await self.cleanup_servers() return all_tools = [] for 
server in self.servers: tools = await server.list_tools() all_tools.extend(tools) tools_description = "\n".join([tool.format_for_llm() for tool in all_tools]) system_message = ( "You are a helpful assistant with access to these tools:\n\n" f"{tools_description}\n" "Choose the appropriate tool based on the user's question. " "If no tool is needed, reply directly.\n\n" "IMPORTANT: When you need to use a tool, you must ONLY respond with " "the exact JSON object format below, nothing else:\n" "{\n" ' "tool": "tool-name",\n' ' "arguments": {\n' ' "argument-name": "value"\n' " }\n" "}\n\n" "After receiving a tool's response:\n" "1. Transform the raw data into a natural, conversational response\n" "2. Keep responses concise but informative\n" "3. Focus on the most relevant information\n" "4. Use appropriate context from the user's question\n" "5. Avoid simply repeating the raw data\n\n" "Please use only the tools that are explicitly defined above." ) messages = [{"role": "system", "content": system_message}] while True: try: user_input = input("You: ").strip().lower() if user_input in ["quit", "exit"]: logging.info("\nExiting...") break messages.append({"role": "user", "content": user_input}) llm_response = self.llm_client.get_response(messages) logging.info("\nAssistant: %s", llm_response) result = await self.process_llm_response(llm_response) if result != llm_response: messages.append({"role": "assistant", "content": llm_response}) messages.append({"role": "system", "content": result}) final_response = self.llm_client.get_response(messages) logging.info("\nFinal response: %s", final_response) messages.append( {"role": "assistant", "content": final_response} ) else: messages.append({"role": "assistant", "content": llm_response}) except KeyboardInterrupt: logging.info("\nExiting...") break finally: await self.cleanup_servers() async def main() -> None: """Initialize and run the chat session.""" config = Configuration() server_config = 
config.load_config("servers_config.json") servers = [ Server(name, srv_config) for name, srv_config in server_config["mcpServers"].items() ] llm_client = LLMClient(config.llm_api_key) chat_session = ChatSession(servers, llm_client) await chat_session.start() if __name__ == "__main__": asyncio.run(main()) ================================================ File: examples/clients/simple-chatbot/mcp_simple_chatbot/requirements.txt ================================================ python-dotenv>=1.0.0 requests>=2.31.0 mcp>=1.0.0 uvicorn>=0.32.1 ================================================ File: examples/clients/simple-chatbot/mcp_simple_chatbot/servers_config.json ================================================ { "mcpServers": { "sqlite": { "command": "uvx", "args": ["mcp-server-sqlite", "--db-path", "./test.db"] }, "puppeteer": { "command": "npx", "args": ["-y", "@modelcontextprotocol/server-puppeteer"] } } } ================================================ File: examples/clients/simple-chatbot/mcp_simple_chatbot/.env.example ================================================ GROQ_API_KEY=gsk_1234567890 ================================================ File: examples/fastmcp/complex_inputs.py ================================================ """ FastMCP Complex inputs Example Demonstrates validation via pydantic with complex models. """ from typing import Annotated from pydantic import BaseModel, Field from mcp.server.fastmcp import FastMCP mcp = FastMCP("Shrimp Tank") class ShrimpTank(BaseModel): class Shrimp(BaseModel): name: Annotated[str, Field(max_length=10)] shrimp: list[Shrimp] @mcp.tool() def name_shrimp( tank: ShrimpTank, # You can use pydantic Field in function signatures for validation. 
extra_names: Annotated[list[str], Field(max_length=10)], ) -> list[str]: """List all shrimp names in the tank""" return [shrimp.name for shrimp in tank.shrimp] + extra_names ================================================ File: examples/fastmcp/desktop.py ================================================ """ FastMCP Desktop Example A simple example that exposes the desktop directory as a resource. """ from pathlib import Path from mcp.server.fastmcp import FastMCP # Create server mcp = FastMCP("Demo") @mcp.resource("dir://desktop") def desktop() -> list[str]: """List the files in the user's desktop""" desktop = Path.home() / "Desktop" return [str(f) for f in desktop.iterdir()] @mcp.tool() def add(a: int, b: int) -> int: """Add two numbers""" return a + b ================================================ File: examples/fastmcp/echo.py ================================================ """ FastMCP Echo Server """ from mcp.server.fastmcp import FastMCP # Create server mcp = FastMCP("Echo Server") @mcp.tool() def echo_tool(text: str) -> str: """Echo the input text""" return text @mcp.resource("echo://static") def echo_resource() -> str: return "Echo!" @mcp.resource("echo://{text}") def echo_template(text: str) -> str: """Echo the input text""" return f"Echo: {text}" @mcp.prompt("echo") def echo_prompt(text: str) -> str: return text ================================================ File: examples/fastmcp/memory.py ================================================ # /// script # dependencies = ["pydantic-ai-slim[openai]", "asyncpg", "numpy", "pgvector"] # /// # uv pip install 'pydantic-ai-slim[openai]' asyncpg numpy pgvector """ Recursive memory system inspired by the human brain's clustering of memories. Uses OpenAI's 'text-embedding-3-small' model and pgvector for efficient similarity search. 
""" import asyncio import math import os from dataclasses import dataclass from datetime import datetime, timezone from pathlib import Path from typing import Annotated, Self import asyncpg import numpy as np from openai import AsyncOpenAI from pgvector.asyncpg import register_vector # Import register_vector from pydantic import BaseModel, Field from pydantic_ai import Agent from mcp.server.fastmcp import FastMCP MAX_DEPTH = 5 SIMILARITY_THRESHOLD = 0.7 DECAY_FACTOR = 0.99 REINFORCEMENT_FACTOR = 1.1 DEFAULT_LLM_MODEL = "openai:gpt-4o" DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small" mcp = FastMCP( "memory", dependencies=[ "pydantic-ai-slim[openai]", "asyncpg", "numpy", "pgvector", ], ) DB_DSN = "postgresql://postgres:postgres@localhost:54320/memory_db" # reset memory with rm ~/.fastmcp/{USER}/memory/* PROFILE_DIR = ( Path.home() / ".fastmcp" / os.environ.get("USER", "anon") / "memory" ).resolve() PROFILE_DIR.mkdir(parents=True, exist_ok=True) def cosine_similarity(a: list[float], b: list[float]) -> float: a_array = np.array(a, dtype=np.float64) b_array = np.array(b, dtype=np.float64) return np.dot(a_array, b_array) / ( np.linalg.norm(a_array) * np.linalg.norm(b_array) ) async def do_ai[T]( user_prompt: str, system_prompt: str, result_type: type[T] | Annotated, deps=None, ) -> T: agent = Agent( DEFAULT_LLM_MODEL, system_prompt=system_prompt, result_type=result_type, ) result = await agent.run(user_prompt, deps=deps) return result.data @dataclass class Deps: openai: AsyncOpenAI pool: asyncpg.Pool async def get_db_pool() -> asyncpg.Pool: async def init(conn): await conn.execute("CREATE EXTENSION IF NOT EXISTS vector;") await register_vector(conn) pool = await asyncpg.create_pool(DB_DSN, init=init) return pool class MemoryNode(BaseModel): id: int | None = None content: str summary: str = "" importance: float = 1.0 access_count: int = 0 timestamp: float = Field( default_factory=lambda: datetime.now(timezone.utc).timestamp() ) embedding: list[float] @classmethod async 
def from_content(cls, content: str, deps: Deps): embedding = await get_embedding(content, deps) return cls(content=content, embedding=embedding) async def save(self, deps: Deps): async with deps.pool.acquire() as conn: if self.id is None: result = await conn.fetchrow( """ INSERT INTO memories (content, summary, importance, access_count, timestamp, embedding) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id """, self.content, self.summary, self.importance, self.access_count, self.timestamp, self.embedding, ) self.id = result["id"] else: await conn.execute( """ UPDATE memories SET content = $1, summary = $2, importance = $3, access_count = $4, timestamp = $5, embedding = $6 WHERE id = $7 """, self.content, self.summary, self.importance, self.access_count, self.timestamp, self.embedding, self.id, ) async def merge_with(self, other: Self, deps: Deps): self.content = await do_ai( f"{self.content}\n\n{other.content}", "Combine the following two texts into a single, coherent text.", str, deps, ) self.importance += other.importance self.access_count += other.access_count self.embedding = [(a + b) / 2 for a, b in zip(self.embedding, other.embedding)] self.summary = await do_ai( self.content, "Summarize the following text concisely.", str, deps ) await self.save(deps) # Delete the merged node from the database if other.id is not None: await delete_memory(other.id, deps) def get_effective_importance(self): return self.importance * (1 + math.log(self.access_count + 1)) async def get_embedding(text: str, deps: Deps) -> list[float]: embedding_response = await deps.openai.embeddings.create( input=text, model=DEFAULT_EMBEDDING_MODEL, ) return embedding_response.data[0].embedding async def delete_memory(memory_id: int, deps: Deps): async with deps.pool.acquire() as conn: await conn.execute("DELETE FROM memories WHERE id = $1", memory_id) async def add_memory(content: str, deps: Deps): new_memory = await MemoryNode.from_content(content, deps) await new_memory.save(deps) similar_memories 
= await find_similar_memories(new_memory.embedding, deps) for memory in similar_memories: if memory.id != new_memory.id: await new_memory.merge_with(memory, deps) await update_importance(new_memory.embedding, deps) await prune_memories(deps) return f"Remembered: {content}" async def find_similar_memories(embedding: list[float], deps: Deps) -> list[MemoryNode]: async with deps.pool.acquire() as conn: rows = await conn.fetch( """ SELECT id, content, summary, importance, access_count, timestamp, embedding FROM memories ORDER BY embedding <-> $1 LIMIT 5 """, embedding, ) memories = [ MemoryNode( id=row["id"], content=row["content"], summary=row["summary"], importance=row["importance"], access_count=row["access_count"], timestamp=row["timestamp"], embedding=row["embedding"], ) for row in rows ] return memories async def update_importance(user_embedding: list[float], deps: Deps): async with deps.pool.acquire() as conn: rows = await conn.fetch( "SELECT id, importance, access_count, embedding FROM memories" ) for row in rows: memory_embedding = row["embedding"] similarity = cosine_similarity(user_embedding, memory_embedding) if similarity > SIMILARITY_THRESHOLD: new_importance = row["importance"] * REINFORCEMENT_FACTOR new_access_count = row["access_count"] + 1 else: new_importance = row["importance"] * DECAY_FACTOR new_access_count = row["access_count"] await conn.execute( """ UPDATE memories SET importance = $1, access_count = $2 WHERE id = $3 """, new_importance, new_access_count, row["id"], ) async def prune_memories(deps: Deps): async with deps.pool.acquire() as conn: rows = await conn.fetch( """ SELECT id, importance, access_count FROM memories ORDER BY importance DESC OFFSET $1 """, MAX_DEPTH, ) for row in rows: await conn.execute("DELETE FROM memories WHERE id = $1", row["id"]) async def display_memory_tree(deps: Deps) -> str: async with deps.pool.acquire() as conn: rows = await conn.fetch( """ SELECT content, summary, importance, access_count FROM memories ORDER 
BY importance DESC LIMIT $1 """, MAX_DEPTH, ) result = "" for row in rows: effective_importance = row["importance"] * ( 1 + math.log(row["access_count"] + 1) ) summary = row["summary"] or row["content"] result += f"- {summary} (Importance: {effective_importance:.2f})\n" return result @mcp.tool() async def remember( contents: list[str] = Field( description="List of observations or memories to store" ), ): deps = Deps(openai=AsyncOpenAI(), pool=await get_db_pool()) try: return "\n".join( await asyncio.gather(*[add_memory(content, deps) for content in contents]) ) finally: await deps.pool.close() @mcp.tool() async def read_profile() -> str: deps = Deps(openai=AsyncOpenAI(), pool=await get_db_pool()) profile = await display_memory_tree(deps) await deps.pool.close() return profile async def initialize_database(): pool = await asyncpg.create_pool( "postgresql://postgres:postgres@localhost:54320/postgres" ) try: async with pool.acquire() as conn: await conn.execute(""" SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'memory_db' AND pid <> pg_backend_pid(); """) await conn.execute("DROP DATABASE IF EXISTS memory_db;") await conn.execute("CREATE DATABASE memory_db;") finally: await pool.close() pool = await asyncpg.create_pool(DB_DSN) try: async with pool.acquire() as conn: await conn.execute("CREATE EXTENSION IF NOT EXISTS vector;") await register_vector(conn) await conn.execute(""" CREATE TABLE IF NOT EXISTS memories ( id SERIAL PRIMARY KEY, content TEXT NOT NULL, summary TEXT, importance REAL NOT NULL, access_count INT NOT NULL, timestamp DOUBLE PRECISION NOT NULL, embedding vector(1536) NOT NULL ); CREATE INDEX IF NOT EXISTS idx_memories_embedding ON memories USING hnsw (embedding vector_l2_ops); """) finally: await pool.close() if __name__ == "__main__": asyncio.run(initialize_database()) ================================================ File: examples/fastmcp/parameter_descriptions.py 
================================================ """ FastMCP Example showing parameter descriptions """ from pydantic import Field from mcp.server.fastmcp import FastMCP # Create server mcp = FastMCP("Parameter Descriptions Server") @mcp.tool() def greet_user( name: str = Field(description="The name of the person to greet"), title: str = Field(description="Optional title like Mr/Ms/Dr", default=""), times: int = Field(description="Number of times to repeat the greeting", default=1), ) -> str: """Greet a user with optional title and repetition""" greeting = f"Hello {title + ' ' if title else ''}{name}!" return "\n".join([greeting] * times) ================================================ File: examples/fastmcp/readme-quickstart.py ================================================ from mcp.server.fastmcp import FastMCP # Create an MCP server mcp = FastMCP("Demo") # Add an addition tool @mcp.tool() def add(a: int, b: int) -> int: """Add two numbers""" return a + b # Add a dynamic greeting resource @mcp.resource("greeting://{name}") def get_greeting(name: str) -> str: """Get a personalized greeting""" return f"Hello, {name}!" ================================================ File: examples/fastmcp/screenshot.py ================================================ """ FastMCP Screenshot Example Give Claude a tool to capture and view screenshots. """ import io from mcp.server.fastmcp import FastMCP from mcp.server.fastmcp.utilities.types import Image # Create server mcp = FastMCP("Screenshot Demo", dependencies=["pyautogui", "Pillow"]) @mcp.tool() def take_screenshot() -> Image: """ Take a screenshot of the user's screen and return it as an image. Use this tool anytime the user wants you to look at something they're doing. 
""" import pyautogui buffer = io.BytesIO() # if the file exceeds ~1MB, it will be rejected by Claude screenshot = pyautogui.screenshot() screenshot.convert("RGB").save(buffer, format="JPEG", quality=60, optimize=True) return Image(data=buffer.getvalue(), format="jpeg") ================================================ File: examples/fastmcp/simple_echo.py ================================================ """ FastMCP Echo Server """ from mcp.server.fastmcp import FastMCP # Create server mcp = FastMCP("Echo Server") @mcp.tool() def echo(text: str) -> str: """Echo the input text""" return text ================================================ File: examples/fastmcp/text_me.py ================================================ # /// script # dependencies = [] # /// """ FastMCP Text Me Server -------------------------------- This defines a simple FastMCP server that sends a text message to a phone number via https://surgemsg.com/. To run this example, create a `.env` file with the following values: SURGE_API_KEY=... SURGE_ACCOUNT_ID=... SURGE_MY_PHONE_NUMBER=... SURGE_MY_FIRST_NAME=... SURGE_MY_LAST_NAME=... Visit https://surgemsg.com/ and click "Get Started" to obtain these values. 
""" from typing import Annotated import httpx from pydantic import BeforeValidator from pydantic_settings import BaseSettings, SettingsConfigDict from mcp.server.fastmcp import FastMCP class SurgeSettings(BaseSettings): model_config: SettingsConfigDict = SettingsConfigDict( env_prefix="SURGE_", env_file=".env" ) api_key: str account_id: str my_phone_number: Annotated[ str, BeforeValidator(lambda v: "+" + v if not v.startswith("+") else v) ] my_first_name: str my_last_name: str # Create server mcp = FastMCP("Text me") surge_settings = SurgeSettings() # type: ignore @mcp.tool(name="textme", description="Send a text message to me") def text_me(text_content: str) -> str: """Send a text message to a phone number via https://surgemsg.com/""" with httpx.Client() as client: response = client.post( "https://api.surgemsg.com/messages", headers={ "Authorization": f"Bearer {surge_settings.api_key}", "Surge-Account": surge_settings.account_id, "Content-Type": "application/json", }, json={ "body": text_content, "conversation": { "contact": { "first_name": surge_settings.my_first_name, "last_name": surge_settings.my_last_name, "phone_number": surge_settings.my_phone_number, } }, }, ) response.raise_for_status() return f"Message sent: {text_content}" ================================================ File: examples/fastmcp/unicode_example.py ================================================ """ Example FastMCP server that uses Unicode characters in various places to help test Unicode handling in tools and inspectors. """ from mcp.server.fastmcp import FastMCP mcp = FastMCP() @mcp.tool( description="🌟 A tool that uses various Unicode characters in its description: " "á é í ó ú ñ 漢字 🎉" ) def hello_unicode(name: str = "世界", greeting: str = "¡Hola") -> str: """ A simple tool that demonstrates Unicode handling in: - Tool description (emojis, accents, CJK characters) - Parameter defaults (CJK characters) - Return values (Spanish punctuation, emojis) """ return f"{greeting}, {name}! 
👋" @mcp.tool(description="🎨 Tool that returns a list of emoji categories") def list_emoji_categories() -> list[str]: """Returns a list of emoji categories with emoji examples.""" return [ "😀 Smileys & Emotion", "👋 People & Body", "🐶 Animals & Nature", "🍎 Food & Drink", "⚽ Activities", "🌍 Travel & Places", "💡 Objects", "❤️ Symbols", "🚩 Flags", ] @mcp.tool(description="🔤 Tool that returns text in different scripts") def multilingual_hello() -> str: """Returns hello in different scripts and writing systems.""" return "\n".join( [ "English: Hello!", "Spanish: ¡Hola!", "French: Bonjour!", "German: Grüß Gott!", "Russian: Привет!", "Greek: Γεια σας!", "Hebrew: !שָׁלוֹם", "Arabic: !مرحبا", "Hindi: नमस्ते!", "Chinese: 你好!", "Japanese: こんにちは!", "Korean: 안녕하세요!", "Thai: สวัสดี!", ] ) if __name__ == "__main__": mcp.run() ================================================ File: examples/servers/simple-prompt/README.md ================================================ # MCP Simple Prompt A simple MCP server that exposes a customizable prompt template with optional context and topic parameters. 
## Usage Start the server using either stdio (default) or SSE transport: ```bash # Using stdio transport (default) uv run mcp-simple-prompt # Using SSE transport on custom port uv run mcp-simple-prompt --transport sse --port 8000 ``` The server exposes a prompt named "simple" that accepts two optional arguments: - `context`: Additional context to consider - `topic`: Specific topic to focus on ## Example Using the MCP client, you can retrieve the prompt like this using the STDIO transport: ```python import asyncio from mcp.client.session import ClientSession from mcp.client.stdio import StdioServerParameters, stdio_client async def main(): async with stdio_client( StdioServerParameters(command="uv", args=["run", "mcp-simple-prompt"]) ) as (read, write): async with ClientSession(read, write) as session: await session.initialize() # List available prompts prompts = await session.list_prompts() print(prompts) # Get the prompt with arguments prompt = await session.get_prompt( "simple", { "context": "User is a software developer", "topic": "Python async programming", }, ) print(prompt) asyncio.run(main()) ``` ================================================ File: examples/servers/simple-prompt/pyproject.toml ================================================ [project] name = "mcp-simple-prompt" version = "0.1.0" description = "A simple MCP server exposing a customizable prompt" readme = "README.md" requires-python = ">=3.10" authors = [{ name = "Anthropic, PBC." 
}] maintainers = [ { name = "David Soria Parra", email = "davidsp@anthropic.com" }, { name = "Justin Spahr-Summers", email = "justin@anthropic.com" }, ] keywords = ["mcp", "llm", "automation", "web", "fetch"] license = { text = "MIT" } classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", ] dependencies = ["anyio>=4.5", "click>=8.1.0", "httpx>=0.27", "mcp"] [project.scripts] mcp-simple-prompt = "mcp_simple_prompt.server:main" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] packages = ["mcp_simple_prompt"] [tool.pyright] include = ["mcp_simple_prompt"] venvPath = "." venv = ".venv" [tool.ruff.lint] select = ["E", "F", "I"] ignore = [] [tool.ruff] line-length = 88 target-version = "py310" [tool.uv] dev-dependencies = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"] ================================================ File: examples/servers/simple-prompt/.python-version ================================================ 3.10 ================================================ File: examples/servers/simple-prompt/mcp_simple_prompt/__init__.py ================================================ ================================================ File: examples/servers/simple-prompt/mcp_simple_prompt/__main__.py ================================================ import sys from .server import main sys.exit(main()) ================================================ File: examples/servers/simple-prompt/mcp_simple_prompt/server.py ================================================ import anyio import click import mcp.types as types from mcp.server.lowlevel import Server def create_messages( context: str | None = None, topic: str | None = None ) -> list[types.PromptMessage]: """Create the messages for the prompt.""" messages = [] # Add context if provided if context: 
messages.append( types.PromptMessage( role="user", content=types.TextContent( type="text", text=f"Here is some relevant context: {context}" ), ) ) # Add the main prompt prompt = "Please help me with " if topic: prompt += f"the following topic: {topic}" else: prompt += "whatever questions I may have." messages.append( types.PromptMessage( role="user", content=types.TextContent(type="text", text=prompt) ) ) return messages @click.command() @click.option("--port", default=8000, help="Port to listen on for SSE") @click.option( "--transport", type=click.Choice(["stdio", "sse"]), default="stdio", help="Transport type", ) def main(port: int, transport: str) -> int: app = Server("mcp-simple-prompt") @app.list_prompts() async def list_prompts() -> list[types.Prompt]: return [ types.Prompt( name="simple", description="A simple prompt that can take optional context and topic " "arguments", arguments=[ types.PromptArgument( name="context", description="Additional context to consider", required=False, ), types.PromptArgument( name="topic", description="Specific topic to focus on", required=False, ), ], ) ] @app.get_prompt() async def get_prompt( name: str, arguments: dict[str, str] | None = None ) -> types.GetPromptResult: if name != "simple": raise ValueError(f"Unknown prompt: {name}") if arguments is None: arguments = {} return types.GetPromptResult( messages=create_messages( context=arguments.get("context"), topic=arguments.get("topic") ), description="A simple prompt with optional context and topic arguments", ) if transport == "sse": from mcp.server.sse import SseServerTransport from starlette.applications import Starlette from starlette.routing import Mount, Route sse = SseServerTransport("/messages/") async def handle_sse(request): async with sse.connect_sse( request.scope, request.receive, request._send ) as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) starlette_app = Starlette( debug=True, routes=[ Route("/sse", 
endpoint=handle_sse), Mount("/messages/", app=sse.handle_post_message), ], ) import uvicorn uvicorn.run(starlette_app, host="0.0.0.0", port=port) else: from mcp.server.stdio import stdio_server async def arun(): async with stdio_server() as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) anyio.run(arun) return 0 ================================================ File: examples/servers/simple-resource/README.md ================================================ # MCP Simple Resource A simple MCP server that exposes sample text files as resources. ## Usage Start the server using either stdio (default) or SSE transport: ```bash # Using stdio transport (default) uv run mcp-simple-resource # Using SSE transport on custom port uv run mcp-simple-resource --transport sse --port 8000 ``` The server exposes some basic text file resources that can be read by clients. ## Example Using the MCP client, you can retrieve resources like this using the STDIO transport: ```python import asyncio from mcp.types import AnyUrl from mcp.client.session import ClientSession from mcp.client.stdio import StdioServerParameters, stdio_client async def main(): async with stdio_client( StdioServerParameters(command="uv", args=["run", "mcp-simple-resource"]) ) as (read, write): async with ClientSession(read, write) as session: await session.initialize() # List available resources resources = await session.list_resources() print(resources) # Get a specific resource resource = await session.read_resource(AnyUrl("file:///greeting.txt")) print(resource) asyncio.run(main()) ``` ================================================ File: examples/servers/simple-resource/pyproject.toml ================================================ [project] name = "mcp-simple-resource" version = "0.1.0" description = "A simple MCP server exposing sample text resources" readme = "README.md" requires-python = ">=3.10" authors = [{ name = "Anthropic, PBC." 
}] maintainers = [ { name = "David Soria Parra", email = "davidsp@anthropic.com" }, { name = "Justin Spahr-Summers", email = "justin@anthropic.com" }, ] keywords = ["mcp", "llm", "automation", "web", "fetch"] license = { text = "MIT" } classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", ] dependencies = ["anyio>=4.5", "click>=8.1.0", "httpx>=0.27", "mcp"] [project.scripts] mcp-simple-resource = "mcp_simple_resource.server:main" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] packages = ["mcp_simple_resource"] [tool.pyright] include = ["mcp_simple_resource"] venvPath = "." venv = ".venv" [tool.ruff.lint] select = ["E", "F", "I"] ignore = [] [tool.ruff] line-length = 88 target-version = "py310" [tool.uv] dev-dependencies = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"] ================================================ File: examples/servers/simple-resource/.python-version ================================================ 3.10 ================================================ File: examples/servers/simple-resource/mcp_simple_resource/__init__.py ================================================ ================================================ File: examples/servers/simple-resource/mcp_simple_resource/__main__.py ================================================ import sys from server import main sys.exit(main()) ================================================ File: examples/servers/simple-resource/mcp_simple_resource/server.py ================================================ import anyio import click import mcp.types as types from mcp.server.lowlevel import Server from pydantic import FileUrl SAMPLE_RESOURCES = { "greeting": "Hello! 
This is a sample text resource.", "help": "This server provides a few sample text resources for testing.", "about": "This is the simple-resource MCP server implementation.", } @click.command() @click.option("--port", default=8000, help="Port to listen on for SSE") @click.option( "--transport", type=click.Choice(["stdio", "sse"]), default="stdio", help="Transport type", ) def main(port: int, transport: str) -> int: app = Server("mcp-simple-resource") @app.list_resources() async def list_resources() -> list[types.Resource]: return [ types.Resource( uri=FileUrl(f"file:///{name}.txt"), name=name, description=f"A sample text resource named {name}", mimeType="text/plain", ) for name in SAMPLE_RESOURCES.keys() ] @app.read_resource() async def read_resource(uri: FileUrl) -> str | bytes: name = uri.path.replace(".txt", "").lstrip("/") if name not in SAMPLE_RESOURCES: raise ValueError(f"Unknown resource: {uri}") return SAMPLE_RESOURCES[name] if transport == "sse": from mcp.server.sse import SseServerTransport from starlette.applications import Starlette from starlette.routing import Mount, Route sse = SseServerTransport("/messages/") async def handle_sse(request): async with sse.connect_sse( request.scope, request.receive, request._send ) as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) starlette_app = Starlette( debug=True, routes=[ Route("/sse", endpoint=handle_sse), Mount("/messages/", app=sse.handle_post_message), ], ) import uvicorn uvicorn.run(starlette_app, host="0.0.0.0", port=port) else: from mcp.server.stdio import stdio_server async def arun(): async with stdio_server() as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) anyio.run(arun) return 0 ================================================ File: examples/servers/simple-tool/README.md ================================================ A simple MCP server that exposes a website fetching tool. 
## Usage Start the server using either stdio (default) or SSE transport: ```bash # Using stdio transport (default) uv run mcp-simple-tool # Using SSE transport on custom port uv run mcp-simple-tool --transport sse --port 8000 ``` The server exposes a tool named "fetch" that accepts one required argument: - `url`: The URL of the website to fetch ## Example Using the MCP client, you can use the tool like this using the STDIO transport: ```python import asyncio from mcp.client.session import ClientSession from mcp.client.stdio import StdioServerParameters, stdio_client async def main(): async with stdio_client( StdioServerParameters(command="uv", args=["run", "mcp-simple-tool"]) ) as (read, write): async with ClientSession(read, write) as session: await session.initialize() # List available tools tools = await session.list_tools() print(tools) # Call the fetch tool result = await session.call_tool("fetch", {"url": "https://example.com"}) print(result) asyncio.run(main()) ``` ================================================ File: examples/servers/simple-tool/pyproject.toml ================================================ [project] name = "mcp-simple-tool" version = "0.1.0" description = "A simple MCP server exposing a website fetching tool" readme = "README.md" requires-python = ">=3.10" authors = [{ name = "Anthropic, PBC." 
}] maintainers = [ { name = "David Soria Parra", email = "davidsp@anthropic.com" }, { name = "Justin Spahr-Summers", email = "justin@anthropic.com" }, ] keywords = ["mcp", "llm", "automation", "web", "fetch"] license = { text = "MIT" } classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", ] dependencies = ["anyio>=4.5", "click>=8.1.0", "httpx>=0.27", "mcp"] [project.scripts] mcp-simple-tool = "mcp_simple_tool.server:main" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] packages = ["mcp_simple_tool"] [tool.pyright] include = ["mcp_simple_tool"] venvPath = "." venv = ".venv" [tool.ruff.lint] select = ["E", "F", "I"] ignore = [] [tool.ruff] line-length = 88 target-version = "py310" [tool.uv] dev-dependencies = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"] ================================================ File: examples/servers/simple-tool/.python-version ================================================ 3.10 ================================================ File: examples/servers/simple-tool/mcp_simple_tool/__init__.py ================================================ ================================================ File: examples/servers/simple-tool/mcp_simple_tool/__main__.py ================================================ import sys from server import main sys.exit(main()) ================================================ File: examples/servers/simple-tool/mcp_simple_tool/server.py ================================================ import anyio import click import httpx import mcp.types as types from mcp.server.lowlevel import Server async def fetch_website( url: str, ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]: headers = { "User-Agent": "MCP Test Server (github.com/modelcontextprotocol/python-sdk)" } async with 
httpx.AsyncClient(follow_redirects=True, headers=headers) as client: response = await client.get(url) response.raise_for_status() return [types.TextContent(type="text", text=response.text)] @click.command() @click.option("--port", default=8000, help="Port to listen on for SSE") @click.option( "--transport", type=click.Choice(["stdio", "sse"]), default="stdio", help="Transport type", ) def main(port: int, transport: str) -> int: app = Server("mcp-website-fetcher") @app.call_tool() async def fetch_tool( name: str, arguments: dict ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]: if name != "fetch": raise ValueError(f"Unknown tool: {name}") if "url" not in arguments: raise ValueError("Missing required argument 'url'") return await fetch_website(arguments["url"]) @app.list_tools() async def list_tools() -> list[types.Tool]: return [ types.Tool( name="fetch", description="Fetches a website and returns its content", inputSchema={ "type": "object", "required": ["url"], "properties": { "url": { "type": "string", "description": "URL to fetch", } }, }, ) ] if transport == "sse": from mcp.server.sse import SseServerTransport from starlette.applications import Starlette from starlette.routing import Mount, Route sse = SseServerTransport("/messages/") async def handle_sse(request): async with sse.connect_sse( request.scope, request.receive, request._send ) as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) starlette_app = Starlette( debug=True, routes=[ Route("/sse", endpoint=handle_sse), Mount("/messages/", app=sse.handle_post_message), ], ) import uvicorn uvicorn.run(starlette_app, host="0.0.0.0", port=port) else: from mcp.server.stdio import stdio_server async def arun(): async with stdio_server() as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) anyio.run(arun) return 0 ================================================ File: src/mcp/__init__.py 
================================================ from .client.session import ClientSession from .client.stdio import StdioServerParameters, stdio_client from .server.session import ServerSession from .server.stdio import stdio_server from .shared.exceptions import McpError from .types import ( CallToolRequest, ClientCapabilities, ClientNotification, ClientRequest, ClientResult, CompleteRequest, CreateMessageRequest, CreateMessageResult, ErrorData, GetPromptRequest, GetPromptResult, Implementation, IncludeContext, InitializedNotification, InitializeRequest, InitializeResult, JSONRPCError, JSONRPCRequest, JSONRPCResponse, ListPromptsRequest, ListPromptsResult, ListResourcesRequest, ListResourcesResult, ListToolsResult, LoggingLevel, LoggingMessageNotification, Notification, PingRequest, ProgressNotification, PromptsCapability, ReadResourceRequest, ReadResourceResult, Resource, ResourcesCapability, ResourceUpdatedNotification, RootsCapability, SamplingMessage, ServerCapabilities, ServerNotification, ServerRequest, ServerResult, SetLevelRequest, StopReason, SubscribeRequest, Tool, ToolsCapability, UnsubscribeRequest, ) from .types import ( Role as SamplingRole, ) __all__ = [ "CallToolRequest", "ClientCapabilities", "ClientNotification", "ClientRequest", "ClientResult", "ClientSession", "CreateMessageRequest", "CreateMessageResult", "ErrorData", "GetPromptRequest", "GetPromptResult", "Implementation", "IncludeContext", "InitializeRequest", "InitializeResult", "InitializedNotification", "JSONRPCError", "JSONRPCRequest", "ListPromptsRequest", "ListPromptsResult", "ListResourcesRequest", "ListResourcesResult", "ListToolsResult", "LoggingLevel", "LoggingMessageNotification", "McpError", "Notification", "PingRequest", "ProgressNotification", "PromptsCapability", "ReadResourceRequest", "ReadResourceResult", "ResourcesCapability", "ResourceUpdatedNotification", "Resource", "RootsCapability", "SamplingMessage", "SamplingRole", "ServerCapabilities", "ServerNotification", 
"ServerRequest", "ServerResult", "ServerSession", "SetLevelRequest", "StdioServerParameters", "StopReason", "SubscribeRequest", "Tool", "ToolsCapability", "UnsubscribeRequest", "stdio_client", "stdio_server", "CompleteRequest", "JSONRPCResponse", ] ================================================ File: src/mcp/types.py ================================================ from typing import ( Annotated, Any, Callable, Generic, Literal, TypeAlias, TypeVar, ) from pydantic import BaseModel, ConfigDict, Field, FileUrl, RootModel from pydantic.networks import AnyUrl, UrlConstraints """ Model Context Protocol bindings for Python These bindings were generated from https://github.com/modelcontextprotocol/specification, using Claude, with a prompt something like the following: Generate idiomatic Python bindings for this schema for MCP, or the "Model Context Protocol." The schema is defined in TypeScript, but there's also a JSON Schema version for reference. * For the bindings, let's use Pydantic V2 models. * Each model should allow extra fields everywhere, by specifying `model_config = ConfigDict(extra='allow')`. Do this in every case, instead of a custom base class. * Union types should be represented with a Pydantic `RootModel`. * Define additional model classes instead of using dictionaries. Do this even if they're not separate types in the schema. """ LATEST_PROTOCOL_VERSION = "2024-11-05" ProgressToken = str | int Cursor = str Role = Literal["user", "assistant"] RequestId = str | int AnyFunction: TypeAlias = Callable[..., Any] class RequestParams(BaseModel): class Meta(BaseModel): progressToken: ProgressToken | None = None """ If specified, the caller is requesting out-of-band progress notifications for this request (as represented by notifications/progress). The value of this parameter is an opaque token that will be attached to any subsequent notifications. The receiver is not obligated to provide these notifications. 
""" model_config = ConfigDict(extra="allow") meta: Meta | None = Field(alias="_meta", default=None) class NotificationParams(BaseModel): class Meta(BaseModel): model_config = ConfigDict(extra="allow") meta: Meta | None = Field(alias="_meta", default=None) """ This parameter name is reserved by MCP to allow clients and servers to attach additional metadata to their notifications. """ RequestParamsT = TypeVar("RequestParamsT", bound=RequestParams) NotificationParamsT = TypeVar("NotificationParamsT", bound=NotificationParams) MethodT = TypeVar("MethodT", bound=str) class Request(BaseModel, Generic[RequestParamsT, MethodT]): """Base class for JSON-RPC requests.""" method: MethodT params: RequestParamsT model_config = ConfigDict(extra="allow") class PaginatedRequest(Request[RequestParamsT, MethodT]): cursor: Cursor | None = None """ An opaque token representing the current pagination position. If provided, the server should return results starting after this cursor. """ class Notification(BaseModel, Generic[NotificationParamsT, MethodT]): """Base class for JSON-RPC notifications.""" method: MethodT model_config = ConfigDict(extra="allow") class Result(BaseModel): """Base class for JSON-RPC results.""" model_config = ConfigDict(extra="allow") meta: dict[str, Any] | None = Field(alias="_meta", default=None) """ This result property is reserved by the protocol to allow clients and servers to attach additional metadata to their responses. """ class PaginatedResult(Result): nextCursor: Cursor | None = None """ An opaque token representing the pagination position after the last returned result. If present, there may be more results available. 
""" class JSONRPCRequest(Request): """A request that expects a response.""" jsonrpc: Literal["2.0"] id: RequestId params: dict[str, Any] | None = None class JSONRPCNotification(Notification): """A notification which does not expect a response.""" jsonrpc: Literal["2.0"] params: dict[str, Any] | None = None class JSONRPCResponse(BaseModel): """A successful (non-error) response to a request.""" jsonrpc: Literal["2.0"] id: RequestId result: dict[str, Any] model_config = ConfigDict(extra="allow") # Standard JSON-RPC error codes PARSE_ERROR = -32700 INVALID_REQUEST = -32600 METHOD_NOT_FOUND = -32601 INVALID_PARAMS = -32602 INTERNAL_ERROR = -32603 class ErrorData(BaseModel): """Error information for JSON-RPC error responses.""" code: int """The error type that occurred.""" message: str """ A short description of the error. The message SHOULD be limited to a concise single sentence. """ data: Any | None = None """ Additional information about the error. The value of this member is defined by the sender (e.g. detailed error information, nested errors etc.). 
""" model_config = ConfigDict(extra="allow") class JSONRPCError(BaseModel): """A response to a request that indicates an error occurred.""" jsonrpc: Literal["2.0"] id: str | int error: ErrorData model_config = ConfigDict(extra="allow") class JSONRPCMessage( RootModel[JSONRPCRequest | JSONRPCNotification | JSONRPCResponse | JSONRPCError] ): pass class EmptyResult(Result): """A response that indicates success but carries no data.""" class Implementation(BaseModel): """Describes the name and version of an MCP implementation.""" name: str version: str model_config = ConfigDict(extra="allow") class RootsCapability(BaseModel): """Capability for root operations.""" listChanged: bool | None = None """Whether the client supports notifications for changes to the roots list.""" model_config = ConfigDict(extra="allow") class SamplingCapability(BaseModel): """Capability for logging operations.""" model_config = ConfigDict(extra="allow") class ClientCapabilities(BaseModel): """Capabilities a client may support.""" experimental: dict[str, dict[str, Any]] | None = None """Experimental, non-standard capabilities that the client supports.""" sampling: SamplingCapability | None = None """Present if the client supports sampling from an LLM.""" roots: RootsCapability | None = None """Present if the client supports listing roots.""" model_config = ConfigDict(extra="allow") class PromptsCapability(BaseModel): """Capability for prompts operations.""" listChanged: bool | None = None """Whether this server supports notifications for changes to the prompt list.""" model_config = ConfigDict(extra="allow") class ResourcesCapability(BaseModel): """Capability for resources operations.""" subscribe: bool | None = None """Whether this server supports subscribing to resource updates.""" listChanged: bool | None = None """Whether this server supports notifications for changes to the resource list.""" model_config = ConfigDict(extra="allow") class ToolsCapability(BaseModel): """Capability for tools 
operations.""" listChanged: bool | None = None """Whether this server supports notifications for changes to the tool list.""" model_config = ConfigDict(extra="allow") class LoggingCapability(BaseModel): """Capability for logging operations.""" model_config = ConfigDict(extra="allow") class ServerCapabilities(BaseModel): """Capabilities that a server may support.""" experimental: dict[str, dict[str, Any]] | None = None """Experimental, non-standard capabilities that the server supports.""" logging: LoggingCapability | None = None """Present if the server supports sending log messages to the client.""" prompts: PromptsCapability | None = None """Present if the server offers any prompt templates.""" resources: ResourcesCapability | None = None """Present if the server offers any resources to read.""" tools: ToolsCapability | None = None """Present if the server offers any tools to call.""" model_config = ConfigDict(extra="allow") class InitializeRequestParams(RequestParams): """Parameters for the initialize request.""" protocolVersion: str | int """The latest version of the Model Context Protocol that the client supports.""" capabilities: ClientCapabilities clientInfo: Implementation model_config = ConfigDict(extra="allow") class InitializeRequest(Request): """ This request is sent from the client to the server when it first connects, asking it to begin initialization. """ method: Literal["initialize"] params: InitializeRequestParams class InitializeResult(Result): """After receiving an initialize request from the client, the server sends this.""" protocolVersion: str | int """The version of the Model Context Protocol that the server wants to use.""" capabilities: ServerCapabilities serverInfo: Implementation instructions: str | None = None """Instructions describing how to use the server and its features.""" class InitializedNotification(Notification): """ This notification is sent from the client to the server after initialization has finished. 
""" method: Literal["notifications/initialized"] params: NotificationParams | None = None class PingRequest(Request): """ A ping, issued by either the server or the client, to check that the other party is still alive. """ method: Literal["ping"] params: RequestParams | None = None class ProgressNotificationParams(NotificationParams): """Parameters for progress notifications.""" progressToken: ProgressToken """ The progress token which was given in the initial request, used to associate this notification with the request that is proceeding. """ progress: float """ The progress thus far. This should increase every time progress is made, even if the total is unknown. """ total: float | None = None """Total number of items to process (or total progress required), if known.""" model_config = ConfigDict(extra="allow") class ProgressNotification(Notification): """ An out-of-band notification used to inform the receiver of a progress update for a long-running request. """ method: Literal["notifications/progress"] params: ProgressNotificationParams class ListResourcesRequest(PaginatedRequest): """Sent from the client to request a list of resources the server has.""" method: Literal["resources/list"] params: RequestParams | None = None class Annotations(BaseModel): audience: list[Role] | None = None priority: Annotated[float, Field(ge=0.0, le=1.0)] | None = None model_config = ConfigDict(extra="allow") class Resource(BaseModel): """A known resource that the server is capable of reading.""" uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] """The URI of this resource.""" name: str """A human-readable name for this resource.""" description: str | None = None """A description of what this resource represents.""" mimeType: str | None = None """The MIME type of this resource, if known.""" size: int | None = None """ The size of the raw resource content, in bytes (i.e., before base64 encoding or any tokenization), if known. 
This can be used by Hosts to display file sizes and estimate context window usage. """ annotations: Annotations | None = None model_config = ConfigDict(extra="allow") class ResourceTemplate(BaseModel): """A template description for resources available on the server.""" uriTemplate: str """ A URI template (according to RFC 6570) that can be used to construct resource URIs. """ name: str """A human-readable name for the type of resource this template refers to.""" description: str | None = None """A human-readable description of what this template is for.""" mimeType: str | None = None """ The MIME type for all resources that match this template. This should only be included if all resources matching this template have the same type. """ annotations: Annotations | None = None model_config = ConfigDict(extra="allow") class ListResourcesResult(PaginatedResult): """The server's response to a resources/list request from the client.""" resources: list[Resource] class ListResourceTemplatesRequest(PaginatedRequest): """Sent from the client to request a list of resource templates the server has.""" method: Literal["resources/templates/list"] params: RequestParams | None = None class ListResourceTemplatesResult(PaginatedResult): """The server's response to a resources/templates/list request from the client.""" resourceTemplates: list[ResourceTemplate] class ReadResourceRequestParams(RequestParams): """Parameters for reading a resource.""" uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] """ The URI of the resource to read. The URI can use any protocol; it is up to the server how to interpret it. 
""" model_config = ConfigDict(extra="allow") class ReadResourceRequest(Request): """Sent from the client to the server, to read a specific resource URI.""" method: Literal["resources/read"] params: ReadResourceRequestParams class ResourceContents(BaseModel): """The contents of a specific resource or sub-resource.""" uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] """The URI of this resource.""" mimeType: str | None = None """The MIME type of this resource, if known.""" model_config = ConfigDict(extra="allow") class TextResourceContents(ResourceContents): """Text contents of a resource.""" text: str """ The text of the item. This must only be set if the item can actually be represented as text (not binary data). """ class BlobResourceContents(ResourceContents): """Binary contents of a resource.""" blob: str """A base64-encoded string representing the binary data of the item.""" class ReadResourceResult(Result): """The server's response to a resources/read request from the client.""" contents: list[TextResourceContents | BlobResourceContents] class ResourceListChangedNotification(Notification): """ An optional notification from the server to the client, informing it that the list of resources it can read from has changed. """ method: Literal["notifications/resources/list_changed"] params: NotificationParams | None = None class SubscribeRequestParams(RequestParams): """Parameters for subscribing to a resource.""" uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] """ The URI of the resource to subscribe to. The URI can use any protocol; it is up to the server how to interpret it. """ model_config = ConfigDict(extra="allow") class SubscribeRequest(Request): """ Sent from the client to request resources/updated notifications from the server whenever a particular resource changes. 
""" method: Literal["resources/subscribe"] params: SubscribeRequestParams class UnsubscribeRequestParams(RequestParams): """Parameters for unsubscribing from a resource.""" uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] """The URI of the resource to unsubscribe from.""" model_config = ConfigDict(extra="allow") class UnsubscribeRequest(Request): """ Sent from the client to request cancellation of resources/updated notifications from the server. """ method: Literal["resources/unsubscribe"] params: UnsubscribeRequestParams class ResourceUpdatedNotificationParams(NotificationParams): """Parameters for resource update notifications.""" uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] """ The URI of the resource that has been updated. This might be a sub-resource of the one that the client actually subscribed to. """ model_config = ConfigDict(extra="allow") class ResourceUpdatedNotification(Notification): """ A notification from the server to the client, informing it that a resource has changed and may need to be read again. 
""" method: Literal["notifications/resources/updated"] params: ResourceUpdatedNotificationParams class ListPromptsRequest(PaginatedRequest): """Sent from the client to request a list of prompts and prompt templates.""" method: Literal["prompts/list"] params: RequestParams | None = None class PromptArgument(BaseModel): """An argument for a prompt template.""" name: str """The name of the argument.""" description: str | None = None """A human-readable description of the argument.""" required: bool | None = None """Whether this argument must be provided.""" model_config = ConfigDict(extra="allow") class Prompt(BaseModel): """A prompt or prompt template that the server offers.""" name: str """The name of the prompt or prompt template.""" description: str | None = None """An optional description of what this prompt provides.""" arguments: list[PromptArgument] | None = None """A list of arguments to use for templating the prompt.""" model_config = ConfigDict(extra="allow") class ListPromptsResult(PaginatedResult): """The server's response to a prompts/list request from the client.""" prompts: list[Prompt] class GetPromptRequestParams(RequestParams): """Parameters for getting a prompt.""" name: str """The name of the prompt or prompt template.""" arguments: dict[str, str] | None = None """Arguments to use for templating the prompt.""" model_config = ConfigDict(extra="allow") class GetPromptRequest(Request): """Used by the client to get a prompt provided by the server.""" method: Literal["prompts/get"] params: GetPromptRequestParams class TextContent(BaseModel): """Text content for a message.""" type: Literal["text"] text: str """The text content of the message.""" annotations: Annotations | None = None model_config = ConfigDict(extra="allow") class ImageContent(BaseModel): """Image content for a message.""" type: Literal["image"] data: str """The base64-encoded image data.""" mimeType: str """ The MIME type of the image. 
Different providers may support different image types. """ annotations: Annotations | None = None model_config = ConfigDict(extra="allow") class SamplingMessage(BaseModel): """Describes a message issued to or received from an LLM API.""" role: Role content: TextContent | ImageContent model_config = ConfigDict(extra="allow") class EmbeddedResource(BaseModel): """ The contents of a resource, embedded into a prompt or tool call result. It is up to the client how best to render embedded resources for the benefit of the LLM and/or the user. """ type: Literal["resource"] resource: TextResourceContents | BlobResourceContents annotations: Annotations | None = None model_config = ConfigDict(extra="allow") class PromptMessage(BaseModel): """Describes a message returned as part of a prompt.""" role: Role content: TextContent | ImageContent | EmbeddedResource model_config = ConfigDict(extra="allow") class GetPromptResult(Result): """The server's response to a prompts/get request from the client.""" description: str | None = None """An optional description for the prompt.""" messages: list[PromptMessage] class PromptListChangedNotification(Notification): """ An optional notification from the server to the client, informing it that the list of prompts it offers has changed. 
""" method: Literal["notifications/prompts/list_changed"] params: NotificationParams | None = None class ListToolsRequest(PaginatedRequest): """Sent from the client to request a list of tools the server has.""" method: Literal["tools/list"] params: RequestParams | None = None class Tool(BaseModel): """Definition for a tool the client can call.""" name: str """The name of the tool.""" description: str | None = None """A human-readable description of the tool.""" inputSchema: dict[str, Any] """A JSON Schema object defining the expected parameters for the tool.""" model_config = ConfigDict(extra="allow") class ListToolsResult(PaginatedResult): """The server's response to a tools/list request from the client.""" tools: list[Tool] class CallToolRequestParams(RequestParams): """Parameters for calling a tool.""" name: str arguments: dict[str, Any] | None = None model_config = ConfigDict(extra="allow") class CallToolRequest(Request): """Used by the client to invoke a tool provided by the server.""" method: Literal["tools/call"] params: CallToolRequestParams class CallToolResult(Result): """The server's response to a tool call.""" content: list[TextContent | ImageContent | EmbeddedResource] isError: bool = False class ToolListChangedNotification(Notification): """ An optional notification from the server to the client, informing it that the list of tools it offers has changed. 
""" method: Literal["notifications/tools/list_changed"] params: NotificationParams | None = None LoggingLevel = Literal[ "debug", "info", "notice", "warning", "error", "critical", "alert", "emergency" ] class SetLevelRequestParams(RequestParams): """Parameters for setting the logging level.""" level: LoggingLevel """The level of logging that the client wants to receive from the server.""" model_config = ConfigDict(extra="allow") class SetLevelRequest(Request): """A request from the client to the server, to enable or adjust logging.""" method: Literal["logging/setLevel"] params: SetLevelRequestParams class LoggingMessageNotificationParams(NotificationParams): """Parameters for logging message notifications.""" level: LoggingLevel """The severity of this log message.""" logger: str | None = None """An optional name of the logger issuing this message.""" data: Any """ The data to be logged, such as a string message or an object. Any JSON serializable type is allowed here. """ model_config = ConfigDict(extra="allow") class LoggingMessageNotification(Notification): """Notification of a log message passed from server to client.""" method: Literal["notifications/message"] params: LoggingMessageNotificationParams IncludeContext = Literal["none", "thisServer", "allServers"] class ModelHint(BaseModel): """Hints to use for model selection.""" name: str | None = None """A hint for a model name.""" model_config = ConfigDict(extra="allow") class ModelPreferences(BaseModel): """ The server's preferences for model selection, requested of the client during sampling. Because LLMs can vary along multiple dimensions, choosing the "best" model is rarely straightforward. Different models excel in different areas—some are faster but less capable, others are more capable but more expensive, and so on. This interface allows servers to express their priorities across multiple dimensions to help clients make an appropriate selection for their use case. These preferences are always advisory. 
The client MAY ignore them. It is also up to the client to decide how to interpret these preferences and how to balance them against other considerations. """ hints: list[ModelHint] | None = None """ Optional hints to use for model selection. If multiple hints are specified, the client MUST evaluate them in order (such that the first match is taken). The client SHOULD prioritize these hints over the numeric priorities, but MAY still use the priorities to select from ambiguous matches. """ costPriority: float | None = None """ How much to prioritize cost when selecting a model. A value of 0 means cost is not important, while a value of 1 means cost is the most important factor. """ speedPriority: float | None = None """ How much to prioritize sampling speed (latency) when selecting a model. A value of 0 means speed is not important, while a value of 1 means speed is the most important factor. """ intelligencePriority: float | None = None """ How much to prioritize intelligence and capabilities when selecting a model. A value of 0 means intelligence is not important, while a value of 1 means intelligence is the most important factor. """ model_config = ConfigDict(extra="allow") class CreateMessageRequestParams(RequestParams): """Parameters for creating a message.""" messages: list[SamplingMessage] modelPreferences: ModelPreferences | None = None """ The server's preferences for which model to select. The client MAY ignore these preferences. """ systemPrompt: str | None = None """An optional system prompt the server wants to use for sampling.""" includeContext: IncludeContext | None = None """ A request to include context from one or more MCP servers (including the caller), to be attached to the prompt. 
""" temperature: float | None = None maxTokens: int """The maximum number of tokens to sample, as requested by the server.""" stopSequences: list[str] | None = None metadata: dict[str, Any] | None = None """Optional metadata to pass through to the LLM provider.""" model_config = ConfigDict(extra="allow") class CreateMessageRequest(Request): """A request from the server to sample an LLM via the client.""" method: Literal["sampling/createMessage"] params: CreateMessageRequestParams StopReason = Literal["endTurn", "stopSequence", "maxTokens"] | str class CreateMessageResult(Result): """The client's response to a sampling/create_message request from the server.""" role: Role content: TextContent | ImageContent model: str """The name of the model that generated the message.""" stopReason: StopReason | None = None """The reason why sampling stopped, if known.""" class ResourceReference(BaseModel): """A reference to a resource or resource template definition.""" type: Literal["ref/resource"] uri: str """The URI or URI template of the resource.""" model_config = ConfigDict(extra="allow") class PromptReference(BaseModel): """Identifies a prompt.""" type: Literal["ref/prompt"] name: str """The name of the prompt or prompt template""" model_config = ConfigDict(extra="allow") class CompletionArgument(BaseModel): """The argument's information for completion requests.""" name: str """The name of the argument""" value: str """The value of the argument to use for completion matching.""" model_config = ConfigDict(extra="allow") class CompleteRequestParams(RequestParams): """Parameters for completion requests.""" ref: ResourceReference | PromptReference argument: CompletionArgument model_config = ConfigDict(extra="allow") class CompleteRequest(Request): """A request from the client to the server, to ask for completion options.""" method: Literal["completion/complete"] params: CompleteRequestParams class Completion(BaseModel): """Completion information.""" values: list[str] """An 
array of completion values. Must not exceed 100 items.""" total: int | None = None """ The total number of completion options available. This can exceed the number of values actually sent in the response. """ hasMore: bool | None = None """ Indicates whether there are additional completion options beyond those provided in the current response, even if the exact total is unknown. """ model_config = ConfigDict(extra="allow") class CompleteResult(Result): """The server's response to a completion/complete request""" completion: Completion class ListRootsRequest(Request): """ Sent from the server to request a list of root URIs from the client. Roots allow servers to ask for specific directories or files to operate on. A common example for roots is providing a set of repositories or directories a server should operate on. This request is typically used when the server needs to understand the file system structure or access specific locations that the client has permission to read from. """ method: Literal["roots/list"] params: RequestParams | None = None class Root(BaseModel): """Represents a root directory or file that the server can operate on.""" uri: FileUrl """ The URI identifying the root. This *must* start with file:// for now. This restriction may be relaxed in future versions of the protocol to allow other URI schemes. """ name: str | None = None """ An optional name for the root. This can be used to provide a human-readable identifier for the root, which may be useful for display purposes or for referencing the root in other parts of the application. """ model_config = ConfigDict(extra="allow") class ListRootsResult(Result): """ The client's response to a roots/list request from the server. This result contains an array of Root objects, each representing a root directory or file that the server can operate on. 
""" roots: list[Root] class RootsListChangedNotification(Notification): """ A notification from the client to the server, informing it that the list of roots has changed. This notification should be sent whenever the client adds, removes, or modifies any root. The server should then request an updated list of roots using the ListRootsRequest. """ method: Literal["notifications/roots/list_changed"] params: NotificationParams | None = None class CancelledNotificationParams(NotificationParams): """Parameters for cancellation notifications.""" requestId: RequestId """The ID of the request to cancel.""" reason: str | None = None """An optional string describing the reason for the cancellation.""" model_config = ConfigDict(extra="allow") class CancelledNotification(Notification): """ This notification can be sent by either side to indicate that it is cancelling a previously-issued request. """ method: Literal["notifications/cancelled"] params: CancelledNotificationParams class ClientRequest( RootModel[ PingRequest | InitializeRequest | CompleteRequest | SetLevelRequest | GetPromptRequest | ListPromptsRequest | ListResourcesRequest | ListResourceTemplatesRequest | ReadResourceRequest | SubscribeRequest | UnsubscribeRequest | CallToolRequest | ListToolsRequest ] ): pass class ClientNotification( RootModel[ CancelledNotification | ProgressNotification | InitializedNotification | RootsListChangedNotification ] ): pass class ClientResult(RootModel[EmptyResult | CreateMessageResult | ListRootsResult]): pass class ServerRequest(RootModel[PingRequest | CreateMessageRequest | ListRootsRequest]): pass class ServerNotification( RootModel[ CancelledNotification | ProgressNotification | LoggingMessageNotification | ResourceUpdatedNotification | ResourceListChangedNotification | ToolListChangedNotification | PromptListChangedNotification ] ): pass class ServerResult( RootModel[ EmptyResult | InitializeResult | CompleteResult | GetPromptResult | ListPromptsResult | ListResourcesResult 
| ListResourceTemplatesResult | ReadResourceResult | CallToolResult | ListToolsResult ] ): pass ================================================ File: src/mcp/cli/__init__.py ================================================ """FastMCP CLI package.""" from .cli import app if __name__ == "__main__": app() ================================================ File: src/mcp/cli/claude.py ================================================ """Claude app integration utilities.""" import json import sys from pathlib import Path from mcp.server.fastmcp.utilities.logging import get_logger logger = get_logger(__name__) MCP_PACKAGE = "mcp[cli]" def get_claude_config_path() -> Path | None: """Get the Claude config directory based on platform.""" if sys.platform == "win32": path = Path(Path.home(), "AppData", "Roaming", "Claude") elif sys.platform == "darwin": path = Path(Path.home(), "Library", "Application Support", "Claude") else: return None if path.exists(): return path return None def update_claude_config( file_spec: str, server_name: str, *, with_editable: Path | None = None, with_packages: list[str] | None = None, env_vars: dict[str, str] | None = None, ) -> bool: """Add or update a FastMCP server in Claude's configuration. Args: file_spec: Path to the server file, optionally with :object suffix server_name: Name for the server in Claude's config with_editable: Optional directory to install in editable mode with_packages: Optional list of additional packages to install env_vars: Optional dictionary of environment variables. These are merged with any existing variables, with new values taking precedence. Raises: RuntimeError: If Claude Desktop's config directory is not found, indicating Claude Desktop may not be installed or properly set up. """ config_dir = get_claude_config_path() if not config_dir: raise RuntimeError( "Claude Desktop config directory not found. Please ensure Claude Desktop" " is installed and has been run at least once to initialize its config." 
) config_file = config_dir / "claude_desktop_config.json" if not config_file.exists(): try: config_file.write_text("{}") except Exception as e: logger.error( "Failed to create Claude config file", extra={ "error": str(e), "config_file": str(config_file), }, ) return False try: config = json.loads(config_file.read_text()) if "mcpServers" not in config: config["mcpServers"] = {} # Always preserve existing env vars and merge with new ones if ( server_name in config["mcpServers"] and "env" in config["mcpServers"][server_name] ): existing_env = config["mcpServers"][server_name]["env"] if env_vars: # New vars take precedence over existing ones env_vars = {**existing_env, **env_vars} else: env_vars = existing_env # Build uv run command args = ["run"] # Collect all packages in a set to deduplicate packages = {MCP_PACKAGE} if with_packages: packages.update(pkg for pkg in with_packages if pkg) # Add all packages with --with for pkg in sorted(packages): args.extend(["--with", pkg]) if with_editable: args.extend(["--with-editable", str(with_editable)]) # Convert file path to absolute before adding to command # Split off any :object suffix first if ":" in file_spec: file_path, server_object = file_spec.rsplit(":", 1) file_spec = f"{Path(file_path).resolve()}:{server_object}" else: file_spec = str(Path(file_spec).resolve()) # Add fastmcp run command args.extend(["mcp", "run", file_spec]) server_config = { "command": "uv", "args": args, } # Add environment variables if specified if env_vars: server_config["env"] = env_vars config["mcpServers"][server_name] = server_config config_file.write_text(json.dumps(config, indent=2)) logger.info( f"Added server '{server_name}' to Claude config", extra={"config_file": str(config_file)}, ) return True except Exception as e: logger.error( "Failed to update Claude config", extra={ "error": str(e), "config_file": str(config_file), }, ) return False ================================================ File: src/mcp/cli/cli.py 
================================================ """MCP CLI tools.""" import importlib.metadata import importlib.util import os import subprocess import sys from pathlib import Path from typing import Annotated try: import typer except ImportError: print("Error: typer is required. Install with 'pip install mcp[cli]'") sys.exit(1) try: from mcp.cli import claude from mcp.server.fastmcp.utilities.logging import get_logger except ImportError: print("Error: mcp.server.fastmcp is not installed or not in PYTHONPATH") sys.exit(1) try: import dotenv except ImportError: dotenv = None logger = get_logger("cli") app = typer.Typer( name="mcp", help="MCP development tools", add_completion=False, no_args_is_help=True, # Show help if no args provided ) def _get_npx_command(): """Get the correct npx command for the current platform.""" if sys.platform == "win32": # Try both npx.cmd and npx.exe on Windows for cmd in ["npx.cmd", "npx.exe", "npx"]: try: subprocess.run( [cmd, "--version"], check=True, capture_output=True, shell=True ) return cmd except subprocess.CalledProcessError: continue return None return "npx" # On Unix-like systems, just use npx def _parse_env_var(env_var: str) -> tuple[str, str]: """Parse environment variable string in format KEY=VALUE.""" if "=" not in env_var: logger.error( f"Invalid environment variable format: {env_var}. 
Must be KEY=VALUE" ) sys.exit(1) key, value = env_var.split("=", 1) return key.strip(), value.strip() def _build_uv_command( file_spec: str, with_editable: Path | None = None, with_packages: list[str] | None = None, ) -> list[str]: """Build the uv run command that runs a MCP server through mcp run.""" cmd = ["uv"] cmd.extend(["run", "--with", "mcp"]) if with_editable: cmd.extend(["--with-editable", str(with_editable)]) if with_packages: for pkg in with_packages: if pkg: cmd.extend(["--with", pkg]) # Add mcp run command cmd.extend(["mcp", "run", file_spec]) return cmd def _parse_file_path(file_spec: str) -> tuple[Path, str | None]: """Parse a file path that may include a server object specification. Args: file_spec: Path to file, optionally with :object suffix Returns: Tuple of (file_path, server_object) """ # First check if we have a Windows path (e.g., C:\...) has_windows_drive = len(file_spec) > 1 and file_spec[1] == ":" # Split on the last colon, but only if it's not part of the Windows drive letter # and there's actually another colon in the string after the drive letter if ":" in (file_spec[2:] if has_windows_drive else file_spec): file_str, server_object = file_spec.rsplit(":", 1) else: file_str, server_object = file_spec, None # Resolve the file path file_path = Path(file_str).expanduser().resolve() if not file_path.exists(): logger.error(f"File not found: {file_path}") sys.exit(1) if not file_path.is_file(): logger.error(f"Not a file: {file_path}") sys.exit(1) return file_path, server_object def _import_server(file: Path, server_object: str | None = None): """Import a MCP server from a file. 
Args: file: Path to the file server_object: Optional object name in format "module:object" or just "object" Returns: The server object """ # Add parent directory to Python path so imports can be resolved file_dir = str(file.parent) if file_dir not in sys.path: sys.path.insert(0, file_dir) # Import the module spec = importlib.util.spec_from_file_location("server_module", file) if not spec or not spec.loader: logger.error("Could not load module", extra={"file": str(file)}) sys.exit(1) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) # If no object specified, try common server names if not server_object: # Look for the most common server object names for name in ["mcp", "server", "app"]: if hasattr(module, name): return getattr(module, name) logger.error( f"No server object found in {file}. Please either:\n" "1. Use a standard variable name (mcp, server, or app)\n" "2. Specify the object name with file:object syntax", extra={"file": str(file)}, ) sys.exit(1) # Handle module:object syntax if ":" in server_object: module_name, object_name = server_object.split(":", 1) try: server_module = importlib.import_module(module_name) server = getattr(server_module, object_name, None) except ImportError: logger.error( f"Could not import module '{module_name}'", extra={"file": str(file)}, ) sys.exit(1) else: # Just object name server = getattr(module, server_object, None) if server is None: logger.error( f"Server object '{server_object}' not found", extra={"file": str(file)}, ) sys.exit(1) return server @app.command() def version() -> None: """Show the MCP version.""" try: version = importlib.metadata.version("mcp") print(f"MCP version {version}") except importlib.metadata.PackageNotFoundError: print("MCP version unknown (package not installed)") sys.exit(1) @app.command() def dev( file_spec: str = typer.Argument( ..., help="Python file to run, optionally with :object suffix", ), with_editable: Annotated[ Path | None, typer.Option( "--with-editable", 
"-e", help="Directory containing pyproject.toml to install in editable mode", exists=True, file_okay=False, resolve_path=True, ), ] = None, with_packages: Annotated[ list[str], typer.Option( "--with", help="Additional packages to install", ), ] = [], ) -> None: """Run a MCP server with the MCP Inspector.""" file, server_object = _parse_file_path(file_spec) logger.debug( "Starting dev server", extra={ "file": str(file), "server_object": server_object, "with_editable": str(with_editable) if with_editable else None, "with_packages": with_packages, }, ) try: # Import server to get dependencies server = _import_server(file, server_object) if hasattr(server, "dependencies"): with_packages = list(set(with_packages + server.dependencies)) uv_cmd = _build_uv_command(file_spec, with_editable, with_packages) # Get the correct npx command npx_cmd = _get_npx_command() if not npx_cmd: logger.error( "npx not found. Please ensure Node.js and npm are properly installed " "and added to your system PATH." ) sys.exit(1) # Run the MCP Inspector command with shell=True on Windows shell = sys.platform == "win32" process = subprocess.run( [npx_cmd, "@modelcontextprotocol/inspector"] + uv_cmd, check=True, shell=shell, env=dict(os.environ.items()), # Convert to list of tuples for env update ) sys.exit(process.returncode) except subprocess.CalledProcessError as e: logger.error( "Dev server failed", extra={ "file": str(file), "error": str(e), "returncode": e.returncode, }, ) sys.exit(e.returncode) except FileNotFoundError: logger.error( "npx not found. Please ensure Node.js and npm are properly installed " "and added to your system PATH. 
You may need to restart your terminal " "after installation.", extra={"file": str(file)}, ) sys.exit(1) @app.command() def run( file_spec: str = typer.Argument( ..., help="Python file to run, optionally with :object suffix", ), transport: Annotated[ str | None, typer.Option( "--transport", "-t", help="Transport protocol to use (stdio or sse)", ), ] = None, ) -> None: """Run a MCP server. The server can be specified in two ways: 1. Module approach: server.py - runs the module directly, expecting a server.run() call 2. Import approach: server.py:app - imports and runs the specified server object Note: This command runs the server directly. You are responsible for ensuring all dependencies are available. For dependency management, use mcp install or mcp dev instead. """ file, server_object = _parse_file_path(file_spec) logger.debug( "Running server", extra={ "file": str(file), "server_object": server_object, "transport": transport, }, ) try: # Import and get server object server = _import_server(file, server_object) # Run the server kwargs = {} if transport: kwargs["transport"] = transport server.run(**kwargs) except Exception as e: logger.error( f"Failed to run server: {e}", extra={ "file": str(file), "error": str(e), }, ) sys.exit(1) @app.command() def install( file_spec: str = typer.Argument( ..., help="Python file to run, optionally with :object suffix", ), server_name: Annotated[ str | None, typer.Option( "--name", "-n", help="Custom name for the server (defaults to server's name attribute or" " file name)", ), ] = None, with_editable: Annotated[ Path | None, typer.Option( "--with-editable", "-e", help="Directory containing pyproject.toml to install in editable mode", exists=True, file_okay=False, resolve_path=True, ), ] = None, with_packages: Annotated[ list[str], typer.Option( "--with", help="Additional packages to install", ), ] = [], env_vars: Annotated[ list[str], typer.Option( "--env-var", "-v", help="Environment variables in KEY=VALUE format", ), ] = [], 
env_file: Annotated[ Path | None, typer.Option( "--env-file", "-f", help="Load environment variables from a .env file", exists=True, file_okay=True, dir_okay=False, resolve_path=True, ), ] = None, ) -> None: """Install a MCP server in the Claude desktop app. Environment variables are preserved once added and only updated if new values are explicitly provided. """ file, server_object = _parse_file_path(file_spec) logger.debug( "Installing server", extra={ "file": str(file), "server_name": server_name, "server_object": server_object, "with_editable": str(with_editable) if with_editable else None, "with_packages": with_packages, }, ) if not claude.get_claude_config_path(): logger.error("Claude app not found") sys.exit(1) # Try to import server to get its name, but fall back to file name if dependencies # missing name = server_name server = None if not name: try: server = _import_server(file, server_object) name = server.name except (ImportError, ModuleNotFoundError) as e: logger.debug( "Could not import server (likely missing dependencies), using file" " name", extra={"error": str(e)}, ) name = file.stem # Get server dependencies if available server_dependencies = getattr(server, "dependencies", []) if server else [] if server_dependencies: with_packages = list(set(with_packages + server_dependencies)) # Process environment variables if provided env_dict: dict[str, str] | None = None if env_file or env_vars: env_dict = {} # Load from .env file if specified if env_file: if dotenv: try: env_dict |= { k: v for k, v in dotenv.dotenv_values(env_file).items() if v is not None } except Exception as e: logger.error(f"Failed to load .env file: {e}") sys.exit(1) else: logger.error("python-dotenv is not installed. 
Cannot load .env file.") sys.exit(1) # Add command line environment variables for env_var in env_vars: key, value = _parse_env_var(env_var) env_dict[key] = value if claude.update_claude_config( file_spec, name, with_editable=with_editable, with_packages=with_packages, env_vars=env_dict, ): logger.info(f"Successfully installed {name} in Claude app") else: logger.error(f"Failed to install {name} in Claude app") sys.exit(1) ================================================ File: src/mcp/client/__main__.py ================================================ import argparse import logging import sys from functools import partial from urllib.parse import urlparse import anyio from mcp.client.session import ClientSession from mcp.client.sse import sse_client from mcp.client.stdio import StdioServerParameters, stdio_client if not sys.warnoptions: import warnings warnings.simplefilter("ignore") logging.basicConfig(level=logging.INFO) logger = logging.getLogger("client") async def receive_loop(session: ClientSession): logger.info("Starting receive loop") async for message in session.incoming_messages: if isinstance(message, Exception): logger.error("Error: %s", message) continue logger.info("Received message from server: %s", message) async def run_session(read_stream, write_stream): async with ( ClientSession(read_stream, write_stream) as session, anyio.create_task_group() as tg, ): tg.start_soon(receive_loop, session) logger.info("Initializing session") await session.initialize() logger.info("Initialized") async def main(command_or_url: str, args: list[str], env: list[tuple[str, str]]): env_dict = dict(env) if urlparse(command_or_url).scheme in ("http", "https"): # Use SSE client for HTTP(S) URLs async with sse_client(command_or_url) as streams: await run_session(*streams) else: # Use stdio client for commands server_parameters = StdioServerParameters( command=command_or_url, args=args, env=env_dict ) async with stdio_client(server_parameters) as streams: await 
run_session(*streams) def cli(): parser = argparse.ArgumentParser() parser.add_argument("command_or_url", help="Command or URL to connect to") parser.add_argument("args", nargs="*", help="Additional arguments") parser.add_argument( "-e", "--env", nargs=2, action="append", metavar=("KEY", "VALUE"), help="Environment variables to set. Can be used multiple times.", default=[], ) args = parser.parse_args() anyio.run(partial(main, args.command_or_url, args.args, args.env), backend="trio") if __name__ == "__main__": cli() ================================================ File: src/mcp/client/session.py ================================================ from datetime import timedelta from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from pydantic import AnyUrl import mcp.types as types from mcp.shared.session import BaseSession from mcp.shared.version import SUPPORTED_PROTOCOL_VERSIONS class ClientSession( BaseSession[ types.ClientRequest, types.ClientNotification, types.ClientResult, types.ServerRequest, types.ServerNotification, ] ): def __init__( self, read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception], write_stream: MemoryObjectSendStream[types.JSONRPCMessage], read_timeout_seconds: timedelta | None = None, ) -> None: super().__init__( read_stream, write_stream, types.ServerRequest, types.ServerNotification, read_timeout_seconds=read_timeout_seconds, ) async def initialize(self) -> types.InitializeResult: result = await self.send_request( types.ClientRequest( types.InitializeRequest( method="initialize", params=types.InitializeRequestParams( protocolVersion=types.LATEST_PROTOCOL_VERSION, capabilities=types.ClientCapabilities( sampling=None, experimental=None, roots=types.RootsCapability( # TODO: Should this be based on whether we # _will_ send notifications, or only whether # they're supported? 
listChanged=True ), ), clientInfo=types.Implementation(name="mcp", version="0.1.0"), ), ) ), types.InitializeResult, ) if result.protocolVersion not in SUPPORTED_PROTOCOL_VERSIONS: raise RuntimeError( "Unsupported protocol version from the server: " f"{result.protocolVersion}" ) await self.send_notification( types.ClientNotification( types.InitializedNotification(method="notifications/initialized") ) ) return result async def send_ping(self) -> types.EmptyResult: """Send a ping request.""" return await self.send_request( types.ClientRequest( types.PingRequest( method="ping", ) ), types.EmptyResult, ) async def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None ) -> None: """Send a progress notification.""" await self.send_notification( types.ClientNotification( types.ProgressNotification( method="notifications/progress", params=types.ProgressNotificationParams( progressToken=progress_token, progress=progress, total=total, ), ), ) ) async def set_logging_level(self, level: types.LoggingLevel) -> types.EmptyResult: """Send a logging/setLevel request.""" return await self.send_request( types.ClientRequest( types.SetLevelRequest( method="logging/setLevel", params=types.SetLevelRequestParams(level=level), ) ), types.EmptyResult, ) async def list_resources(self) -> types.ListResourcesResult: """Send a resources/list request.""" return await self.send_request( types.ClientRequest( types.ListResourcesRequest( method="resources/list", ) ), types.ListResourcesResult, ) async def list_resource_templates(self) -> types.ListResourceTemplatesResult: """Send a resources/templates/list request.""" return await self.send_request( types.ClientRequest( types.ListResourceTemplatesRequest( method="resources/templates/list", ) ), types.ListResourceTemplatesResult, ) async def read_resource(self, uri: AnyUrl) -> types.ReadResourceResult: """Send a resources/read request.""" return await self.send_request( types.ClientRequest( 
types.ReadResourceRequest( method="resources/read", params=types.ReadResourceRequestParams(uri=uri), ) ), types.ReadResourceResult, ) async def subscribe_resource(self, uri: AnyUrl) -> types.EmptyResult: """Send a resources/subscribe request.""" return await self.send_request( types.ClientRequest( types.SubscribeRequest( method="resources/subscribe", params=types.SubscribeRequestParams(uri=uri), ) ), types.EmptyResult, ) async def unsubscribe_resource(self, uri: AnyUrl) -> types.EmptyResult: """Send a resources/unsubscribe request.""" return await self.send_request( types.ClientRequest( types.UnsubscribeRequest( method="resources/unsubscribe", params=types.UnsubscribeRequestParams(uri=uri), ) ), types.EmptyResult, ) async def call_tool( self, name: str, arguments: dict | None = None ) -> types.CallToolResult: """Send a tools/call request.""" return await self.send_request( types.ClientRequest( types.CallToolRequest( method="tools/call", params=types.CallToolRequestParams(name=name, arguments=arguments), ) ), types.CallToolResult, ) async def list_prompts(self) -> types.ListPromptsResult: """Send a prompts/list request.""" return await self.send_request( types.ClientRequest( types.ListPromptsRequest( method="prompts/list", ) ), types.ListPromptsResult, ) async def get_prompt( self, name: str, arguments: dict[str, str] | None = None ) -> types.GetPromptResult: """Send a prompts/get request.""" return await self.send_request( types.ClientRequest( types.GetPromptRequest( method="prompts/get", params=types.GetPromptRequestParams(name=name, arguments=arguments), ) ), types.GetPromptResult, ) async def complete( self, ref: types.ResourceReference | types.PromptReference, argument: dict ) -> types.CompleteResult: """Send a completion/complete request.""" return await self.send_request( types.ClientRequest( types.CompleteRequest( method="completion/complete", params=types.CompleteRequestParams( ref=ref, argument=types.CompletionArgument(**argument), ), ) ), 
types.CompleteResult, ) async def list_tools(self) -> types.ListToolsResult: """Send a tools/list request.""" return await self.send_request( types.ClientRequest( types.ListToolsRequest( method="tools/list", ) ), types.ListToolsResult, ) async def send_roots_list_changed(self) -> None: """Send a roots/list_changed notification.""" await self.send_notification( types.ClientNotification( types.RootsListChangedNotification( method="notifications/roots/list_changed", ) ) ) ================================================ File: src/mcp/client/sse.py ================================================ import logging from contextlib import asynccontextmanager from typing import Any from urllib.parse import urljoin, urlparse import anyio import httpx from anyio.abc import TaskStatus from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from httpx_sse import aconnect_sse import mcp.types as types logger = logging.getLogger(__name__) def remove_request_params(url: str) -> str: return urljoin(url, urlparse(url).path) @asynccontextmanager async def sse_client( url: str, headers: dict[str, Any] | None = None, timeout: float = 5, sse_read_timeout: float = 60 * 5, ): """ Client transport for SSE. `sse_read_timeout` determines how long (in seconds) the client will wait for a new event before disconnecting. All other HTTP operations are controlled by `timeout`. 
""" read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] write_stream: MemoryObjectSendStream[types.JSONRPCMessage] write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] read_stream_writer, read_stream = anyio.create_memory_object_stream(0) write_stream, write_stream_reader = anyio.create_memory_object_stream(0) async with anyio.create_task_group() as tg: try: logger.info(f"Connecting to SSE endpoint: {remove_request_params(url)}") async with httpx.AsyncClient(headers=headers) as client: async with aconnect_sse( client, "GET", url, timeout=httpx.Timeout(timeout, read=sse_read_timeout), ) as event_source: event_source.response.raise_for_status() logger.debug("SSE connection established") async def sse_reader( task_status: TaskStatus[str] = anyio.TASK_STATUS_IGNORED, ): try: async for sse in event_source.aiter_sse(): logger.debug(f"Received SSE event: {sse.event}") match sse.event: case "endpoint": endpoint_url = urljoin(url, sse.data) logger.info( f"Received endpoint URL: {endpoint_url}" ) url_parsed = urlparse(url) endpoint_parsed = urlparse(endpoint_url) if ( url_parsed.netloc != endpoint_parsed.netloc or url_parsed.scheme != endpoint_parsed.scheme ): error_msg = ( "Endpoint origin does not match " f"connection origin: {endpoint_url}" ) logger.error(error_msg) raise ValueError(error_msg) task_status.started(endpoint_url) case "message": try: message = types.JSONRPCMessage.model_validate_json( # noqa: E501 sse.data ) logger.debug( f"Received server message: {message}" ) except Exception as exc: logger.error( f"Error parsing server message: {exc}" ) await read_stream_writer.send(exc) continue await read_stream_writer.send(message) except Exception as exc: logger.error(f"Error in sse_reader: {exc}") await read_stream_writer.send(exc) finally: await read_stream_writer.aclose() async def post_writer(endpoint_url: str): try: async with write_stream_reader: 
async for message in write_stream_reader: logger.debug(f"Sending client message: {message}") response = await client.post( endpoint_url, json=message.model_dump( by_alias=True, mode="json", exclude_none=True, ), ) response.raise_for_status() logger.debug( "Client message sent successfully: " f"{response.status_code}" ) except Exception as exc: logger.error(f"Error in post_writer: {exc}") finally: await write_stream.aclose() endpoint_url = await tg.start(sse_reader) logger.info( f"Starting post writer with endpoint URL: {endpoint_url}" ) tg.start_soon(post_writer, endpoint_url) try: yield read_stream, write_stream finally: tg.cancel_scope.cancel() finally: await read_stream_writer.aclose() await write_stream.aclose() ================================================ File: src/mcp/client/stdio.py ================================================ import os import sys from contextlib import asynccontextmanager from typing import Literal import anyio import anyio.lowlevel from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from anyio.streams.text import TextReceiveStream from pydantic import BaseModel, Field import mcp.types as types # Environment variables to inherit by default DEFAULT_INHERITED_ENV_VARS = ( [ "APPDATA", "HOMEDRIVE", "HOMEPATH", "LOCALAPPDATA", "PATH", "PROCESSOR_ARCHITECTURE", "SYSTEMDRIVE", "SYSTEMROOT", "TEMP", "USERNAME", "USERPROFILE", ] if sys.platform == "win32" else ["HOME", "LOGNAME", "PATH", "SHELL", "TERM", "USER"] ) def get_default_environment() -> dict[str, str]: """ Returns a default environment object including only environment variables deemed safe to inherit. 
""" env: dict[str, str] = {} for key in DEFAULT_INHERITED_ENV_VARS: value = os.environ.get(key) if value is None: continue if value.startswith("()"): # Skip functions, which are a security risk continue env[key] = value return env class StdioServerParameters(BaseModel): command: str """The executable to run to start the server.""" args: list[str] = Field(default_factory=list) """Command line arguments to pass to the executable.""" env: dict[str, str] | None = None """ The environment to use when spawning the process. If not specified, the result of get_default_environment() will be used. """ encoding: str = "utf-8" """ The text encoding used when sending/receiving messages to the server defaults to utf-8 """ encoding_error_handler: Literal["strict", "ignore", "replace"] = "strict" """ The text encoding error handler. See https://docs.python.org/3/library/codecs.html#codec-base-classes for explanations of possible values """ @asynccontextmanager async def stdio_client(server: StdioServerParameters): """ Client transport for stdio: this will connect to a server by spawning a process and communicating with it over stdin/stdout. 
""" read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] write_stream: MemoryObjectSendStream[types.JSONRPCMessage] write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] read_stream_writer, read_stream = anyio.create_memory_object_stream(0) write_stream, write_stream_reader = anyio.create_memory_object_stream(0) process = await anyio.open_process( [server.command, *server.args], env=server.env if server.env is not None else get_default_environment(), stderr=sys.stderr, ) async def stdout_reader(): assert process.stdout, "Opened process is missing stdout" try: async with read_stream_writer: buffer = "" async for chunk in TextReceiveStream( process.stdout, encoding=server.encoding, errors=server.encoding_error_handler, ): lines = (buffer + chunk).split("\n") buffer = lines.pop() for line in lines: try: message = types.JSONRPCMessage.model_validate_json(line) except Exception as exc: await read_stream_writer.send(exc) continue await read_stream_writer.send(message) except anyio.ClosedResourceError: await anyio.lowlevel.checkpoint() async def stdin_writer(): assert process.stdin, "Opened process is missing stdin" try: async with write_stream_reader: async for message in write_stream_reader: json = message.model_dump_json(by_alias=True, exclude_none=True) await process.stdin.send( (json + "\n").encode( encoding=server.encoding, errors=server.encoding_error_handler, ) ) except anyio.ClosedResourceError: await anyio.lowlevel.checkpoint() async with ( anyio.create_task_group() as tg, process, ): tg.start_soon(stdout_reader) tg.start_soon(stdin_writer) yield read_stream, write_stream ================================================ File: src/mcp/server/__init__.py ================================================ from .fastmcp import FastMCP from .lowlevel import NotificationOptions, Server __all__ = ["Server", "FastMCP", "NotificationOptions"] 
================================================ File: src/mcp/server/__main__.py ================================================ import importlib.metadata import logging import sys import anyio from mcp.server.models import InitializationOptions from mcp.server.session import ServerSession from mcp.server.stdio import stdio_server from mcp.types import ServerCapabilities if not sys.warnoptions: import warnings warnings.simplefilter("ignore") logging.basicConfig(level=logging.INFO) logger = logging.getLogger("server") async def receive_loop(session: ServerSession): logger.info("Starting receive loop") async for message in session.incoming_messages: if isinstance(message, Exception): logger.error("Error: %s", message) continue logger.info("Received message from client: %s", message) async def main(): version = importlib.metadata.version("mcp") async with stdio_server() as (read_stream, write_stream): async with ( ServerSession( read_stream, write_stream, InitializationOptions( server_name="mcp", server_version=version, capabilities=ServerCapabilities(), ), ) as session, write_stream, ): await receive_loop(session) if __name__ == "__main__": anyio.run(main, backend="trio") ================================================ File: src/mcp/server/models.py ================================================ """ This module provides simpler types to use with the server for managing prompts and tools. """ from pydantic import BaseModel from mcp.types import ( ServerCapabilities, ) class InitializationOptions(BaseModel): server_name: str server_version: str capabilities: ServerCapabilities instructions: str | None = None ================================================ File: src/mcp/server/session.py ================================================ """ ServerSession Module This module provides the ServerSession class, which manages communication between the server and client in the MCP (Model Context Protocol) framework. 
It is most commonly used in MCP servers to interact with the client. Common usage pattern: ``` server = Server(name) @server.call_tool() async def handle_tool_call(ctx: RequestContext, arguments: dict[str, Any]) -> Any: # Check client capabilities before proceeding if ctx.session.check_client_capability( types.ClientCapabilities(experimental={"advanced_tools": dict()}) ): # Perform advanced tool operations result = await perform_advanced_tool_operation(arguments) else: # Fall back to basic tool operations result = await perform_basic_tool_operation(arguments) return result @server.list_prompts() async def handle_list_prompts(ctx: RequestContext) -> list[types.Prompt]: # Access session for any necessary checks or operations if ctx.session.client_params: # Customize prompts based on client initialization parameters return generate_custom_prompts(ctx.session.client_params) else: return default_prompts ``` The ServerSession class is typically used internally by the Server class and should not be instantiated directly by users of the MCP framework. 
""" from enum import Enum from typing import Any import anyio import anyio.lowlevel from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from pydantic import AnyUrl import mcp.types as types from mcp.server.models import InitializationOptions from mcp.shared.session import ( BaseSession, RequestResponder, ) class InitializationState(Enum): NotInitialized = 1 Initializing = 2 Initialized = 3 class ServerSession( BaseSession[ types.ServerRequest, types.ServerNotification, types.ServerResult, types.ClientRequest, types.ClientNotification, ] ): _initialized: InitializationState = InitializationState.NotInitialized _client_params: types.InitializeRequestParams | None = None def __init__( self, read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception], write_stream: MemoryObjectSendStream[types.JSONRPCMessage], init_options: InitializationOptions, ) -> None: super().__init__( read_stream, write_stream, types.ClientRequest, types.ClientNotification ) self._initialization_state = InitializationState.NotInitialized self._init_options = init_options @property def client_params(self) -> types.InitializeRequestParams | None: return self._client_params def check_client_capability(self, capability: types.ClientCapabilities) -> bool: """Check if the client supports a specific capability.""" if self._client_params is None: return False # Get client capabilities from initialization params client_caps = self._client_params.capabilities # Check each specified capability in the passed in capability object if capability.roots is not None: if client_caps.roots is None: return False if capability.roots.listChanged and not client_caps.roots.listChanged: return False if capability.sampling is not None: if client_caps.sampling is None: return False if capability.experimental is not None: if client_caps.experimental is None: return False # Check each experimental capability for exp_key, exp_value in capability.experimental.items(): if ( exp_key 
not in client_caps.experimental or client_caps.experimental[exp_key] != exp_value ): return False return True async def _received_request( self, responder: RequestResponder[types.ClientRequest, types.ServerResult] ): match responder.request.root: case types.InitializeRequest(params=params): self._initialization_state = InitializationState.Initializing self._client_params = params with responder: await responder.respond( types.ServerResult( types.InitializeResult( protocolVersion=types.LATEST_PROTOCOL_VERSION, capabilities=self._init_options.capabilities, serverInfo=types.Implementation( name=self._init_options.server_name, version=self._init_options.server_version, ), instructions=self._init_options.instructions, ) ) ) case _: if self._initialization_state != InitializationState.Initialized: raise RuntimeError( "Received request before initialization was complete" ) async def _received_notification( self, notification: types.ClientNotification ) -> None: # Need this to avoid ASYNC910 await anyio.lowlevel.checkpoint() match notification.root: case types.InitializedNotification(): self._initialization_state = InitializationState.Initialized case _: if self._initialization_state != InitializationState.Initialized: raise RuntimeError( "Received notification before initialization was complete" ) async def send_log_message( self, level: types.LoggingLevel, data: Any, logger: str | None = None ) -> None: """Send a log message notification.""" await self.send_notification( types.ServerNotification( types.LoggingMessageNotification( method="notifications/message", params=types.LoggingMessageNotificationParams( level=level, data=data, logger=logger, ), ) ) ) async def send_resource_updated(self, uri: AnyUrl) -> None: """Send a resource updated notification.""" await self.send_notification( types.ServerNotification( types.ResourceUpdatedNotification( method="notifications/resources/updated", params=types.ResourceUpdatedNotificationParams(uri=uri), ) ) ) async def 
create_message( self, messages: list[types.SamplingMessage], *, max_tokens: int, system_prompt: str | None = None, include_context: types.IncludeContext | None = None, temperature: float | None = None, stop_sequences: list[str] | None = None, metadata: dict[str, Any] | None = None, model_preferences: types.ModelPreferences | None = None, ) -> types.CreateMessageResult: """Send a sampling/create_message request.""" return await self.send_request( types.ServerRequest( types.CreateMessageRequest( method="sampling/createMessage", params=types.CreateMessageRequestParams( messages=messages, systemPrompt=system_prompt, includeContext=include_context, temperature=temperature, maxTokens=max_tokens, stopSequences=stop_sequences, metadata=metadata, modelPreferences=model_preferences, ), ) ), types.CreateMessageResult, ) async def list_roots(self) -> types.ListRootsResult: """Send a roots/list request.""" return await self.send_request( types.ServerRequest( types.ListRootsRequest( method="roots/list", ) ), types.ListRootsResult, ) async def send_ping(self) -> types.EmptyResult: """Send a ping request.""" return await self.send_request( types.ServerRequest( types.PingRequest( method="ping", ) ), types.EmptyResult, ) async def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None ) -> None: """Send a progress notification.""" await self.send_notification( types.ServerNotification( types.ProgressNotification( method="notifications/progress", params=types.ProgressNotificationParams( progressToken=progress_token, progress=progress, total=total, ), ) ) ) async def send_resource_list_changed(self) -> None: """Send a resource list changed notification.""" await self.send_notification( types.ServerNotification( types.ResourceListChangedNotification( method="notifications/resources/list_changed", ) ) ) async def send_tool_list_changed(self) -> None: """Send a tool list changed notification.""" await self.send_notification( 
types.ServerNotification( types.ToolListChangedNotification( method="notifications/tools/list_changed", ) ) ) async def send_prompt_list_changed(self) -> None: """Send a prompt list changed notification.""" await self.send_notification( types.ServerNotification( types.PromptListChangedNotification( method="notifications/prompts/list_changed", ) ) ) ================================================ File: src/mcp/server/sse.py ================================================ """ SSE Server Transport Module This module implements a Server-Sent Events (SSE) transport layer for MCP servers. Example usage: ``` # Create an SSE transport at an endpoint sse = SseServerTransport("/messages/") # Create Starlette routes for SSE and message handling routes = [ Route("/sse", endpoint=handle_sse), Mount("/messages/", app=sse.handle_post_message), ] # Define handler functions async def handle_sse(request): async with sse.connect_sse( request.scope, request.receive, request._send ) as streams: await app.run( streams[0], streams[1], app.create_initialization_options() ) # Create and run Starlette app starlette_app = Starlette(routes=routes) uvicorn.run(starlette_app, host="0.0.0.0", port=port) ``` See SseServerTransport class documentation for more details. """ import logging from contextlib import asynccontextmanager from typing import Any from urllib.parse import quote from uuid import UUID, uuid4 import anyio from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from pydantic import ValidationError from sse_starlette import EventSourceResponse from starlette.requests import Request from starlette.responses import Response from starlette.types import Receive, Scope, Send import mcp.types as types logger = logging.getLogger(__name__) class SseServerTransport: """ SSE server transport for MCP. This class provides _two_ ASGI applications, suitable to be used with a framework like Starlette and a server like Hypercorn: 1. 
connect_sse() is an ASGI application which receives incoming GET requests, and sets up a new SSE stream to send server messages to the client. 2. handle_post_message() is an ASGI application which receives incoming POST requests, which should contain client messages that link to a previously-established SSE session. """ _endpoint: str _read_stream_writers: dict[ UUID, MemoryObjectSendStream[types.JSONRPCMessage | Exception] ] def __init__(self, endpoint: str) -> None: """ Creates a new SSE server transport, which will direct the client to POST messages to the relative or absolute URL given. """ super().__init__() self._endpoint = endpoint self._read_stream_writers = {} logger.debug(f"SseServerTransport initialized with endpoint: {endpoint}") @asynccontextmanager async def connect_sse(self, scope: Scope, receive: Receive, send: Send): if scope["type"] != "http": logger.error("connect_sse received non-HTTP request") raise ValueError("connect_sse can only handle HTTP requests") logger.debug("Setting up SSE connection") read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] write_stream: MemoryObjectSendStream[types.JSONRPCMessage] write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] read_stream_writer, read_stream = anyio.create_memory_object_stream(0) write_stream, write_stream_reader = anyio.create_memory_object_stream(0) session_id = uuid4() session_uri = f"{quote(self._endpoint)}?session_id={session_id.hex}" self._read_stream_writers[session_id] = read_stream_writer logger.debug(f"Created new session with ID: {session_id}") sse_stream_writer, sse_stream_reader = anyio.create_memory_object_stream[ dict[str, Any] ](0) async def sse_writer(): logger.debug("Starting SSE writer") async with sse_stream_writer, write_stream_reader: await sse_stream_writer.send({"event": "endpoint", "data": session_uri}) logger.debug(f"Sent endpoint event: {session_uri}") async 
for message in write_stream_reader: logger.debug(f"Sending message via SSE: {message}") await sse_stream_writer.send( { "event": "message", "data": message.model_dump_json( by_alias=True, exclude_none=True ), } ) async with anyio.create_task_group() as tg: response = EventSourceResponse( content=sse_stream_reader, data_sender_callable=sse_writer ) logger.debug("Starting SSE response task") tg.start_soon(response, scope, receive, send) logger.debug("Yielding read and write streams") yield (read_stream, write_stream) async def handle_post_message( self, scope: Scope, receive: Receive, send: Send ) -> None: logger.debug("Handling POST message") request = Request(scope, receive) session_id_param = request.query_params.get("session_id") if session_id_param is None: logger.warning("Received request without session_id") response = Response("session_id is required", status_code=400) return await response(scope, receive, send) try: session_id = UUID(hex=session_id_param) logger.debug(f"Parsed session ID: {session_id}") except ValueError: logger.warning(f"Received invalid session ID: {session_id_param}") response = Response("Invalid session ID", status_code=400) return await response(scope, receive, send) writer = self._read_stream_writers.get(session_id) if not writer: logger.warning(f"Could not find session for ID: {session_id}") response = Response("Could not find session", status_code=404) return await response(scope, receive, send) json = await request.json() logger.debug(f"Received JSON: {json}") try: message = types.JSONRPCMessage.model_validate(json) logger.debug(f"Validated client message: {message}") except ValidationError as err: logger.error(f"Failed to parse message: {err}") response = Response("Could not parse message", status_code=400) await response(scope, receive, send) await writer.send(err) return logger.debug(f"Sending message to writer: {message}") response = Response("Accepted", status_code=202) await response(scope, receive, send) await 
writer.send(message) ================================================ File: src/mcp/server/stdio.py ================================================ """ Stdio Server Transport Module This module provides functionality for creating an stdio-based transport layer that can be used to communicate with an MCP client through standard input/output streams. Example usage: ``` async def run_server(): async with stdio_server() as (read_stream, write_stream): # read_stream contains incoming JSONRPCMessages from stdin # write_stream allows sending JSONRPCMessages to stdout server = await create_my_server() await server.run(read_stream, write_stream, init_options) anyio.run(run_server) ``` """ import sys from contextlib import asynccontextmanager import anyio import anyio.lowlevel from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream import mcp.types as types @asynccontextmanager async def stdio_server( stdin: anyio.AsyncFile[str] | None = None, stdout: anyio.AsyncFile[str] | None = None, ): """ Server transport for stdio: this communicates with an MCP client by reading from the current process' stdin and writing to stdout. """ # Purposely not using context managers for these, as we don't want to close # standard process handles. 
if not stdin: stdin = anyio.wrap_file(sys.stdin) if not stdout: stdout = anyio.wrap_file(sys.stdout) read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] write_stream: MemoryObjectSendStream[types.JSONRPCMessage] write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] read_stream_writer, read_stream = anyio.create_memory_object_stream(0) write_stream, write_stream_reader = anyio.create_memory_object_stream(0) async def stdin_reader(): try: async with read_stream_writer: async for line in stdin: try: message = types.JSONRPCMessage.model_validate_json(line) except Exception as exc: await read_stream_writer.send(exc) continue await read_stream_writer.send(message) except anyio.ClosedResourceError: await anyio.lowlevel.checkpoint() async def stdout_writer(): try: async with write_stream_reader: async for message in write_stream_reader: json = message.model_dump_json(by_alias=True, exclude_none=True) await stdout.write(json + "\n") await stdout.flush() except anyio.ClosedResourceError: await anyio.lowlevel.checkpoint() async with anyio.create_task_group() as tg: tg.start_soon(stdin_reader) tg.start_soon(stdout_writer) yield read_stream, write_stream ================================================ File: src/mcp/server/websocket.py ================================================ import logging from contextlib import asynccontextmanager import anyio from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from starlette.types import Receive, Scope, Send from starlette.websockets import WebSocket import mcp.types as types logger = logging.getLogger(__name__) @asynccontextmanager async def websocket_server(scope: Scope, receive: Receive, send: Send): """ WebSocket server transport for MCP. This is an ASGI application, suitable to be used with a framework like Starlette and a server like Hypercorn. 
""" websocket = WebSocket(scope, receive, send) await websocket.accept(subprotocol="mcp") read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception] read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception] write_stream: MemoryObjectSendStream[types.JSONRPCMessage] write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage] read_stream_writer, read_stream = anyio.create_memory_object_stream(0) write_stream, write_stream_reader = anyio.create_memory_object_stream(0) async def ws_reader(): try: async with read_stream_writer: async for message in websocket.iter_json(): try: client_message = types.JSONRPCMessage.model_validate(message) except Exception as exc: await read_stream_writer.send(exc) continue await read_stream_writer.send(client_message) except anyio.ClosedResourceError: await websocket.close() async def ws_writer(): try: async with write_stream_reader: async for message in write_stream_reader: obj = message.model_dump( by_alias=True, mode="json", exclude_none=True ) await websocket.send_json(obj) except anyio.ClosedResourceError: await websocket.close() async with anyio.create_task_group() as tg: tg.start_soon(ws_reader) tg.start_soon(ws_writer) yield (read_stream, write_stream) ================================================ File: src/mcp/server/fastmcp/__init__.py ================================================ """FastMCP - A more ergonomic interface for MCP servers.""" from importlib.metadata import version from .server import Context, FastMCP from .utilities.types import Image __version__ = version("mcp") __all__ = ["FastMCP", "Context", "Image"] ================================================ File: src/mcp/server/fastmcp/exceptions.py ================================================ """Custom exceptions for FastMCP.""" class FastMCPError(Exception): """Base error for FastMCP.""" class ValidationError(FastMCPError): """Error in validating parameters or return values.""" class ResourceError(FastMCPError): 
"""Error in resource operations.""" class ToolError(FastMCPError): """Error in tool operations.""" class InvalidSignature(Exception): """Invalid signature for use with FastMCP.""" ================================================ File: src/mcp/server/fastmcp/server.py ================================================ """FastMCP - A more ergonomic interface for MCP servers.""" import inspect import json import re from collections.abc import AsyncIterator from contextlib import ( AbstractAsyncContextManager, asynccontextmanager, ) from itertools import chain from typing import Any, Callable, Generic, Literal, Sequence import anyio import pydantic_core import uvicorn from pydantic import BaseModel, Field from pydantic.networks import AnyUrl from pydantic_settings import BaseSettings, SettingsConfigDict from mcp.server.fastmcp.exceptions import ResourceError from mcp.server.fastmcp.prompts import Prompt, PromptManager from mcp.server.fastmcp.resources import FunctionResource, Resource, ResourceManager from mcp.server.fastmcp.tools import ToolManager from mcp.server.fastmcp.utilities.logging import configure_logging, get_logger from mcp.server.fastmcp.utilities.types import Image from mcp.server.lowlevel.helper_types import ReadResourceContents from mcp.server.lowlevel.server import ( LifespanResultT, ) from mcp.server.lowlevel.server import ( Server as MCPServer, ) from mcp.server.lowlevel.server import ( lifespan as default_lifespan, ) from mcp.server.sse import SseServerTransport from mcp.server.stdio import stdio_server from mcp.shared.context import RequestContext from mcp.types import ( AnyFunction, EmbeddedResource, GetPromptResult, ImageContent, TextContent, ) from mcp.types import ( Prompt as MCPPrompt, ) from mcp.types import ( PromptArgument as MCPPromptArgument, ) from mcp.types import ( Resource as MCPResource, ) from mcp.types import ( ResourceTemplate as MCPResourceTemplate, ) from mcp.types import ( Tool as MCPTool, ) logger = get_logger(__name__) class 
Settings(BaseSettings, Generic[LifespanResultT]): """FastMCP server settings. All settings can be configured via environment variables with the prefix FASTMCP_. For example, FASTMCP_DEBUG=true will set debug=True. """ model_config = SettingsConfigDict( env_prefix="FASTMCP_", env_file=".env", extra="ignore", ) # Server settings debug: bool = False log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "INFO" # HTTP settings host: str = "0.0.0.0" port: int = 8000 # resource settings warn_on_duplicate_resources: bool = True # tool settings warn_on_duplicate_tools: bool = True # prompt settings warn_on_duplicate_prompts: bool = True dependencies: list[str] = Field( default_factory=list, description="List of dependencies to install in the server environment", ) lifespan: ( Callable[["FastMCP"], AbstractAsyncContextManager[LifespanResultT]] | None ) = Field(None, description="Lifespan context manager") def lifespan_wrapper( app: "FastMCP", lifespan: Callable[["FastMCP"], AbstractAsyncContextManager[LifespanResultT]], ) -> Callable[[MCPServer], AbstractAsyncContextManager[object]]: @asynccontextmanager async def wrap(s: MCPServer) -> AsyncIterator[object]: async with lifespan(app) as context: yield context return wrap class FastMCP: def __init__( self, name: str | None = None, instructions: str | None = None, **settings: Any ): self.settings = Settings(**settings) self._mcp_server = MCPServer( name=name or "FastMCP", instructions=instructions, lifespan=lifespan_wrapper(self, self.settings.lifespan) if self.settings.lifespan else default_lifespan, ) self._tool_manager = ToolManager( warn_on_duplicate_tools=self.settings.warn_on_duplicate_tools ) self._resource_manager = ResourceManager( warn_on_duplicate_resources=self.settings.warn_on_duplicate_resources ) self._prompt_manager = PromptManager( warn_on_duplicate_prompts=self.settings.warn_on_duplicate_prompts ) self.dependencies = self.settings.dependencies # Set up MCP protocol handlers 
self._setup_handlers() # Configure logging configure_logging(self.settings.log_level) @property def name(self) -> str: return self._mcp_server.name @property def instructions(self) -> str | None: return self._mcp_server.instructions def run(self, transport: Literal["stdio", "sse"] = "stdio") -> None: """Run the FastMCP server. Note this is a synchronous function. Args: transport: Transport protocol to use ("stdio" or "sse") """ TRANSPORTS = Literal["stdio", "sse"] if transport not in TRANSPORTS.__args__: # type: ignore raise ValueError(f"Unknown transport: {transport}") if transport == "stdio": anyio.run(self.run_stdio_async) else: # transport == "sse" anyio.run(self.run_sse_async) def _setup_handlers(self) -> None: """Set up core MCP protocol handlers.""" self._mcp_server.list_tools()(self.list_tools) self._mcp_server.call_tool()(self.call_tool) self._mcp_server.list_resources()(self.list_resources) self._mcp_server.read_resource()(self.read_resource) self._mcp_server.list_prompts()(self.list_prompts) self._mcp_server.get_prompt()(self.get_prompt) self._mcp_server.list_resource_templates()(self.list_resource_templates) async def list_tools(self) -> list[MCPTool]: """List all available tools.""" tools = self._tool_manager.list_tools() return [ MCPTool( name=info.name, description=info.description, inputSchema=info.parameters, ) for info in tools ] def get_context(self) -> "Context": """ Returns a Context object. Note that the context will only be valid during a request; outside a request, most methods will error. 
""" try: request_context = self._mcp_server.request_context except LookupError: request_context = None return Context(request_context=request_context, fastmcp=self) async def call_tool( self, name: str, arguments: dict[str, Any] ) -> Sequence[TextContent | ImageContent | EmbeddedResource]: """Call a tool by name with arguments.""" context = self.get_context() result = await self._tool_manager.call_tool(name, arguments, context=context) converted_result = _convert_to_content(result) return converted_result async def list_resources(self) -> list[MCPResource]: """List all available resources.""" resources = self._resource_manager.list_resources() return [ MCPResource( uri=resource.uri, name=resource.name or "", description=resource.description, mimeType=resource.mime_type, ) for resource in resources ] async def list_resource_templates(self) -> list[MCPResourceTemplate]: templates = self._resource_manager.list_templates() return [ MCPResourceTemplate( uriTemplate=template.uri_template, name=template.name, description=template.description, ) for template in templates ] async def read_resource(self, uri: AnyUrl | str) -> ReadResourceContents: """Read a resource by URI.""" resource = await self._resource_manager.get_resource(uri) if not resource: raise ResourceError(f"Unknown resource: {uri}") try: content = await resource.read() return ReadResourceContents(content=content, mime_type=resource.mime_type) except Exception as e: logger.error(f"Error reading resource {uri}: {e}") raise ResourceError(str(e)) def add_tool( self, fn: AnyFunction, name: str | None = None, description: str | None = None, ) -> None: """Add a tool to the server. The tool function can optionally request a Context object by adding a parameter with the Context type annotation. See the @tool decorator for examples. 
Args: fn: The function to register as a tool name: Optional name for the tool (defaults to function name) description: Optional description of what the tool does """ self._tool_manager.add_tool(fn, name=name, description=description) def tool( self, name: str | None = None, description: str | None = None ) -> Callable[[AnyFunction], AnyFunction]: """Decorator to register a tool. Tools can optionally request a Context object by adding a parameter with the Context type annotation. The context provides access to MCP capabilities like logging, progress reporting, and resource access. Args: name: Optional name for the tool (defaults to function name) description: Optional description of what the tool does Example: @server.tool() def my_tool(x: int) -> str: return str(x) @server.tool() def tool_with_context(x: int, ctx: Context) -> str: ctx.info(f"Processing {x}") return str(x) @server.tool() async def async_tool(x: int, context: Context) -> str: await context.report_progress(50, 100) return str(x) """ # Check if user passed function directly instead of calling decorator if callable(name): raise TypeError( "The @tool decorator was used incorrectly. " "Did you forget to call it? Use @tool() instead of @tool" ) def decorator(fn: AnyFunction) -> AnyFunction: self.add_tool(fn, name=name, description=description) return fn return decorator def add_resource(self, resource: Resource) -> None: """Add a resource to the server. Args: resource: A Resource instance to add """ self._resource_manager.add_resource(resource) def resource( self, uri: str, *, name: str | None = None, description: str | None = None, mime_type: str | None = None, ) -> Callable[[AnyFunction], AnyFunction]: """Decorator to register a function as a resource. The function will be called when the resource is read to generate its content. The function can return: - str for text content - bytes for binary content - other types will be converted to JSON If the URI contains parameters (e.g. 
"resource://{param}") or the function has parameters, it will be registered as a template resource. Args: uri: URI for the resource (e.g. "resource://my-resource" or "resource://{param}") name: Optional name for the resource description: Optional description of the resource mime_type: Optional MIME type for the resource Example: @server.resource("resource://my-resource") def get_data() -> str: return "Hello, world!" @server.resource("resource://my-resource") async get_data() -> str: data = await fetch_data() return f"Hello, world! {data}" @server.resource("resource://{city}/weather") def get_weather(city: str) -> str: return f"Weather for {city}" @server.resource("resource://{city}/weather") async def get_weather(city: str) -> str: data = await fetch_weather(city) return f"Weather for {city}: {data}" """ # Check if user passed function directly instead of calling decorator if callable(uri): raise TypeError( "The @resource decorator was used incorrectly. " "Did you forget to call it? Use @resource('uri') instead of @resource" ) def decorator(fn: AnyFunction) -> AnyFunction: # Check if this should be a template has_uri_params = "{" in uri and "}" in uri has_func_params = bool(inspect.signature(fn).parameters) if has_uri_params or has_func_params: # Validate that URI params match function params uri_params = set(re.findall(r"{(\w+)}", uri)) func_params = set(inspect.signature(fn).parameters.keys()) if uri_params != func_params: raise ValueError( f"Mismatch between URI parameters {uri_params} " f"and function parameters {func_params}" ) # Register as template self._resource_manager.add_template( fn=fn, uri_template=uri, name=name, description=description, mime_type=mime_type or "text/plain", ) else: # Register as regular resource resource = FunctionResource( uri=AnyUrl(uri), name=name, description=description, mime_type=mime_type or "text/plain", fn=fn, ) self.add_resource(resource) return fn return decorator def add_prompt(self, prompt: Prompt) -> None: """Add a 
prompt to the server. Args: prompt: A Prompt instance to add """ self._prompt_manager.add_prompt(prompt) def prompt( self, name: str | None = None, description: str | None = None ) -> Callable[[AnyFunction], AnyFunction]: """Decorator to register a prompt. Args: name: Optional name for the prompt (defaults to function name) description: Optional description of what the prompt does Example: @server.prompt() def analyze_table(table_name: str) -> list[Message]: schema = read_table_schema(table_name) return [ { "role": "user", "content": f"Analyze this schema:\n{schema}" } ] @server.prompt() async def analyze_file(path: str) -> list[Message]: content = await read_file(path) return [ { "role": "user", "content": { "type": "resource", "resource": { "uri": f"file://{path}", "text": content } } } ] """ # Check if user passed function directly instead of calling decorator if callable(name): raise TypeError( "The @prompt decorator was used incorrectly. " "Did you forget to call it? Use @prompt() instead of @prompt" ) def decorator(func: AnyFunction) -> AnyFunction: prompt = Prompt.from_function(func, name=name, description=description) self.add_prompt(prompt) return func return decorator async def run_stdio_async(self) -> None: """Run the server using stdio transport.""" async with stdio_server() as (read_stream, write_stream): await self._mcp_server.run( read_stream, write_stream, self._mcp_server.create_initialization_options(), ) async def run_sse_async(self) -> None: """Run the server using SSE transport.""" from starlette.applications import Starlette from starlette.routing import Mount, Route sse = SseServerTransport("/messages/") async def handle_sse(request): async with sse.connect_sse( request.scope, request.receive, request._send ) as streams: await self._mcp_server.run( streams[0], streams[1], self._mcp_server.create_initialization_options(), ) starlette_app = Starlette( debug=self.settings.debug, routes=[ Route("/sse", endpoint=handle_sse), Mount("/messages/", 
app=sse.handle_post_message), ], ) config = uvicorn.Config( starlette_app, host=self.settings.host, port=self.settings.port, log_level=self.settings.log_level.lower(), ) server = uvicorn.Server(config) await server.serve() async def list_prompts(self) -> list[MCPPrompt]: """List all available prompts.""" prompts = self._prompt_manager.list_prompts() return [ MCPPrompt( name=prompt.name, description=prompt.description, arguments=[ MCPPromptArgument( name=arg.name, description=arg.description, required=arg.required, ) for arg in (prompt.arguments or []) ], ) for prompt in prompts ] async def get_prompt( self, name: str, arguments: dict[str, Any] | None = None ) -> GetPromptResult: """Get a prompt by name with arguments.""" try: messages = await self._prompt_manager.render_prompt(name, arguments) return GetPromptResult(messages=pydantic_core.to_jsonable_python(messages)) except Exception as e: logger.error(f"Error getting prompt {name}: {e}") raise ValueError(str(e)) def _convert_to_content( result: Any, ) -> Sequence[TextContent | ImageContent | EmbeddedResource]: """Convert a result to a sequence of content objects.""" if result is None: return [] if isinstance(result, (TextContent, ImageContent, EmbeddedResource)): return [result] if isinstance(result, Image): return [result.to_image_content()] if isinstance(result, (list, tuple)): return list(chain.from_iterable(_convert_to_content(item) for item in result)) if not isinstance(result, str): try: result = json.dumps(pydantic_core.to_jsonable_python(result)) except Exception: result = str(result) return [TextContent(type="text", text=result)] class Context(BaseModel): """Context object providing access to MCP capabilities. This provides a cleaner interface to MCP's RequestContext functionality. It gets injected into tool and resource functions that request it via type hints. 
To use context in a tool function, add a parameter with the Context type annotation: ```python @server.tool() def my_tool(x: int, ctx: Context) -> str: # Log messages to the client ctx.info(f"Processing {x}") ctx.debug("Debug info") ctx.warning("Warning message") ctx.error("Error message") # Report progress ctx.report_progress(50, 100) # Access resources data = ctx.read_resource("resource://data") # Get request info request_id = ctx.request_id client_id = ctx.client_id return str(x) ``` The context parameter name can be anything as long as it's annotated with Context. The context is optional - tools that don't need it can omit the parameter. """ _request_context: RequestContext | None _fastmcp: FastMCP | None def __init__( self, *, request_context: RequestContext | None = None, fastmcp: FastMCP | None = None, **kwargs: Any, ): super().__init__(**kwargs) self._request_context = request_context self._fastmcp = fastmcp @property def fastmcp(self) -> FastMCP: """Access to the FastMCP server.""" if self._fastmcp is None: raise ValueError("Context is not available outside of a request") return self._fastmcp @property def request_context(self) -> RequestContext: """Access to the underlying request context.""" if self._request_context is None: raise ValueError("Context is not available outside of a request") return self._request_context async def report_progress( self, progress: float, total: float | None = None ) -> None: """Report progress for the current operation. Args: progress: Current progress value e.g. 24 total: Optional total value e.g. 100 """ progress_token = ( self.request_context.meta.progressToken if self.request_context.meta else None ) if progress_token is None: return await self.request_context.session.send_progress_notification( progress_token=progress_token, progress=progress, total=total ) async def read_resource(self, uri: str | AnyUrl) -> ReadResourceContents: """Read a resource by URI. 
Args: uri: Resource URI to read Returns: The resource content as either text or bytes """ assert ( self._fastmcp is not None ), "Context is not available outside of a request" return await self._fastmcp.read_resource(uri) async def log( self, level: Literal["debug", "info", "warning", "error"], message: str, *, logger_name: str | None = None, ) -> None: """Send a log message to the client. Args: level: Log level (debug, info, warning, error) message: Log message logger_name: Optional logger name **extra: Additional structured data to include """ await self.request_context.session.send_log_message( level=level, data=message, logger=logger_name ) @property def client_id(self) -> str | None: """Get the client ID if available.""" return ( getattr(self.request_context.meta, "client_id", None) if self.request_context.meta else None ) @property def request_id(self) -> str: """Get the unique ID for this request.""" return str(self.request_context.request_id) @property def session(self): """Access to the underlying session for advanced usage.""" return self.request_context.session # Convenience methods for common log levels async def debug(self, message: str, **extra: Any) -> None: """Send a debug log message.""" await self.log("debug", message, **extra) async def info(self, message: str, **extra: Any) -> None: """Send an info log message.""" await self.log("info", message, **extra) async def warning(self, message: str, **extra: Any) -> None: """Send a warning log message.""" await self.log("warning", message, **extra) async def error(self, message: str, **extra: Any) -> None: """Send an error log message.""" await self.log("error", message, **extra) ================================================ File: src/mcp/server/fastmcp/prompts/__init__.py ================================================ from .base import Prompt from .manager import PromptManager __all__ = ["Prompt", "PromptManager"] ================================================ File: 
src/mcp/server/fastmcp/prompts/base.py ================================================ """Base classes for FastMCP prompts.""" import inspect import json from collections.abc import Callable from typing import Any, Awaitable, Literal, Sequence import pydantic_core from pydantic import BaseModel, Field, TypeAdapter, validate_call from mcp.types import EmbeddedResource, ImageContent, TextContent CONTENT_TYPES = TextContent | ImageContent | EmbeddedResource class Message(BaseModel): """Base class for all prompt messages.""" role: Literal["user", "assistant"] content: CONTENT_TYPES def __init__(self, content: str | CONTENT_TYPES, **kwargs): if isinstance(content, str): content = TextContent(type="text", text=content) super().__init__(content=content, **kwargs) class UserMessage(Message): """A message from the user.""" role: Literal["user", "assistant"] = "user" def __init__(self, content: str | CONTENT_TYPES, **kwargs): super().__init__(content=content, **kwargs) class AssistantMessage(Message): """A message from the assistant.""" role: Literal["user", "assistant"] = "assistant" def __init__(self, content: str | CONTENT_TYPES, **kwargs): super().__init__(content=content, **kwargs) message_validator = TypeAdapter(UserMessage | AssistantMessage) SyncPromptResult = ( str | Message | dict[str, Any] | Sequence[str | Message | dict[str, Any]] ) PromptResult = SyncPromptResult | Awaitable[SyncPromptResult] class PromptArgument(BaseModel): """An argument that can be passed to a prompt.""" name: str = Field(description="Name of the argument") description: str | None = Field( None, description="Description of what the argument does" ) required: bool = Field( default=False, description="Whether the argument is required" ) class Prompt(BaseModel): """A prompt template that can be rendered with parameters.""" name: str = Field(description="Name of the prompt") description: str | None = Field( None, description="Description of what the prompt does" ) arguments: list[PromptArgument] 
| None = Field( None, description="Arguments that can be passed to the prompt" ) fn: Callable = Field(exclude=True) @classmethod def from_function( cls, fn: Callable[..., PromptResult], name: str | None = None, description: str | None = None, ) -> "Prompt": """Create a Prompt from a function. The function can return: - A string (converted to a message) - A Message object - A dict (converted to a message) - A sequence of any of the above """ func_name = name or fn.__name__ if func_name == "<lambda>": raise ValueError("You must provide a name for lambda functions") # Get schema from TypeAdapter - will fail if function isn't properly typed parameters = TypeAdapter(fn).json_schema() # Convert parameters to PromptArguments arguments = [] if "properties" in parameters: for param_name, param in parameters["properties"].items(): required = param_name in parameters.get("required", []) arguments.append( PromptArgument( name=param_name, description=param.get("description"), required=required, ) ) # ensure the arguments are properly cast fn = validate_call(fn) return cls( name=func_name, description=description or fn.__doc__ or "", arguments=arguments, fn=fn, ) async def render(self, arguments: dict[str, Any] | None = None) -> list[Message]: """Render the prompt with arguments.""" # Validate required arguments if self.arguments: required = {arg.name for arg in self.arguments if arg.required} provided = set(arguments or {}) missing = required - provided if missing: raise ValueError(f"Missing required arguments: {missing}") try: # Call function and check if result is a coroutine result = self.fn(**(arguments or {})) if inspect.iscoroutine(result): result = await result # Validate messages if not isinstance(result, (list, tuple)): result = [result] # Convert result to messages messages = [] for msg in result: try: if isinstance(msg, Message): messages.append(msg) elif isinstance(msg, dict): msg = message_validator.validate_python(msg) messages.append(msg) elif isinstance(msg, 
str): messages.append( UserMessage(content=TextContent(type="text", text=msg)) ) else: msg = json.dumps(pydantic_core.to_jsonable_python(msg)) messages.append(Message(role="user", content=msg)) except Exception: raise ValueError( f"Could not convert prompt result to message: {msg}" ) return messages except Exception as e: raise ValueError(f"Error rendering prompt {self.name}: {e}") ================================================ File: src/mcp/server/fastmcp/prompts/manager.py ================================================ """Prompt management functionality.""" from typing import Any from mcp.server.fastmcp.prompts.base import Message, Prompt from mcp.server.fastmcp.utilities.logging import get_logger logger = get_logger(__name__) class PromptManager: """Manages FastMCP prompts.""" def __init__(self, warn_on_duplicate_prompts: bool = True): self._prompts: dict[str, Prompt] = {} self.warn_on_duplicate_prompts = warn_on_duplicate_prompts def get_prompt(self, name: str) -> Prompt | None: """Get prompt by name.""" return self._prompts.get(name) def list_prompts(self) -> list[Prompt]: """List all registered prompts.""" return list(self._prompts.values()) def add_prompt( self, prompt: Prompt, ) -> Prompt: """Add a prompt to the manager.""" # Check for duplicates existing = self._prompts.get(prompt.name) if existing: if self.warn_on_duplicate_prompts: logger.warning(f"Prompt already exists: {prompt.name}") return existing self._prompts[prompt.name] = prompt return prompt async def render_prompt( self, name: str, arguments: dict[str, Any] | None = None ) -> list[Message]: """Render a prompt by name with arguments.""" prompt = self.get_prompt(name) if not prompt: raise ValueError(f"Unknown prompt: {name}") return await prompt.render(arguments) ================================================ File: src/mcp/server/fastmcp/prompts/prompt_manager.py ================================================ """Prompt management functionality.""" from mcp.server.fastmcp.prompts.base 
import Prompt from mcp.server.fastmcp.utilities.logging import get_logger logger = get_logger(__name__) class PromptManager: """Manages FastMCP prompts.""" def __init__(self, warn_on_duplicate_prompts: bool = True): self._prompts: dict[str, Prompt] = {} self.warn_on_duplicate_prompts = warn_on_duplicate_prompts def add_prompt(self, prompt: Prompt) -> Prompt: """Add a prompt to the manager.""" logger.debug(f"Adding prompt: {prompt.name}") existing = self._prompts.get(prompt.name) if existing: if self.warn_on_duplicate_prompts: logger.warning(f"Prompt already exists: {prompt.name}") return existing self._prompts[prompt.name] = prompt return prompt def get_prompt(self, name: str) -> Prompt | None: """Get prompt by name.""" return self._prompts.get(name) def list_prompts(self) -> list[Prompt]: """List all registered prompts.""" return list(self._prompts.values()) ================================================ File: src/mcp/server/fastmcp/resources/__init__.py ================================================ from .base import Resource from .resource_manager import ResourceManager from .templates import ResourceTemplate from .types import ( BinaryResource, DirectoryResource, FileResource, FunctionResource, HttpResource, TextResource, ) __all__ = [ "Resource", "TextResource", "BinaryResource", "FunctionResource", "FileResource", "HttpResource", "DirectoryResource", "ResourceTemplate", "ResourceManager", ] ================================================ File: src/mcp/server/fastmcp/resources/base.py ================================================ """Base classes and interfaces for FastMCP resources.""" import abc from typing import Annotated from pydantic import ( AnyUrl, BaseModel, ConfigDict, Field, UrlConstraints, ValidationInfo, field_validator, ) class Resource(BaseModel, abc.ABC): """Base class for all resources.""" model_config = ConfigDict(validate_default=True) uri: Annotated[AnyUrl, UrlConstraints(host_required=False)] = Field( default=..., description="URI 
of the resource" ) name: str | None = Field(description="Name of the resource", default=None) description: str | None = Field( description="Description of the resource", default=None ) mime_type: str = Field( default="text/plain", description="MIME type of the resource content", pattern=r"^[a-zA-Z0-9]+/[a-zA-Z0-9\-+.]+$", ) @field_validator("name", mode="before") @classmethod def set_default_name(cls, name: str | None, info: ValidationInfo) -> str: """Set default name from URI if not provided.""" if name: return name if uri := info.data.get("uri"): return str(uri) raise ValueError("Either name or uri must be provided") @abc.abstractmethod async def read(self) -> str | bytes: """Read the resource content.""" pass ================================================ File: src/mcp/server/fastmcp/resources/resource_manager.py ================================================ """Resource manager functionality.""" from typing import Callable from pydantic import AnyUrl from mcp.server.fastmcp.resources.base import Resource from mcp.server.fastmcp.resources.templates import ResourceTemplate from mcp.server.fastmcp.utilities.logging import get_logger logger = get_logger(__name__) class ResourceManager: """Manages FastMCP resources.""" def __init__(self, warn_on_duplicate_resources: bool = True): self._resources: dict[str, Resource] = {} self._templates: dict[str, ResourceTemplate] = {} self.warn_on_duplicate_resources = warn_on_duplicate_resources def add_resource(self, resource: Resource) -> Resource: """Add a resource to the manager. Args: resource: A Resource instance to add Returns: The added resource. If a resource with the same URI already exists, returns the existing resource. 
""" logger.debug( "Adding resource", extra={ "uri": resource.uri, "type": type(resource).__name__, "resource_name": resource.name, }, ) existing = self._resources.get(str(resource.uri)) if existing: if self.warn_on_duplicate_resources: logger.warning(f"Resource already exists: {resource.uri}") return existing self._resources[str(resource.uri)] = resource return resource def add_template( self, fn: Callable, uri_template: str, name: str | None = None, description: str | None = None, mime_type: str | None = None, ) -> ResourceTemplate: """Add a template from a function.""" template = ResourceTemplate.from_function( fn, uri_template=uri_template, name=name, description=description, mime_type=mime_type, ) self._templates[template.uri_template] = template return template async def get_resource(self, uri: AnyUrl | str) -> Resource | None: """Get resource by URI, checking concrete resources first, then templates.""" uri_str = str(uri) logger.debug("Getting resource", extra={"uri": uri_str}) # First check concrete resources if resource := self._resources.get(uri_str): return resource # Then check templates for template in self._templates.values(): if params := template.matches(uri_str): try: return await template.create_resource(uri_str, params) except Exception as e: raise ValueError(f"Error creating resource from template: {e}") raise ValueError(f"Unknown resource: {uri}") def list_resources(self) -> list[Resource]: """List all registered resources.""" logger.debug("Listing resources", extra={"count": len(self._resources)}) return list(self._resources.values()) def list_templates(self) -> list[ResourceTemplate]: """List all registered templates.""" logger.debug("Listing templates", extra={"count": len(self._templates)}) return list(self._templates.values()) ================================================ File: src/mcp/server/fastmcp/resources/templates.py ================================================ """Resource template functionality.""" import inspect import re 
from typing import Any, Callable from pydantic import BaseModel, Field, TypeAdapter, validate_call from mcp.server.fastmcp.resources.types import FunctionResource, Resource class ResourceTemplate(BaseModel): """A template for dynamically creating resources.""" uri_template: str = Field( description="URI template with parameters (e.g. weather://{city}/current)" ) name: str = Field(description="Name of the resource") description: str | None = Field(description="Description of what the resource does") mime_type: str = Field( default="text/plain", description="MIME type of the resource content" ) fn: Callable = Field(exclude=True) parameters: dict = Field(description="JSON schema for function parameters") @classmethod def from_function( cls, fn: Callable, uri_template: str, name: str | None = None, description: str | None = None, mime_type: str | None = None, ) -> "ResourceTemplate": """Create a template from a function.""" func_name = name or fn.__name__ if func_name == "<lambda>": raise ValueError("You must provide a name for lambda functions") # Get schema from TypeAdapter - will fail if function isn't properly typed parameters = TypeAdapter(fn).json_schema() # ensure the arguments are properly cast fn = validate_call(fn) return cls( uri_template=uri_template, name=func_name, description=description or fn.__doc__ or "", mime_type=mime_type or "text/plain", fn=fn, parameters=parameters, ) def matches(self, uri: str) -> dict[str, Any] | None: """Check if URI matches template and extract parameters.""" # Convert template to regex pattern pattern = self.uri_template.replace("{", "(?P<").replace("}", ">[^/]+)") match = re.match(f"^{pattern}$", uri) if match: return match.groupdict() return None async def create_resource(self, uri: str, params: dict[str, Any]) -> Resource: """Create a resource from the template with the given parameters.""" try: # Call function and check if result is a coroutine result = self.fn(**params) if inspect.iscoroutine(result): result = await 
result return FunctionResource( uri=uri, # type: ignore name=self.name, description=self.description, mime_type=self.mime_type, fn=lambda: result, # Capture result in closure ) except Exception as e: raise ValueError(f"Error creating resource from template: {e}") ================================================ File: src/mcp/server/fastmcp/resources/types.py ================================================ """Concrete resource implementations.""" import inspect import json from collections.abc import Callable from pathlib import Path from typing import Any import anyio import anyio.to_thread import httpx import pydantic.json import pydantic_core from pydantic import Field, ValidationInfo from mcp.server.fastmcp.resources.base import Resource class TextResource(Resource): """A resource that reads from a string.""" text: str = Field(description="Text content of the resource") async def read(self) -> str: """Read the text content.""" return self.text class BinaryResource(Resource): """A resource that reads from bytes.""" data: bytes = Field(description="Binary content of the resource") async def read(self) -> bytes: """Read the binary content.""" return self.data class FunctionResource(Resource): """A resource that defers data loading by wrapping a function. The function is only called when the resource is read, allowing for lazy loading of potentially expensive data. This is particularly useful when listing resources, as the function won't be called until the resource is actually accessed. 
The function can return: - str for text content (default) - bytes for binary content - other types will be converted to JSON """ fn: Callable[[], Any] = Field(exclude=True) async def read(self) -> str | bytes: """Read the resource by calling the wrapped function.""" try: result = ( await self.fn() if inspect.iscoroutinefunction(self.fn) else self.fn() ) if isinstance(result, Resource): return await result.read() if isinstance(result, bytes): return result if isinstance(result, str): return result try: return json.dumps(pydantic_core.to_jsonable_python(result)) except (TypeError, pydantic_core.PydanticSerializationError): # If JSON serialization fails, try str() return str(result) except Exception as e: raise ValueError(f"Error reading resource {self.uri}: {e}") class FileResource(Resource): """A resource that reads from a file. Set is_binary=True to read file as binary data instead of text. """ path: Path = Field(description="Path to the file") is_binary: bool = Field( default=False, description="Whether to read the file as binary data", ) mime_type: str = Field( default="text/plain", description="MIME type of the resource content", ) @pydantic.field_validator("path") @classmethod def validate_absolute_path(cls, path: Path) -> Path: """Ensure path is absolute.""" if not path.is_absolute(): raise ValueError("Path must be absolute") return path @pydantic.field_validator("is_binary") @classmethod def set_binary_from_mime_type(cls, is_binary: bool, info: ValidationInfo) -> bool: """Set is_binary based on mime_type if not explicitly set.""" if is_binary: return True mime_type = info.data.get("mime_type", "text/plain") return not mime_type.startswith("text/") async def read(self) -> str | bytes: """Read the file content.""" try: if self.is_binary: return await anyio.to_thread.run_sync(self.path.read_bytes) return await anyio.to_thread.run_sync(self.path.read_text) except Exception as e: raise ValueError(f"Error reading file {self.path}: {e}") class 
HttpResource(Resource): """A resource that reads from an HTTP endpoint.""" url: str = Field(description="URL to fetch content from") mime_type: str = Field( default="application/json", description="MIME type of the resource content" ) async def read(self) -> str | bytes: """Read the HTTP content.""" async with httpx.AsyncClient() as client: response = await client.get(self.url) response.raise_for_status() return response.text class DirectoryResource(Resource): """A resource that lists files in a directory.""" path: Path = Field(description="Path to the directory") recursive: bool = Field( default=False, description="Whether to list files recursively" ) pattern: str | None = Field( default=None, description="Optional glob pattern to filter files" ) mime_type: str = Field( default="application/json", description="MIME type of the resource content" ) @pydantic.field_validator("path") @classmethod def validate_absolute_path(cls, path: Path) -> Path: """Ensure path is absolute.""" if not path.is_absolute(): raise ValueError("Path must be absolute") return path def list_files(self) -> list[Path]: """List files in the directory.""" if not self.path.exists(): raise FileNotFoundError(f"Directory not found: {self.path}") if not self.path.is_dir(): raise NotADirectoryError(f"Not a directory: {self.path}") try: if self.pattern: return ( list(self.path.glob(self.pattern)) if not self.recursive else list(self.path.rglob(self.pattern)) ) return ( list(self.path.glob("*")) if not self.recursive else list(self.path.rglob("*")) ) except Exception as e: raise ValueError(f"Error listing directory {self.path}: {e}") async def read(self) -> str: # Always returns JSON string """Read the directory listing.""" try: files = await anyio.to_thread.run_sync(self.list_files) file_list = [str(f.relative_to(self.path)) for f in files if f.is_file()] return json.dumps({"files": file_list}, indent=2) except Exception as e: raise ValueError(f"Error reading directory {self.path}: {e}") 
================================================
File: src/mcp/server/fastmcp/tools/__init__.py
================================================
from .base import Tool
from .tool_manager import ToolManager

__all__ = ["Tool", "ToolManager"]


================================================
File: src/mcp/server/fastmcp/tools/base.py
================================================
import inspect
from typing import TYPE_CHECKING, Any, Callable

from pydantic import BaseModel, Field

import mcp.server.fastmcp
from mcp.server.fastmcp.exceptions import ToolError
from mcp.server.fastmcp.utilities.func_metadata import FuncMetadata, func_metadata

if TYPE_CHECKING:
    from mcp.server.fastmcp.server import Context


class Tool(BaseModel):
    """Internal tool registration info."""

    fn: Callable = Field(exclude=True)
    name: str = Field(description="Name of the tool")
    description: str = Field(description="Description of what the tool does")
    parameters: dict = Field(description="JSON schema for tool parameters")
    fn_metadata: FuncMetadata = Field(
        description="Metadata about the function including a pydantic model for tool"
        " arguments"
    )
    is_async: bool = Field(description="Whether the tool is async")
    context_kwarg: str | None = Field(
        None, description="Name of the kwarg that should receive context"
    )

    @classmethod
    def from_function(
        cls,
        fn: Callable,
        name: str | None = None,
        description: str | None = None,
        context_kwarg: str | None = None,
    ) -> "Tool":
        """Create a Tool from a function.

        Args:
            fn: the function to expose as a tool
            name: explicit tool name; defaults to fn.__name__ (required for
                lambdas, which have no usable name)
            description: explicit description; defaults to fn.__doc__
            context_kwarg: name of the parameter that receives the Context;
                auto-detected from type annotations when not given

        Raises:
            ValueError: if fn is a lambda and no name is provided.
        """
        func_name = name or fn.__name__

        if func_name == "<lambda>":
            raise ValueError("You must provide a name for lambda functions")

        func_doc = description or fn.__doc__ or ""
        is_async = inspect.iscoroutinefunction(fn)

        # Find context parameter if it exists
        if context_kwarg is None:
            sig = inspect.signature(fn)
            for param_name, param in sig.parameters.items():
                # Identity check against the Context class itself.
                if param.annotation is mcp.server.fastmcp.Context:
                    context_kwarg = param_name
                    break

        # The context parameter is injected separately at call time, so it is
        # excluded from the argument model / JSON schema.
        func_arg_metadata = func_metadata(
            fn,
            skip_names=[context_kwarg] if context_kwarg is not None else [],
        )
        parameters = func_arg_metadata.arg_model.model_json_schema()

        return cls(
            fn=fn,
            name=func_name,
            description=func_doc,
            parameters=parameters,
            fn_metadata=func_arg_metadata,
            is_async=is_async,
            context_kwarg=context_kwarg,
        )

    async def run(self, arguments: dict, context: "Context | None" = None) -> Any:
        """Run the tool with arguments.

        Raises:
            ToolError: wrapping any exception raised by the tool function.
        """
        try:
            return await self.fn_metadata.call_fn_with_arg_validation(
                self.fn,
                self.is_async,
                arguments,
                {self.context_kwarg: context}
                if self.context_kwarg is not None
                else None,
            )
        except Exception as e:
            raise ToolError(f"Error executing tool {self.name}: {e}") from e


================================================
File: src/mcp/server/fastmcp/tools/tool_manager.py
================================================
from collections.abc import Callable
from typing import TYPE_CHECKING, Any

from mcp.server.fastmcp.exceptions import ToolError
from mcp.server.fastmcp.tools.base import Tool
from mcp.server.fastmcp.utilities.logging import get_logger

if TYPE_CHECKING:
    from mcp.server.fastmcp.server import Context

logger = get_logger(__name__)


class ToolManager:
    """Manages FastMCP tools."""

    def __init__(self, warn_on_duplicate_tools: bool = True):
        self._tools: dict[str, Tool] = {}
        self.warn_on_duplicate_tools = warn_on_duplicate_tools

    def get_tool(self, name: str) -> Tool | None:
        """Get tool by name."""
        return self._tools.get(name)

    def list_tools(self) -> list[Tool]:
        """List all registered tools."""
        return list(self._tools.values())

    def add_tool(
        self,
        fn: Callable,
        name: str | None = None,
        description: str | None = None,
    ) -> Tool:
        """Add a tool to the server.

        If a tool with the same name already exists, the existing tool is
        returned unchanged (optionally with a warning).
        """
        tool = Tool.from_function(fn, name=name, description=description)
        existing = self._tools.get(tool.name)
        if existing:
            if self.warn_on_duplicate_tools:
                logger.warning(f"Tool already exists: {tool.name}")
            return existing
        self._tools[tool.name] = tool
        return tool

    async def call_tool(
        self, name: str, arguments: dict, context: "Context | None" = None
    ) -> Any:
        """Call a tool by name with arguments.

        Raises:
            ToolError: if no tool with the given name is registered.
        """
        tool = self.get_tool(name)
        if not tool:
            raise ToolError(f"Unknown tool: {name}")

        return await tool.run(arguments, context=context)


================================================
File: src/mcp/server/fastmcp/utilities/__init__.py
================================================
"""FastMCP utility modules."""


================================================
File: src/mcp/server/fastmcp/utilities/func_metadata.py
================================================
import inspect
import json
from collections.abc import Awaitable, Callable, Sequence
from typing import (
    Annotated,
    Any,
    ForwardRef,
)

from pydantic import BaseModel, ConfigDict, Field, WithJsonSchema, create_model
from pydantic._internal._typing_extra import eval_type_backport
from pydantic.fields import FieldInfo
from pydantic_core import PydanticUndefined

from mcp.server.fastmcp.exceptions import InvalidSignature
from mcp.server.fastmcp.utilities.logging import get_logger

logger = get_logger(__name__)


class ArgModelBase(BaseModel):
    """A model representing the arguments to a function."""

    def model_dump_one_level(self) -> dict[str, Any]:
        """Return a dict of the model's fields, one level deep.

        That is, sub-models etc are not dumped - they are kept as
        pydantic models.
""" kwargs: dict[str, Any] = {} for field_name in self.model_fields.keys(): kwargs[field_name] = getattr(self, field_name) return kwargs model_config = ConfigDict( arbitrary_types_allowed=True, ) class FuncMetadata(BaseModel): arg_model: Annotated[type[ArgModelBase], WithJsonSchema(None)] # We can add things in the future like # - Maybe some args are excluded from attempting to parse from JSON # - Maybe some args are special (like context) for dependency injection async def call_fn_with_arg_validation( self, fn: Callable[..., Any] | Awaitable[Any], fn_is_async: bool, arguments_to_validate: dict[str, Any], arguments_to_pass_directly: dict[str, Any] | None, ) -> Any: """Call the given function with arguments validated and injected. Arguments are first attempted to be parsed from JSON, then validated against the argument model, before being passed to the function. """ arguments_pre_parsed = self.pre_parse_json(arguments_to_validate) arguments_parsed_model = self.arg_model.model_validate(arguments_pre_parsed) arguments_parsed_dict = arguments_parsed_model.model_dump_one_level() arguments_parsed_dict |= arguments_to_pass_directly or {} if fn_is_async: if isinstance(fn, Awaitable): return await fn return await fn(**arguments_parsed_dict) if isinstance(fn, Callable): return fn(**arguments_parsed_dict) raise TypeError("fn must be either Callable or Awaitable") def pre_parse_json(self, data: dict[str, Any]) -> dict[str, Any]: """Pre-parse data from JSON. Return a dict with same keys as input but with values parsed from JSON if appropriate. This is to handle cases like `["a", "b", "c"]` being passed in as JSON inside a string rather than an actual list. Claude desktop is prone to this - in fact it seems incapable of NOT doing this. For sub-models, it tends to pass dicts (JSON objects) as JSON strings, which can be pre-parsed here. 
""" new_data = data.copy() # Shallow copy for field_name, field_info in self.arg_model.model_fields.items(): if field_name not in data.keys(): continue if isinstance(data[field_name], str): try: pre_parsed = json.loads(data[field_name]) except json.JSONDecodeError: continue # Not JSON - skip if isinstance(pre_parsed, str): # This is likely that the raw value is e.g. `"hello"` which we # Should really be parsed as '"hello"' in Python - but if we parse # it as JSON it'll turn into just 'hello'. So we skip it. continue new_data[field_name] = pre_parsed assert new_data.keys() == data.keys() return new_data model_config = ConfigDict( arbitrary_types_allowed=True, ) def func_metadata(func: Callable, skip_names: Sequence[str] = ()) -> FuncMetadata: """Given a function, return metadata including a pydantic model representing its signature. The use case for this is ``` meta = func_to_pyd(func) validated_args = meta.arg_model.model_validate(some_raw_data_dict) return func(**validated_args.model_dump_one_level()) ``` **critically** it also provides pre-parse helper to attempt to parse things from JSON. Args: func: The function to convert to a pydantic model skip_names: A list of parameter names to skip. These will not be included in the model. Returns: A pydantic model representing the function's signature. 
""" sig = _get_typed_signature(func) params = sig.parameters dynamic_pydantic_model_params: dict[str, Any] = {} globalns = getattr(func, "__globals__", {}) for param in params.values(): if param.name.startswith("_"): raise InvalidSignature( f"Parameter {param.name} of {func.__name__} cannot start with '_'" ) if param.name in skip_names: continue annotation = param.annotation # `x: None` / `x: None = None` if annotation is None: annotation = Annotated[ None, Field( default=param.default if param.default is not inspect.Parameter.empty else PydanticUndefined ), ] # Untyped field if annotation is inspect.Parameter.empty: annotation = Annotated[ Any, Field(), # 🤷 WithJsonSchema({"title": param.name, "type": "string"}), ] field_info = FieldInfo.from_annotated_attribute( _get_typed_annotation(annotation, globalns), param.default if param.default is not inspect.Parameter.empty else PydanticUndefined, ) dynamic_pydantic_model_params[param.name] = (field_info.annotation, field_info) continue arguments_model = create_model( f"{func.__name__}Arguments", **dynamic_pydantic_model_params, __base__=ArgModelBase, ) resp = FuncMetadata(arg_model=arguments_model) return resp def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any: def try_eval_type(value, globalns, localns): try: return eval_type_backport(value, globalns, localns), True except NameError: return value, False if isinstance(annotation, str): annotation = ForwardRef(annotation) annotation, status = try_eval_type(annotation, globalns, globalns) # This check and raise could perhaps be skipped, and we (FastMCP) just call # model_rebuild right before using it 🤷 if status is False: raise InvalidSignature(f"Unable to evaluate type annotation {annotation}") return annotation def _get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: """Get function signature while evaluating forward references""" signature = inspect.signature(call) globalns = getattr(call, "__globals__", {}) typed_params = [ 
        inspect.Parameter(
            name=param.name,
            kind=param.kind,
            default=param.default,
            annotation=_get_typed_annotation(param.annotation, globalns),
        )
        for param in signature.parameters.values()
    ]
    typed_signature = inspect.Signature(typed_params)
    return typed_signature


================================================
File: src/mcp/server/fastmcp/utilities/logging.py
================================================
"""Logging utilities for FastMCP."""

import logging
from typing import Literal


def get_logger(name: str) -> logging.Logger:
    """Get a logger for the given name.

    Args:
        name: the name of the logger (callers typically pass __name__);
            passed through to logging.getLogger unchanged - no
            'FastMCP.' prefix is applied

    Returns:
        a standard library logger instance
    """
    return logging.getLogger(name)


def configure_logging(
    level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "INFO",
) -> None:
    """Configure logging for MCP.

    Uses a rich handler on stderr when rich is installed, falling back to a
    plain StreamHandler otherwise.

    Args:
        level: the log level to use
    """
    handlers = []
    try:
        from rich.console import Console
        from rich.logging import RichHandler

        handlers.append(RichHandler(console=Console(stderr=True), rich_tracebacks=True))
    except ImportError:
        pass

    if not handlers:
        handlers.append(logging.StreamHandler())

    logging.basicConfig(
        level=level,
        format="%(message)s",
        handlers=handlers,
    )


================================================
File: src/mcp/server/fastmcp/utilities/types.py
================================================
"""Common types used across FastMCP."""

import base64
from pathlib import Path

from mcp.types import ImageContent


class Image:
    """Helper class for returning images from tools."""

    def __init__(
        self,
        path: str | Path | None = None,
        data: bytes | None = None,
        format: str | None = None,
    ):
        # Exactly one of path or data must be supplied.
        if path is None and data is None:
            raise ValueError("Either path or data must be provided")
        if path is not None and data is not None:
            raise ValueError("Only one of path or data can be provided")

        self.path = Path(path) if path else None
        self.data = data
        self._format = format
        self._mime_type = self._get_mime_type()

    def _get_mime_type(self) -> str:
        """Get MIME type from format or guess from file extension."""
        if self._format:
            return f"image/{self._format.lower()}"
        if self.path:
            suffix = self.path.suffix.lower()
            return {
                ".png": "image/png",
                ".jpg": "image/jpeg",
                ".jpeg": "image/jpeg",
                ".gif": "image/gif",
                ".webp": "image/webp",
            }.get(suffix, "application/octet-stream")
        return "image/png"  # default for raw binary data

    def to_image_content(self) -> ImageContent:
        """Convert to MCP ImageContent.

        Raises:
            ValueError: if neither path nor data is available.
        """
        if self.path:
            with open(self.path, "rb") as f:
                data = base64.b64encode(f.read()).decode()
        elif self.data is not None:
            data = base64.b64encode(self.data).decode()
        else:
            raise ValueError("No image data available")

        return ImageContent(type="image", data=data, mimeType=self._mime_type)


================================================
File: src/mcp/server/lowlevel/__init__.py
================================================
from .server import NotificationOptions, Server

__all__ = ["Server", "NotificationOptions"]


================================================
File: src/mcp/server/lowlevel/helper_types.py
================================================
from dataclasses import dataclass


@dataclass
class ReadResourceContents:
    """Contents returned from a read_resource call."""

    content: str | bytes
    mime_type: str | None = None


================================================
File: src/mcp/server/lowlevel/server.py
================================================
"""
MCP Server Module

This module provides a framework for creating an MCP (Model Context Protocol)
server. It allows you to easily define and handle various types of requests and
notifications in an asynchronous manner.

Usage:
1. Create a Server instance:
   server = Server("your_server_name")

2.
Define request handlers using decorators:
   @server.list_prompts()
   async def handle_list_prompts() -> list[types.Prompt]:
       # Implementation

   @server.get_prompt()
   async def handle_get_prompt(
       name: str, arguments: dict[str, str] | None
   ) -> types.GetPromptResult:
       # Implementation

   @server.list_tools()
   async def handle_list_tools() -> list[types.Tool]:
       # Implementation

   @server.call_tool()
   async def handle_call_tool(
       name: str, arguments: dict | None
   ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
       # Implementation

   @server.list_resource_templates()
   async def handle_list_resource_templates() -> list[types.ResourceTemplate]:
       # Implementation

3. Define notification handlers if needed:
   @server.progress_notification()
   async def handle_progress(
       progress_token: str | int, progress: float, total: float | None
   ) -> None:
       # Implementation

4. Run the server:
   async def main():
       async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
           await server.run(
               read_stream,
               write_stream,
               InitializationOptions(
                   server_name="your_server_name",
                   server_version="your_version",
                   capabilities=server.get_capabilities(
                       notification_options=NotificationOptions(),
                       experimental_capabilities={},
                   ),
               ),
           )

   asyncio.run(main())

The Server class provides methods to register handlers for various MCP requests and
notifications. It automatically manages the request context and handles incoming
messages from the client.
"""

import contextvars
import logging
import warnings
from collections.abc import Awaitable, Callable
from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager
from typing import Any, AsyncIterator, Generic, Sequence, TypeVar

import anyio
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from pydantic import AnyUrl

import mcp.types as types
from mcp.server.lowlevel.helper_types import ReadResourceContents
from mcp.server.models import InitializationOptions
from mcp.server.session import ServerSession
from mcp.server.stdio import stdio_server as stdio_server
from mcp.shared.context import RequestContext
from mcp.shared.exceptions import McpError
from mcp.shared.session import RequestResponder

logger = logging.getLogger(__name__)

LifespanResultT = TypeVar("LifespanResultT")

# This will be properly typed in each Server instance's context
request_ctx: contextvars.ContextVar[RequestContext[ServerSession, Any]] = (
    contextvars.ContextVar("request_ctx")
)


class NotificationOptions:
    # Flags controlling which listChanged capabilities the server advertises.
    def __init__(
        self,
        prompts_changed: bool = False,
        resources_changed: bool = False,
        tools_changed: bool = False,
    ):
        self.prompts_changed = prompts_changed
        self.resources_changed = resources_changed
        self.tools_changed = tools_changed


@asynccontextmanager
async def lifespan(server: "Server") -> AsyncIterator[object]:
    """Default lifespan context manager that does nothing.

    Args:
        server: The server instance this lifespan is managing

    Returns:
        An empty context object
    """
    yield {}


class Server(Generic[LifespanResultT]):
    def __init__(
        self,
        name: str,
        version: str | None = None,
        instructions: str | None = None,
        lifespan: Callable[
            ["Server"], AbstractAsyncContextManager[LifespanResultT]
        ] = lifespan,
    ):
        self.name = name
        self.version = version
        self.instructions = instructions
        self.lifespan = lifespan
        # Ping is always supported; other handlers are added via decorators.
        self.request_handlers: dict[
            type, Callable[..., Awaitable[types.ServerResult]]
        ] = {
            types.PingRequest: _ping_handler,
        }
        self.notification_handlers: dict[type, Callable[..., Awaitable[None]]] = {}
        self.notification_options = NotificationOptions()
        logger.debug(f"Initializing server '{name}'")

    def create_initialization_options(
        self,
        notification_options: NotificationOptions | None = None,
        experimental_capabilities: dict[str, dict[str, Any]] | None = None,
    ) -> InitializationOptions:
        """Create initialization options from this server instance."""

        def pkg_version(package: str) -> str:
            # Best effort: fall back to "unknown" if metadata is unavailable.
            try:
                from importlib.metadata import version

                v = version(package)
                if v is not None:
                    return v
            except Exception:
                pass

            return "unknown"

        return InitializationOptions(
            server_name=self.name,
            server_version=self.version if self.version else pkg_version("mcp"),
            capabilities=self.get_capabilities(
                notification_options or NotificationOptions(),
                experimental_capabilities or {},
            ),
            instructions=self.instructions,
        )

    def get_capabilities(
        self,
        notification_options: NotificationOptions,
        experimental_capabilities: dict[str, dict[str, Any]],
    ) -> types.ServerCapabilities:
        """Convert existing handlers to a ServerCapabilities object."""
        prompts_capability = None
        resources_capability = None
        tools_capability = None
        logging_capability = None

        # Set prompt capabilities if handler exists
        if types.ListPromptsRequest in self.request_handlers:
            prompts_capability = types.PromptsCapability(
                listChanged=notification_options.prompts_changed
            )

        # Set resource capabilities if handler exists
        if types.ListResourcesRequest in self.request_handlers:
            resources_capability = types.ResourcesCapability(
                subscribe=False, listChanged=notification_options.resources_changed
            )

        # Set tool capabilities if handler exists
        if types.ListToolsRequest in self.request_handlers:
            tools_capability = types.ToolsCapability(
                listChanged=notification_options.tools_changed
            )

        # Set logging capabilities if handler exists
        if types.SetLevelRequest in self.request_handlers:
            logging_capability = types.LoggingCapability()

        return types.ServerCapabilities(
            prompts=prompts_capability,
            resources=resources_capability,
            tools=tools_capability,
            logging=logging_capability,
            experimental=experimental_capabilities,
        )

    @property
    def request_context(self) -> RequestContext[ServerSession, LifespanResultT]:
        """If called outside of a request context, this will raise a LookupError."""
        return request_ctx.get()

    def list_prompts(self):
        # Decorator: register a ListPromptsRequest handler.
        def decorator(func: Callable[[], Awaitable[list[types.Prompt]]]):
            logger.debug("Registering handler for PromptListRequest")

            async def handler(_: Any):
                prompts = await func()
                return types.ServerResult(types.ListPromptsResult(prompts=prompts))

            self.request_handlers[types.ListPromptsRequest] = handler
            return func

        return decorator

    def get_prompt(self):
        # Decorator: register a GetPromptRequest handler.
        def decorator(
            func: Callable[
                [str, dict[str, str] | None], Awaitable[types.GetPromptResult]
            ],
        ):
            logger.debug("Registering handler for GetPromptRequest")

            async def handler(req: types.GetPromptRequest):
                prompt_get = await func(req.params.name, req.params.arguments)
                return types.ServerResult(prompt_get)

            self.request_handlers[types.GetPromptRequest] = handler
            return func

        return decorator

    def list_resources(self):
        # Decorator: register a ListResourcesRequest handler.
        def decorator(func: Callable[[], Awaitable[list[types.Resource]]]):
            logger.debug("Registering handler for ListResourcesRequest")

            async def handler(_: Any):
                resources = await func()
                return types.ServerResult(
                    types.ListResourcesResult(resources=resources)
                )

            self.request_handlers[types.ListResourcesRequest] = handler
            return func

        return decorator

    def list_resource_templates(self):
        # Decorator: register a ListResourceTemplatesRequest handler.
        def decorator(func: Callable[[], Awaitable[list[types.ResourceTemplate]]]):
            logger.debug("Registering handler for ListResourceTemplatesRequest")

            async def handler(_: Any):
                templates = await func()
                return types.ServerResult(
                    types.ListResourceTemplatesResult(resourceTemplates=templates)
                )

            self.request_handlers[types.ListResourceTemplatesRequest] = handler
            return func

        return decorator

    def read_resource(self):
        # Decorator: register a ReadResourceRequest handler.
        def decorator(
            func: Callable[[AnyUrl], Awaitable[str | bytes | ReadResourceContents]],
        ):
            logger.debug("Registering handler for ReadResourceRequest")

            async def handler(req: types.ReadResourceRequest):
                result = await func(req.params.uri)

                def create_content(data: str | bytes, mime_type: str | None):
                    match data:
                        case str() as data:
                            return types.TextResourceContents(
                                uri=req.params.uri,
                                text=data,
                                mimeType=mime_type or "text/plain",
                            )
                        case bytes() as data:
                            import base64

                            # NOTE(review): urlsafe_b64encode is used here -
                            # confirm clients expect URL-safe rather than
                            # standard base64 for blob contents.
                            return types.BlobResourceContents(
                                uri=req.params.uri,
                                blob=base64.urlsafe_b64encode(data).decode(),
                                mimeType=mime_type or "application/octet-stream",
                            )

                match result:
                    case str() | bytes() as data:
                        warnings.warn(
                            "Returning str or bytes from read_resource is deprecated. "
                            "Use ReadResourceContents instead.",
                            DeprecationWarning,
                            stacklevel=2,
                        )
                        content = create_content(data, None)
                    case ReadResourceContents() as contents:
                        content = create_content(contents.content, contents.mime_type)
                    case _:
                        raise ValueError(
                            f"Unexpected return type from read_resource: {type(result)}"
                        )

                return types.ServerResult(
                    types.ReadResourceResult(
                        contents=[content],
                    )
                )

            self.request_handlers[types.ReadResourceRequest] = handler
            return func

        return decorator

    def set_logging_level(self):
        # Decorator: register a SetLevelRequest handler.
        def decorator(func: Callable[[types.LoggingLevel], Awaitable[None]]):
            logger.debug("Registering handler for SetLevelRequest")

            async def handler(req: types.SetLevelRequest):
                await func(req.params.level)
                return types.ServerResult(types.EmptyResult())

            self.request_handlers[types.SetLevelRequest] = handler
            return func

        return decorator

    def subscribe_resource(self):
        # Decorator: register a SubscribeRequest handler.
        def decorator(func: Callable[[AnyUrl], Awaitable[None]]):
            logger.debug("Registering handler for SubscribeRequest")

            async def handler(req: types.SubscribeRequest):
                await func(req.params.uri)
                return types.ServerResult(types.EmptyResult())

            self.request_handlers[types.SubscribeRequest] = handler
            return func

        return decorator

    def unsubscribe_resource(self):
        # Decorator: register an UnsubscribeRequest handler.
        def decorator(func: Callable[[AnyUrl], Awaitable[None]]):
            logger.debug("Registering handler for UnsubscribeRequest")

            async def handler(req: types.UnsubscribeRequest):
                await func(req.params.uri)
                return types.ServerResult(types.EmptyResult())

            self.request_handlers[types.UnsubscribeRequest] = handler
            return func

        return decorator

    def list_tools(self):
        # Decorator: register a ListToolsRequest handler.
        def decorator(func: Callable[[], Awaitable[list[types.Tool]]]):
            logger.debug("Registering handler for ListToolsRequest")

            async def handler(_: Any):
                tools = await func()
                return types.ServerResult(types.ListToolsResult(tools=tools))

            self.request_handlers[types.ListToolsRequest] = handler
            return func

        return decorator

    def call_tool(self):
        # Decorator: register a CallToolRequest handler. Exceptions from the
        # tool are converted into an isError result rather than propagated.
        def decorator(
            func: Callable[
                ...,
                Awaitable[
                    Sequence[
                        types.TextContent | types.ImageContent | types.EmbeddedResource
                    ]
                ],
            ],
        ):
            logger.debug("Registering handler for CallToolRequest")

            async def handler(req: types.CallToolRequest):
                try:
                    results = await func(req.params.name, (req.params.arguments or {}))
                    return types.ServerResult(
                        types.CallToolResult(content=list(results), isError=False)
                    )
                except Exception as e:
                    return types.ServerResult(
                        types.CallToolResult(
                            content=[types.TextContent(type="text", text=str(e))],
                            isError=True,
                        )
                    )

            self.request_handlers[types.CallToolRequest] = handler
            return func

        return decorator

    def progress_notification(self):
        # Decorator: register a ProgressNotification handler.
        def decorator(
            func: Callable[[str | int, float, float | None], Awaitable[None]],
        ):
            logger.debug("Registering handler for ProgressNotification")

            async def handler(req: types.ProgressNotification):
                await func(
                    req.params.progressToken, req.params.progress, req.params.total
                )

            self.notification_handlers[types.ProgressNotification] = handler
            return func

        return decorator

    def completion(self):
        """Provides completions for prompts and resource templates"""

        def decorator(
            func: Callable[
                [
                    types.PromptReference | types.ResourceReference,
                    types.CompletionArgument,
                ],
                Awaitable[types.Completion | None],
            ],
        ):
            logger.debug("Registering handler for CompleteRequest")

            async def handler(req: types.CompleteRequest):
                completion = await func(req.params.ref, req.params.argument)
                return types.ServerResult(
                    types.CompleteResult(
                        completion=completion
                        if completion is not None
                        else types.Completion(values=[], total=None, hasMore=None),
                    )
                )

            self.request_handlers[types.CompleteRequest] = handler
            return func

        return decorator

    async def run(
        self,
        read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception],
        write_stream: MemoryObjectSendStream[types.JSONRPCMessage],
        initialization_options: InitializationOptions,
        # When False, exceptions are returned as messages to the client.
        # When True, exceptions are raised, which will cause the server to shut down
        # but also make tracing exceptions much easier during testing and when using
        # in-process servers.
        raise_exceptions: bool = False,
    ):
        async with AsyncExitStack() as stack:
            lifespan_context = await stack.enter_async_context(self.lifespan(self))
            session = await stack.enter_async_context(
                ServerSession(read_stream, write_stream, initialization_options)
            )

            async with anyio.create_task_group() as tg:
                async for message in session.incoming_messages:
                    logger.debug(f"Received message: {message}")

                    # Each message is handled concurrently in the task group.
                    tg.start_soon(
                        self._handle_message,
                        message,
                        session,
                        lifespan_context,
                        raise_exceptions,
                    )

    async def _handle_message(
        self,
        message: RequestResponder[types.ClientRequest, types.ServerResult]
        | types.ClientNotification
        | Exception,
        session: ServerSession,
        lifespan_context: LifespanResultT,
        raise_exceptions: bool = False,
    ):
        # Warnings emitted during handling are captured and logged afterwards.
        with warnings.catch_warnings(record=True) as w:
            match message:
                case (
                    RequestResponder(request=types.ClientRequest(root=req)) as responder
                ):
                    with responder:
                        await self._handle_request(
                            message, req, session, lifespan_context, raise_exceptions
                        )
                case types.ClientNotification(root=notify):
                    await self._handle_notification(notify)

            for warning in w:
                logger.info(f"Warning: {warning.category.__name__}: {warning.message}")

    async def _handle_request(
        self,
        message: RequestResponder,
        req: Any,
        session: ServerSession,
        lifespan_context: LifespanResultT,
        raise_exceptions: bool,
    ):
        logger.info(f"Processing request of type {type(req).__name__}")
        if type(req) in self.request_handlers:
            handler = self.request_handlers[type(req)]
            logger.debug(f"Dispatching request of type {type(req).__name__}")

            token = None
            try:
                # Set our global state that can be retrieved via
                # app.get_request_context()
                token = request_ctx.set(
                    RequestContext(
                        message.request_id,
                        message.request_meta,
                        session,
                        lifespan_context,
                    )
                )
                response = await handler(req)
            except McpError as err:
                response = err.error
            except Exception as err:
                if raise_exceptions:
                    raise err
                # NOTE(review): code=0 is not a standard JSON-RPC error code -
                # confirm whether INTERNAL_ERROR was intended here.
                response = types.ErrorData(code=0, message=str(err), data=None)
            finally:
                # Reset the global state after we are done
                if token is not None:
                    request_ctx.reset(token)

            await message.respond(response)
        else:
            await message.respond(
                types.ErrorData(
                    code=types.METHOD_NOT_FOUND,
                    message="Method not found",
                )
            )

        logger.debug("Response sent")

    async def _handle_notification(self, notify: Any):
        if type(notify) in self.notification_handlers:
            # Redundant with the guard above; kept as a sanity check.
            assert type(notify) in self.notification_handlers
            handler = self.notification_handlers[type(notify)]
            logger.debug(
                f"Dispatching notification of type " f"{type(notify).__name__}"
            )

            try:
                await handler(notify)
            except Exception as err:
                logger.error(f"Uncaught exception in notification handler: " f"{err}")


async def _ping_handler(request: types.PingRequest) -> types.ServerResult:
    return types.ServerResult(types.EmptyResult())


================================================
File: src/mcp/shared/context.py
================================================
from dataclasses import dataclass
from typing import Generic, TypeVar

from mcp.shared.session import BaseSession
from mcp.types import RequestId, RequestParams

SessionT = TypeVar("SessionT", bound=BaseSession)
LifespanContextT = TypeVar("LifespanContextT")


@dataclass
class RequestContext(Generic[SessionT, LifespanContextT]):
    # Per-request state made available to handlers via Server.request_context.
    request_id: RequestId
    meta: RequestParams.Meta | None
    session: SessionT
    lifespan_context: LifespanContextT


================================================
File: src/mcp/shared/exceptions.py
================================================
from mcp.types import ErrorData


class McpError(Exception):
    """
    Exception type raised when an error arrives over an MCP connection.
""" error: ErrorData def __init__(self, error: ErrorData): """Initialize McpError.""" super().__init__(error.message) self.error = error ================================================ File: src/mcp/shared/memory.py ================================================ """ In-memory transports """ from contextlib import asynccontextmanager from datetime import timedelta from typing import AsyncGenerator import anyio from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from mcp.client.session import ClientSession from mcp.server import Server from mcp.types import JSONRPCMessage MessageStream = tuple[ MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage], ] @asynccontextmanager async def create_client_server_memory_streams() -> ( AsyncGenerator[tuple[MessageStream, MessageStream], None] ): """ Creates a pair of bidirectional memory streams for client-server communication. Returns: A tuple of (client_streams, server_streams) where each is a tuple of (read_stream, write_stream) """ # Create streams for both directions server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[ JSONRPCMessage | Exception ](1) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[ JSONRPCMessage | Exception ](1) client_streams = (server_to_client_receive, client_to_server_send) server_streams = (client_to_server_receive, server_to_client_send) async with ( server_to_client_receive, client_to_server_send, client_to_server_receive, server_to_client_send, ): yield client_streams, server_streams @asynccontextmanager async def create_connected_server_and_client_session( server: Server, read_timeout_seconds: timedelta | None = None, raise_exceptions: bool = False, ) -> AsyncGenerator[ClientSession, None]: """Creates a ClientSession that is connected to a running MCP server.""" async with create_client_server_memory_streams() as ( client_streams, server_streams, ): 
client_read, client_write = client_streams server_read, server_write = server_streams # Create a cancel scope for the server task async with anyio.create_task_group() as tg: tg.start_soon( lambda: server.run( server_read, server_write, server.create_initialization_options(), raise_exceptions=raise_exceptions, ) ) try: async with ClientSession( read_stream=client_read, write_stream=client_write, read_timeout_seconds=read_timeout_seconds, ) as client_session: await client_session.initialize() yield client_session finally: tg.cancel_scope.cancel() ================================================ File: src/mcp/shared/progress.py ================================================ from contextlib import contextmanager from dataclasses import dataclass, field from pydantic import BaseModel from mcp.shared.context import RequestContext from mcp.shared.session import BaseSession from mcp.types import ProgressToken class Progress(BaseModel): progress: float total: float | None @dataclass class ProgressContext: session: BaseSession progress_token: ProgressToken total: float | None current: float = field(default=0.0, init=False) async def progress(self, amount: float) -> None: self.current += amount await self.session.send_progress_notification( self.progress_token, self.current, total=self.total ) @contextmanager def progress(ctx: RequestContext, total: float | None = None): if ctx.meta is None or ctx.meta.progressToken is None: raise ValueError("No progress token provided") progress_ctx = ProgressContext(ctx.session, ctx.meta.progressToken, total) try: yield progress_ctx finally: pass ================================================ File: src/mcp/shared/session.py ================================================ import logging from contextlib import AbstractAsyncContextManager from datetime import timedelta from typing import Any, Callable, Generic, TypeVar import anyio import anyio.lowlevel import httpx from anyio.streams.memory import MemoryObjectReceiveStream, 
MemoryObjectSendStream
from pydantic import BaseModel

from mcp.shared.exceptions import McpError
from mcp.types import (
    CancelledNotification,
    ClientNotification,
    ClientRequest,
    ClientResult,
    ErrorData,
    JSONRPCError,
    JSONRPCMessage,
    JSONRPCNotification,
    JSONRPCRequest,
    JSONRPCResponse,
    RequestParams,
    ServerNotification,
    ServerRequest,
    ServerResult,
)

SendRequestT = TypeVar("SendRequestT", ClientRequest, ServerRequest)
SendResultT = TypeVar("SendResultT", ClientResult, ServerResult)
SendNotificationT = TypeVar("SendNotificationT", ClientNotification, ServerNotification)
ReceiveRequestT = TypeVar("ReceiveRequestT", ClientRequest, ServerRequest)
ReceiveResultT = TypeVar("ReceiveResultT", bound=BaseModel)
ReceiveNotificationT = TypeVar(
    "ReceiveNotificationT", ClientNotification, ServerNotification
)

RequestId = str | int


class RequestResponder(Generic[ReceiveRequestT, SendResultT]):
    """Handles responding to MCP requests and manages request lifecycle.

    This class MUST be used as a context manager to ensure proper cleanup and
    cancellation handling:

    Example:
        with request_responder as resp:
            await resp.respond(result)

    The context manager ensures:
    1. Proper cancellation scope setup and cleanup
    2. Request completion tracking
    3. Cleanup of in-flight requests
    """

    def __init__(
        self,
        request_id: RequestId,
        request_meta: RequestParams.Meta | None,
        request: ReceiveRequestT,
        session: "BaseSession",
        on_complete: Callable[["RequestResponder[ReceiveRequestT, SendResultT]"], Any],
    ) -> None:
        self.request_id = request_id
        self.request_meta = request_meta
        self.request = request
        self._session = session
        self._completed = False
        self._cancel_scope = anyio.CancelScope()
        self._on_complete = on_complete
        self._entered = False  # Track if we're in a context manager

    def __enter__(self) -> "RequestResponder[ReceiveRequestT, SendResultT]":
        """Enter the context manager, enabling request cancellation tracking."""
        self._entered = True
        # A fresh scope on each entry; cancel() cancels this scope.
        self._cancel_scope = anyio.CancelScope()
        self._cancel_scope.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Exit the context manager, performing cleanup and notifying completion."""
        try:
            if self._completed:
                self._on_complete(self)
        finally:
            self._entered = False
            if not self._cancel_scope:
                raise RuntimeError("No active cancel scope")
            self._cancel_scope.__exit__(exc_type, exc_val, exc_tb)

    async def respond(self, response: SendResultT | ErrorData) -> None:
        """Send a response for this request.

        Must be called within a context manager block.

        Raises:
            RuntimeError: If not used within a context manager
            AssertionError: If request was already responded to
        """
        if not self._entered:
            raise RuntimeError("RequestResponder must be used as a context manager")
        assert not self._completed, "Request already responded to"

        # A cancelled request already sent its error response; do nothing.
        if not self.cancelled:
            self._completed = True

            await self._session._send_response(
                request_id=self.request_id, response=response
            )

    async def cancel(self) -> None:
        """Cancel this request and mark it as completed."""
        if not self._entered:
            raise RuntimeError("RequestResponder must be used as a context manager")
        if not self._cancel_scope:
            raise RuntimeError("No active cancel scope")

        self._cancel_scope.cancel()
        self._completed = True  # Mark as completed so it's removed from in_flight
        # Send an error response to indicate cancellation
        await self._session._send_response(
            request_id=self.request_id,
            response=ErrorData(code=0, message="Request cancelled", data=None),
        )

    @property
    def in_flight(self) -> bool:
        # True while the request has neither completed nor been cancelled.
        return not self._completed and not self.cancelled

    @property
    def cancelled(self) -> bool:
        return self._cancel_scope is not None and self._cancel_scope.cancel_called


class BaseSession(
    AbstractAsyncContextManager,
    Generic[
        SendRequestT,
        SendNotificationT,
        SendResultT,
        ReceiveRequestT,
        ReceiveNotificationT,
    ],
):
    """
    Implements an MCP "session" on top of read/write streams, including features
    like request/response linking, notifications, and progress.

    This class is an async context manager that automatically starts processing
    messages when entered.
    """

    # Per-request one-shot streams used to route responses back to senders.
    _response_streams: dict[
        RequestId, MemoryObjectSendStream[JSONRPCResponse | JSONRPCError]
    ]
    _request_id: int
    _in_flight: dict[RequestId, RequestResponder[ReceiveRequestT, SendResultT]]

    def __init__(
        self,
        read_stream: MemoryObjectReceiveStream[JSONRPCMessage | Exception],
        write_stream: MemoryObjectSendStream[JSONRPCMessage],
        receive_request_type: type[ReceiveRequestT],
        receive_notification_type: type[ReceiveNotificationT],
        # If none, reading will never time out
        read_timeout_seconds: timedelta | None = None,
    ) -> None:
        self._read_stream = read_stream
        self._write_stream = write_stream
        self._response_streams = {}
        self._request_id = 0
        self._receive_request_type = receive_request_type
        self._receive_notification_type = receive_notification_type
        self._read_timeout_seconds = read_timeout_seconds
        self._in_flight = {}

        self._incoming_message_stream_writer, self._incoming_message_stream_reader = (
            anyio.create_memory_object_stream[
                RequestResponder[ReceiveRequestT, SendResultT]
                | ReceiveNotificationT
                | Exception
            ]()
        )

    async def __aenter__(self):
        # Start the receive loop in a private task group.
        self._task_group = anyio.create_task_group()
        await self._task_group.__aenter__()
        self._task_group.start_soon(self._receive_loop)
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Using BaseSession as a context manager should not block on exit (this
        # would be very surprising behavior), so make sure to cancel the tasks
        # in the task group.
        self._task_group.cancel_scope.cancel()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    async def send_request(
        self,
        request: SendRequestT,
        result_type: type[ReceiveResultT],
    ) -> ReceiveResultT:
        """
        Sends a request and wait for a response. Raises an McpError if the
        response contains an error.

        Do not use this method to emit notifications! Use send_notification()
        instead.
        """
        request_id = self._request_id
        self._request_id = request_id + 1

        # One-slot stream: _receive_loop delivers the matching response here.
        response_stream, response_stream_reader = anyio.create_memory_object_stream[
            JSONRPCResponse | JSONRPCError
        ](1)
        self._response_streams[request_id] = response_stream

        jsonrpc_request = JSONRPCRequest(
            jsonrpc="2.0",
            id=request_id,
            **request.model_dump(by_alias=True, mode="json", exclude_none=True),
        )

        # TODO: Support progress callbacks

        await self._write_stream.send(JSONRPCMessage(jsonrpc_request))

        try:
            with anyio.fail_after(
                None
                if self._read_timeout_seconds is None
                else self._read_timeout_seconds.total_seconds()
            ):
                response_or_error = await response_stream_reader.receive()
        except TimeoutError:
            raise McpError(
                ErrorData(
                    code=httpx.codes.REQUEST_TIMEOUT,
                    message=(
                        f"Timed out while waiting for response to "
                        f"{request.__class__.__name__}. Waited "
                        f"{self._read_timeout_seconds} seconds."
                    ),
                )
            )

        if isinstance(response_or_error, JSONRPCError):
            raise McpError(response_or_error.error)
        else:
            return result_type.model_validate(response_or_error.result)

    async def send_notification(self, notification: SendNotificationT) -> None:
        """
        Emits a notification, which is a one-way message that does not expect
        a response.
        """
        jsonrpc_notification = JSONRPCNotification(
            jsonrpc="2.0",
            **notification.model_dump(by_alias=True, mode="json", exclude_none=True),
        )

        await self._write_stream.send(JSONRPCMessage(jsonrpc_notification))

    async def _send_response(
        self, request_id: RequestId, response: SendResultT | ErrorData
    ) -> None:
        # ErrorData becomes a JSON-RPC error; anything else a normal response.
        if isinstance(response, ErrorData):
            jsonrpc_error = JSONRPCError(jsonrpc="2.0", id=request_id, error=response)
            await self._write_stream.send(JSONRPCMessage(jsonrpc_error))
        else:
            jsonrpc_response = JSONRPCResponse(
                jsonrpc="2.0",
                id=request_id,
                result=response.model_dump(
                    by_alias=True, mode="json", exclude_none=True
                ),
            )
            await self._write_stream.send(JSONRPCMessage(jsonrpc_response))

    async def _receive_loop(self) -> None:
        """Read messages off the wire and route them: requests get a
        RequestResponder, notifications go to handlers/the incoming stream,
        and responses are matched to their pending request by id."""
        async with (
            self._read_stream,
            self._write_stream,
            self._incoming_message_stream_writer,
        ):
            async for message in self._read_stream:
                if isinstance(message, Exception):
                    await self._incoming_message_stream_writer.send(message)
                elif isinstance(message.root, JSONRPCRequest):
                    validated_request = self._receive_request_type.model_validate(
                        message.root.model_dump(
                            by_alias=True, mode="json", exclude_none=True
                        )
                    )

                    responder = RequestResponder(
                        request_id=message.root.id,
                        request_meta=validated_request.root.params.meta
                        if validated_request.root.params
                        else None,
                        request=validated_request,
                        session=self,
                        on_complete=lambda r: self._in_flight.pop(r.request_id, None),
                    )

                    self._in_flight[responder.request_id] = responder
                    await self._received_request(responder)
                    # Only forward if the subclass hook did not already respond.
                    if not responder._completed:
                        await self._incoming_message_stream_writer.send(responder)

                elif isinstance(message.root, JSONRPCNotification):
                    try:
                        notification = self._receive_notification_type.model_validate(
                            message.root.model_dump(
                                by_alias=True, mode="json", exclude_none=True
                            )
                        )

                        # Handle cancellation notifications
                        if isinstance(notification.root, CancelledNotification):
                            cancelled_id = notification.root.params.requestId
                            if cancelled_id in self._in_flight:
                                await self._in_flight[cancelled_id].cancel()
                        else:
                            await self._received_notification(notification)
                            await self._incoming_message_stream_writer.send(
                                notification
                            )
                    except Exception as e:
                        # For other validation errors, log and continue
                        logging.warning(
                            f"Failed to validate notification: {e}. "
                            f"Message was: {message.root}"
                        )
                else:  # Response or error
                    stream = self._response_streams.pop(message.root.id, None)
                    if stream:
                        await stream.send(message.root)
                    else:
                        await self._incoming_message_stream_writer.send(
                            RuntimeError(
                                "Received response with an unknown "
                                f"request ID: {message}"
                            )
                        )

    async def _received_request(
        self, responder: RequestResponder[ReceiveRequestT, SendResultT]
    ) -> None:
        """
        Can be overridden by subclasses to handle a request without needing to
        listen on the message stream.

        If the request is responded to within this method, it will not be
        forwarded on to the message stream.
        """

    async def _received_notification(self, notification: ReceiveNotificationT) -> None:
        """
        Can be overridden by subclasses to handle a notification without needing
        to listen on the message stream.
        """

    async def send_progress_notification(
        self, progress_token: str | int, progress: float, total: float | None = None
    ) -> None:
        """
        Sends a progress notification for a request that is currently being
        processed.
""" @property def incoming_messages( self, ) -> MemoryObjectReceiveStream[ RequestResponder[ReceiveRequestT, SendResultT] | ReceiveNotificationT | Exception ]: return self._incoming_message_stream_reader ================================================ File: src/mcp/shared/version.py ================================================ from mcp.types import LATEST_PROTOCOL_VERSION SUPPORTED_PROTOCOL_VERSIONS = [1, LATEST_PROTOCOL_VERSION] ================================================ File: tests/conftest.py ================================================ import pytest @pytest.fixture def anyio_backend(): return "asyncio" ================================================ File: tests/test_examples.py ================================================ """Tests for example servers""" import pytest from mcp.shared.memory import ( create_connected_server_and_client_session as client_session, ) from mcp.types import TextContent, TextResourceContents @pytest.mark.anyio async def test_simple_echo(): """Test the simple echo server""" from examples.fastmcp.simple_echo import mcp async with client_session(mcp._mcp_server) as client: result = await client.call_tool("echo", {"text": "hello"}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert content.text == "hello" @pytest.mark.anyio async def test_complex_inputs(): """Test the complex inputs server""" from examples.fastmcp.complex_inputs import mcp async with client_session(mcp._mcp_server) as client: tank = {"shrimp": [{"name": "bob"}, {"name": "alice"}]} result = await client.call_tool( "name_shrimp", {"tank": tank, "extra_names": ["charlie"]} ) assert len(result.content) == 3 assert isinstance(result.content[0], TextContent) assert isinstance(result.content[1], TextContent) assert isinstance(result.content[2], TextContent) assert result.content[0].text == "bob" assert result.content[1].text == "alice" assert result.content[2].text == "charlie" @pytest.mark.anyio async def 
test_desktop(monkeypatch): """Test the desktop server""" from pathlib import Path from pydantic import AnyUrl from examples.fastmcp.desktop import mcp # Mock desktop directory listing mock_files = [Path("/fake/path/file1.txt"), Path("/fake/path/file2.txt")] monkeypatch.setattr(Path, "iterdir", lambda self: mock_files) monkeypatch.setattr(Path, "home", lambda: Path("/fake/home")) async with client_session(mcp._mcp_server) as client: # Test the add function result = await client.call_tool("add", {"a": 1, "b": 2}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert content.text == "3" # Test the desktop resource result = await client.read_resource(AnyUrl("dir://desktop")) assert len(result.contents) == 1 content = result.contents[0] assert isinstance(content, TextResourceContents) assert isinstance(content.text, str) assert "/fake/path/file1.txt" in content.text assert "/fake/path/file2.txt" in content.text ================================================ File: tests/test_types.py ================================================ import pytest from mcp.types import ( LATEST_PROTOCOL_VERSION, ClientRequest, JSONRPCMessage, JSONRPCRequest, ) @pytest.mark.anyio async def test_jsonrpc_request(): json_data = { "jsonrpc": "2.0", "id": 1, "method": "initialize", "params": { "protocolVersion": LATEST_PROTOCOL_VERSION, "capabilities": {"batch": None, "sampling": None}, "clientInfo": {"name": "mcp", "version": "0.1.0"}, }, } request = JSONRPCMessage.model_validate(json_data) assert isinstance(request.root, JSONRPCRequest) ClientRequest.model_validate(request.model_dump(by_alias=True, exclude_none=True)) assert request.root.jsonrpc == "2.0" assert request.root.id == 1 assert request.root.method == "initialize" assert request.root.params is not None assert request.root.params["protocolVersion"] == LATEST_PROTOCOL_VERSION ================================================ File: tests/client/test_config.py 
================================================ import json import subprocess from unittest.mock import patch import pytest from mcp.cli.claude import update_claude_config @pytest.fixture def temp_config_dir(tmp_path): """Create a temporary Claude config directory.""" config_dir = tmp_path / "Claude" config_dir.mkdir() return config_dir @pytest.fixture def mock_config_path(temp_config_dir): """Mock get_claude_config_path to return our temporary directory.""" with patch("mcp.cli.claude.get_claude_config_path", return_value=temp_config_dir): yield temp_config_dir def test_command_execution(mock_config_path): """Test that the generated command can actually be executed.""" # Setup server_name = "test_server" file_spec = "test_server.py:app" # Update config success = update_claude_config( file_spec=file_spec, server_name=server_name, ) assert success # Read the generated config config_file = mock_config_path / "claude_desktop_config.json" config = json.loads(config_file.read_text()) # Get the command and args server_config = config["mcpServers"][server_name] command = server_config["command"] args = server_config["args"] test_args = [command] + args + ["--help"] result = subprocess.run(test_args, capture_output=True, text=True, timeout=5) assert result.returncode == 0 assert "usage" in result.stdout.lower() ================================================ File: tests/client/test_session.py ================================================ import anyio import pytest from mcp.client.session import ClientSession from mcp.types import ( LATEST_PROTOCOL_VERSION, ClientNotification, ClientRequest, Implementation, InitializedNotification, InitializeRequest, InitializeResult, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, ServerCapabilities, ServerResult, ) @pytest.mark.anyio async def test_client_session_initialize(): client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[ JSONRPCMessage ](1) server_to_client_send, 
server_to_client_receive = anyio.create_memory_object_stream[ JSONRPCMessage ](1) initialized_notification = None async def mock_server(): nonlocal initialized_notification jsonrpc_request = await client_to_server_receive.receive() assert isinstance(jsonrpc_request.root, JSONRPCRequest) request = ClientRequest.model_validate( jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request.root, InitializeRequest) result = ServerResult( InitializeResult( protocolVersion=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities( logging=None, resources=None, tools=None, experimental=None, prompts=None, ), serverInfo=Implementation(name="mock-server", version="0.1.0"), instructions="The server instructions.", ) ) async with server_to_client_send: await server_to_client_send.send( JSONRPCMessage( JSONRPCResponse( jsonrpc="2.0", id=jsonrpc_request.root.id, result=result.model_dump( by_alias=True, mode="json", exclude_none=True ), ) ) ) jsonrpc_notification = await client_to_server_receive.receive() assert isinstance(jsonrpc_notification.root, JSONRPCNotification) initialized_notification = ClientNotification.model_validate( jsonrpc_notification.model_dump( by_alias=True, mode="json", exclude_none=True ) ) async def listen_session(): async for message in session.incoming_messages: if isinstance(message, Exception): raise message async with ( ClientSession(server_to_client_receive, client_to_server_send) as session, anyio.create_task_group() as tg, ): tg.start_soon(mock_server) tg.start_soon(listen_session) result = await session.initialize() # Assert the result assert isinstance(result, InitializeResult) assert result.protocolVersion == LATEST_PROTOCOL_VERSION assert isinstance(result.capabilities, ServerCapabilities) assert result.serverInfo == Implementation(name="mock-server", version="0.1.0") assert result.instructions == "The server instructions." 
# Check that the client sent the initialized notification assert initialized_notification assert isinstance(initialized_notification.root, InitializedNotification) ================================================ File: tests/client/test_stdio.py ================================================ import pytest from mcp.client.stdio import StdioServerParameters, stdio_client from mcp.types import JSONRPCMessage, JSONRPCRequest, JSONRPCResponse @pytest.mark.anyio async def test_stdio_client(): server_parameters = StdioServerParameters(command="/usr/bin/tee") async with stdio_client(server_parameters) as (read_stream, write_stream): # Test sending and receiving messages messages = [ JSONRPCMessage(root=JSONRPCRequest(jsonrpc="2.0", id=1, method="ping")), JSONRPCMessage(root=JSONRPCResponse(jsonrpc="2.0", id=2, result={})), ] async with write_stream: for message in messages: await write_stream.send(message) read_messages = [] async with read_stream: async for message in read_stream: if isinstance(message, Exception): raise message read_messages.append(message) if len(read_messages) == 2: break assert len(read_messages) == 2 assert read_messages[0] == JSONRPCMessage( root=JSONRPCRequest(jsonrpc="2.0", id=1, method="ping") ) assert read_messages[1] == JSONRPCMessage( root=JSONRPCResponse(jsonrpc="2.0", id=2, result={}) ) ================================================ File: tests/issues/test_100_tool_listing.py ================================================ import pytest from mcp.server.fastmcp import FastMCP pytestmark = pytest.mark.anyio async def test_list_tools_returns_all_tools(): mcp = FastMCP("TestTools") # Create 100 tools with unique names num_tools = 100 for i in range(num_tools): @mcp.tool(name=f"tool_{i}") def dummy_tool_func(): f"""Tool number {i}""" return i globals()[f"dummy_tool_{i}"] = ( dummy_tool_func # Keep reference to avoid garbage collection ) # Get all tools tools = await mcp.list_tools() # Verify we get all tools assert len(tools) == num_tools, 
f"Expected {num_tools} tools, but got {len(tools)}" # Verify each tool is unique and has the correct name tool_names = [tool.name for tool in tools] expected_names = [f"tool_{i}" for i in range(num_tools)] assert sorted(tool_names) == sorted( expected_names ), "Tool names don't match expected names" ================================================ File: tests/issues/test_129_resource_templates.py ================================================ import pytest from mcp import types from mcp.server.fastmcp import FastMCP @pytest.mark.anyio async def test_resource_templates(): # Create an MCP server mcp = FastMCP("Demo") # Add a dynamic greeting resource @mcp.resource("greeting://{name}") def get_greeting(name: str) -> str: """Get a personalized greeting""" return f"Hello, {name}!" @mcp.resource("users://{user_id}/profile") def get_user_profile(user_id: str) -> str: """Dynamic user data""" return f"Profile data for user {user_id}" # Get the list of resource templates using the underlying server # Note: list_resource_templates() returns a decorator that wraps the handler # The handler returns a ServerResult with a ListResourceTemplatesResult inside result = await mcp._mcp_server.request_handlers[types.ListResourceTemplatesRequest]( types.ListResourceTemplatesRequest( method="resources/templates/list", params=None, cursor=None ) ) assert isinstance(result.root, types.ListResourceTemplatesResult) templates = result.root.resourceTemplates # Verify we get both templates back assert len(templates) == 2 # Verify template details greeting_template = next(t for t in templates if t.name == "get_greeting") assert greeting_template.uriTemplate == "greeting://{name}" assert greeting_template.description == "Get a personalized greeting" profile_template = next(t for t in templates if t.name == "get_user_profile") assert profile_template.uriTemplate == "users://{user_id}/profile" assert profile_template.description == "Dynamic user data" 
================================================
File: tests/issues/test_141_resource_templates.py
================================================

import pytest
from pydantic import AnyUrl

from mcp.server.fastmcp import FastMCP
from mcp.shared.memory import (
    create_connected_server_and_client_session as client_session,
)
from mcp.types import (
    ListResourceTemplatesResult,
    TextResourceContents,
)


@pytest.mark.anyio
async def test_resource_template_edge_cases():
    """Test server-side resource template validation"""
    mcp = FastMCP("Demo")

    # Test case 1: Template with multiple parameters
    @mcp.resource("resource://users/{user_id}/posts/{post_id}")
    def get_user_post(user_id: str, post_id: str) -> str:
        return f"Post {post_id} by user {user_id}"

    # Test case 2: Template with optional parameter (should fail)
    with pytest.raises(ValueError, match="Mismatch between URI parameters"):

        @mcp.resource("resource://users/{user_id}/profile")
        def get_user_profile(user_id: str, optional_param: str | None = None) -> str:
            return f"Profile for user {user_id}"

    # Test case 3: Template with mismatched parameters
    with pytest.raises(ValueError, match="Mismatch between URI parameters"):

        @mcp.resource("resource://users/{user_id}/profile")
        def get_user_profile_mismatch(different_param: str) -> str:
            return f"Profile for user {different_param}"

    # Test case 4: Template with extra function parameters
    with pytest.raises(ValueError, match="Mismatch between URI parameters"):

        @mcp.resource("resource://users/{user_id}/profile")
        def get_user_profile_extra(user_id: str, extra_param: str) -> str:
            return f"Profile for user {user_id}"

    # Test case 5: Template with missing function parameters
    with pytest.raises(ValueError, match="Mismatch between URI parameters"):

        @mcp.resource("resource://users/{user_id}/profile/{section}")
        def get_user_profile_missing(user_id: str) -> str:
            return f"Profile for user {user_id}"

    # Verify valid template works
    result = await mcp.read_resource("resource://users/123/posts/456")
    assert result.content == "Post 456 by user 123"
    assert result.mime_type == "text/plain"

    # Verify invalid parameters raise error
    with pytest.raises(ValueError, match="Unknown resource"):
        await mcp.read_resource("resource://users/123/posts")  # Missing post_id

    with pytest.raises(ValueError, match="Unknown resource"):
        await mcp.read_resource(
            "resource://users/123/posts/456/extra"
        )  # Extra path component


@pytest.mark.anyio
async def test_resource_template_client_interaction():
    """Test client-side resource template interaction"""
    mcp = FastMCP("Demo")

    # Register some templated resources
    @mcp.resource("resource://users/{user_id}/posts/{post_id}")
    def get_user_post(user_id: str, post_id: str) -> str:
        return f"Post {post_id} by user {user_id}"

    @mcp.resource("resource://users/{user_id}/profile")
    def get_user_profile(user_id: str) -> str:
        return f"Profile for user {user_id}"

    async with client_session(mcp._mcp_server) as session:
        # Initialize the session
        await session.initialize()

        # List available resources
        resources = await session.list_resource_templates()
        assert isinstance(resources, ListResourceTemplatesResult)
        assert len(resources.resourceTemplates) == 2

        # Verify resource templates are listed correctly
        templates = [r.uriTemplate for r in resources.resourceTemplates]
        assert "resource://users/{user_id}/posts/{post_id}" in templates
        assert "resource://users/{user_id}/profile" in templates

        # Read a resource with valid parameters
        result = await session.read_resource(AnyUrl("resource://users/123/posts/456"))
        contents = result.contents[0]
        assert isinstance(contents, TextResourceContents)
        assert contents.text == "Post 456 by user 123"
        assert contents.mimeType == "text/plain"

        # Read another resource with valid parameters
        result = await session.read_resource(AnyUrl("resource://users/789/profile"))
        contents = result.contents[0]
        assert isinstance(contents, TextResourceContents)
        assert contents.text == "Profile for user 789"
        assert contents.mimeType == "text/plain"

        # Verify invalid resource URIs raise appropriate errors
        with pytest.raises(Exception):  # Specific exception type may vary
            await session.read_resource(
                AnyUrl("resource://users/123/posts")
            )  # Missing post_id

        with pytest.raises(Exception):  # Specific exception type may vary
            await session.read_resource(
                AnyUrl("resource://users/123/invalid")
            )  # Invalid template


================================================
File: tests/issues/test_152_resource_mime_type.py
================================================

import base64

import pytest
from pydantic import AnyUrl

from mcp import types
from mcp.server.fastmcp import FastMCP
from mcp.server.lowlevel import Server
from mcp.server.lowlevel.helper_types import ReadResourceContents
from mcp.shared.memory import (
    create_connected_server_and_client_session as client_session,
)

pytestmark = pytest.mark.anyio


async def test_fastmcp_resource_mime_type():
    """Test that mime_type parameter is respected for resources."""
    mcp = FastMCP("test")

    # Create a small test image as bytes
    image_bytes = b"fake_image_data"
    base64_string = base64.b64encode(image_bytes).decode("utf-8")

    @mcp.resource("test://image", mime_type="image/png")
    def get_image_as_string() -> str:
        """Return a test image as base64 string."""
        return base64_string

    @mcp.resource("test://image_bytes", mime_type="image/png")
    def get_image_as_bytes() -> bytes:
        """Return a test image as bytes."""
        return image_bytes

    # Test that resources are listed with correct mime type
    async with client_session(mcp._mcp_server) as client:
        # List resources and verify mime types
        resources = await client.list_resources()
        assert resources.resources is not None

        mapping = {str(r.uri): r for r in resources.resources}

        # Find our resources
        string_resource = mapping["test://image"]
        bytes_resource = mapping["test://image_bytes"]

        # Verify mime types
        assert (
            string_resource.mimeType == "image/png"
        ), "String resource mime type not respected"
        assert (
            bytes_resource.mimeType == "image/png"
        ), "Bytes resource mime type not respected"

        # Also verify the content can be read correctly
        string_result = await client.read_resource(AnyUrl("test://image"))
        assert len(string_result.contents) == 1
        assert (
            getattr(string_result.contents[0], "text") == base64_string
        ), "Base64 string mismatch"
        assert (
            string_result.contents[0].mimeType == "image/png"
        ), "String content mime type not preserved"

        bytes_result = await client.read_resource(AnyUrl("test://image_bytes"))
        assert len(bytes_result.contents) == 1
        assert (
            base64.b64decode(getattr(bytes_result.contents[0], "blob")) == image_bytes
        ), "Bytes mismatch"
        assert (
            bytes_result.contents[0].mimeType == "image/png"
        ), "Bytes content mime type not preserved"


async def test_lowlevel_resource_mime_type():
    """Test that mime_type parameter is respected for resources."""
    server = Server("test")

    # Create a small test image as bytes
    image_bytes = b"fake_image_data"
    base64_string = base64.b64encode(image_bytes).decode("utf-8")

    # Create test resources with specific mime types
    test_resources = [
        types.Resource(
            uri=AnyUrl("test://image"), name="test image", mimeType="image/png"
        ),
        types.Resource(
            uri=AnyUrl("test://image_bytes"),
            name="test image bytes",
            mimeType="image/png",
        ),
    ]

    @server.list_resources()
    async def handle_list_resources():
        return test_resources

    @server.read_resource()
    async def handle_read_resource(uri: AnyUrl):
        if str(uri) == "test://image":
            return ReadResourceContents(content=base64_string, mime_type="image/png")
        elif str(uri) == "test://image_bytes":
            return ReadResourceContents(
                content=bytes(image_bytes), mime_type="image/png"
            )
        raise Exception(f"Resource not found: {uri}")

    # Test that resources are listed with correct mime type
    async with client_session(server) as client:
        # List resources and verify mime types
        resources = await client.list_resources()
        assert resources.resources is not None

        mapping = {str(r.uri): r for r in resources.resources}

        # Find our resources
        string_resource = mapping["test://image"]
        bytes_resource =
mapping["test://image_bytes"] # Verify mime types assert ( string_resource.mimeType == "image/png" ), "String resource mime type not respected" assert ( bytes_resource.mimeType == "image/png" ), "Bytes resource mime type not respected" # Also verify the content can be read correctly string_result = await client.read_resource(AnyUrl("test://image")) assert len(string_result.contents) == 1 assert ( getattr(string_result.contents[0], "text") == base64_string ), "Base64 string mismatch" assert ( string_result.contents[0].mimeType == "image/png" ), "String content mime type not preserved" bytes_result = await client.read_resource(AnyUrl("test://image_bytes")) assert len(bytes_result.contents) == 1 assert ( base64.b64decode(getattr(bytes_result.contents[0], "blob")) == image_bytes ), "Bytes mismatch" assert ( bytes_result.contents[0].mimeType == "image/png" ), "Bytes content mime type not preserved" ================================================ File: tests/issues/test_176_progress_token.py ================================================ from unittest.mock import AsyncMock, MagicMock import pytest from mcp.server.fastmcp import Context from mcp.shared.context import RequestContext pytestmark = pytest.mark.anyio async def test_progress_token_zero_first_call(): """Test that progress notifications work when progress_token is 0 on first call.""" # Create mock session with progress notification tracking mock_session = AsyncMock() mock_session.send_progress_notification = AsyncMock() # Create request context with progress token 0 mock_meta = MagicMock() mock_meta.progressToken = 0 # This is the key test case - token is 0 request_context = RequestContext( request_id="test-request", session=mock_session, meta=mock_meta, lifespan_context=None, ) # Create context with our mocks ctx = Context(request_context=request_context, fastmcp=MagicMock()) # Test progress reporting await ctx.report_progress(0, 10) # First call with 0 await ctx.report_progress(5, 10) # Middle progress await 
ctx.report_progress(10, 10) # Complete # Verify progress notifications assert ( mock_session.send_progress_notification.call_count == 3 ), "All progress notifications should be sent" mock_session.send_progress_notification.assert_any_call( progress_token=0, progress=0.0, total=10.0 ) mock_session.send_progress_notification.assert_any_call( progress_token=0, progress=5.0, total=10.0 ) mock_session.send_progress_notification.assert_any_call( progress_token=0, progress=10.0, total=10.0 ) ================================================ File: tests/issues/test_188_concurrency.py ================================================ import anyio from pydantic import AnyUrl from mcp.server.fastmcp import FastMCP from mcp.shared.memory import ( create_connected_server_and_client_session as create_session, ) _sleep_time_seconds = 0.01 _resource_name = "slow://slow_resource" async def test_messages_are_executed_concurrently(): server = FastMCP("test") @server.tool("sleep") async def sleep_tool(): await anyio.sleep(_sleep_time_seconds) return "done" @server.resource(_resource_name) async def slow_resource(): await anyio.sleep(_sleep_time_seconds) return "slow" async with create_session(server._mcp_server) as client_session: start_time = anyio.current_time() async with anyio.create_task_group() as tg: for _ in range(10): tg.start_soon(client_session.call_tool, "sleep") tg.start_soon(client_session.read_resource, AnyUrl(_resource_name)) end_time = anyio.current_time() duration = end_time - start_time assert duration < 3 * _sleep_time_seconds print(duration) def main(): anyio.run(test_messages_are_executed_concurrently) if __name__ == "__main__": import logging logging.basicConfig(level=logging.DEBUG) main() ================================================ File: tests/issues/test_192_request_id.py ================================================ import anyio import pytest from mcp.server.lowlevel import NotificationOptions, Server from mcp.server.models import InitializationOptions 
from mcp.types import ( LATEST_PROTOCOL_VERSION, ClientCapabilities, Implementation, InitializeRequestParams, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, NotificationParams, ) @pytest.mark.anyio async def test_request_id_match() -> None: """Test that the server preserves request IDs in responses.""" server = Server("test") custom_request_id = "test-123" # Create memory streams for communication client_writer, client_reader = anyio.create_memory_object_stream(1) server_writer, server_reader = anyio.create_memory_object_stream(1) # Server task to process the request async def run_server(): async with client_reader, server_writer: await server.run( client_reader, server_writer, InitializationOptions( server_name="test", server_version="1.0.0", capabilities=server.get_capabilities( notification_options=NotificationOptions(), experimental_capabilities={}, ), ), raise_exceptions=True, ) # Start server task async with anyio.create_task_group() as tg: tg.start_soon(run_server) # Send initialize request init_req = JSONRPCRequest( id="init-1", method="initialize", params=InitializeRequestParams( protocolVersion=LATEST_PROTOCOL_VERSION, capabilities=ClientCapabilities(), clientInfo=Implementation(name="test-client", version="1.0.0"), ).model_dump(by_alias=True, exclude_none=True), jsonrpc="2.0", ) await client_writer.send(JSONRPCMessage(root=init_req)) await server_reader.receive() # Get init response but don't need to check it # Send initialized notification initialized_notification = JSONRPCNotification( method="notifications/initialized", params=NotificationParams().model_dump(by_alias=True, exclude_none=True), jsonrpc="2.0", ) await client_writer.send(JSONRPCMessage(root=initialized_notification)) # Send ping request with custom ID ping_request = JSONRPCRequest( id=custom_request_id, method="ping", params={}, jsonrpc="2.0" ) await client_writer.send(JSONRPCMessage(root=ping_request)) # Read response response = await server_reader.receive() # Verify response ID 
matches request ID assert ( response.root.id == custom_request_id ), "Response ID should match request ID" # Cancel server task tg.cancel_scope.cancel() ================================================ File: tests/issues/test_88_random_error.py ================================================ """Test to reproduce issue #88: Random error thrown on response.""" from datetime import timedelta from pathlib import Path from typing import Sequence import anyio import pytest from mcp.client.session import ClientSession from mcp.server.lowlevel import Server from mcp.shared.exceptions import McpError from mcp.types import ( EmbeddedResource, ImageContent, TextContent, ) @pytest.mark.anyio async def test_notification_validation_error(tmp_path: Path): """Test that timeouts are handled gracefully and don't break the server. This test verifies that when a client request times out: 1. The server task stays alive 2. The server can still handle new requests 3. The client can make new requests 4. No resources are leaked """ server = Server(name="test") request_count = 0 slow_request_started = anyio.Event() slow_request_complete = anyio.Event() @server.call_tool() async def slow_tool( name: str, arg ) -> Sequence[TextContent | ImageContent | EmbeddedResource]: nonlocal request_count request_count += 1 if name == "slow": # Signal that slow request has started slow_request_started.set() # Long enough to ensure timeout await anyio.sleep(0.2) # Signal completion slow_request_complete.set() return [TextContent(type="text", text=f"slow {request_count}")] elif name == "fast": # Fast enough to complete before timeout await anyio.sleep(0.01) return [TextContent(type="text", text=f"fast {request_count}")] return [TextContent(type="text", text=f"unknown {request_count}")] async def server_handler(read_stream, write_stream): await server.run( read_stream, write_stream, server.create_initialization_options(), raise_exceptions=True, ) async def client(read_stream, write_stream): # Use a timeout 
that's: # - Long enough for fast operations (>10ms) # - Short enough for slow operations (<200ms) # - Not too short to avoid flakiness async with ClientSession( read_stream, write_stream, read_timeout_seconds=timedelta(milliseconds=50) ) as session: await session.initialize() # First call should work (fast operation) result = await session.call_tool("fast") assert result.content == [TextContent(type="text", text="fast 1")] assert not slow_request_complete.is_set() # Second call should timeout (slow operation) with pytest.raises(McpError) as exc_info: await session.call_tool("slow") assert "Timed out while waiting" in str(exc_info.value) # Wait for slow request to complete in the background with anyio.fail_after(1): # Timeout after 1 second await slow_request_complete.wait() # Third call should work (fast operation), # proving server is still responsive result = await session.call_tool("fast") assert result.content == [TextContent(type="text", text="fast 3")] # Run server and client in separate task groups to avoid cancellation server_writer, server_reader = anyio.create_memory_object_stream(1) client_writer, client_reader = anyio.create_memory_object_stream(1) server_ready = anyio.Event() async def wrapped_server_handler(read_stream, write_stream): server_ready.set() await server_handler(read_stream, write_stream) async with anyio.create_task_group() as tg: tg.start_soon(wrapped_server_handler, server_reader, client_writer) # Wait for server to start and initialize with anyio.fail_after(1): # Timeout after 1 second await server_ready.wait() # Run client in a separate task to avoid cancellation async with anyio.create_task_group() as client_tg: client_tg.start_soon(client, client_reader, server_writer) ================================================ File: tests/server/test_lifespan.py ================================================ """Tests for lifespan functionality in both low-level and FastMCP servers.""" from contextlib import asynccontextmanager from typing 
import AsyncIterator import anyio import pytest from pydantic import TypeAdapter from mcp.server.fastmcp import Context, FastMCP from mcp.server.lowlevel.server import NotificationOptions, Server from mcp.server.models import InitializationOptions from mcp.types import ( ClientCapabilities, Implementation, InitializeRequestParams, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, ) @pytest.mark.anyio async def test_lowlevel_server_lifespan(): """Test that lifespan works in low-level server.""" @asynccontextmanager async def test_lifespan(server: Server) -> AsyncIterator[dict]: """Test lifespan context that tracks startup/shutdown.""" context = {"started": False, "shutdown": False} try: context["started"] = True yield context finally: context["shutdown"] = True server = Server("test", lifespan=test_lifespan) # Create memory streams for testing send_stream1, receive_stream1 = anyio.create_memory_object_stream(100) send_stream2, receive_stream2 = anyio.create_memory_object_stream(100) # Create a tool that accesses lifespan context @server.call_tool() async def check_lifespan(name: str, arguments: dict) -> list: ctx = server.request_context assert isinstance(ctx.lifespan_context, dict) assert ctx.lifespan_context["started"] assert not ctx.lifespan_context["shutdown"] return [{"type": "text", "text": "true"}] # Run server in background task async with anyio.create_task_group() as tg: async def run_server(): await server.run( receive_stream1, send_stream2, InitializationOptions( server_name="test", server_version="0.1.0", capabilities=server.get_capabilities( notification_options=NotificationOptions(), experimental_capabilities={}, ), ), raise_exceptions=True, ) tg.start_soon(run_server) # Initialize the server params = InitializeRequestParams( protocolVersion="2024-11-05", capabilities=ClientCapabilities(), clientInfo=Implementation(name="test-client", version="0.1.0"), ) await send_stream1.send( JSONRPCMessage( root=JSONRPCRequest( jsonrpc="2.0", id=1, 
method="initialize", params=TypeAdapter(InitializeRequestParams).dump_python(params), ) ) ) response = await receive_stream2.receive() # Send initialized notification await send_stream1.send( JSONRPCMessage( root=JSONRPCNotification( jsonrpc="2.0", method="notifications/initialized", ) ) ) # Call the tool to verify lifespan context await send_stream1.send( JSONRPCMessage( root=JSONRPCRequest( jsonrpc="2.0", id=2, method="tools/call", params={"name": "check_lifespan", "arguments": {}}, ) ) ) # Get response and verify response = await receive_stream2.receive() assert response.root.result["content"][0]["text"] == "true" # Cancel server task tg.cancel_scope.cancel() @pytest.mark.anyio async def test_fastmcp_server_lifespan(): """Test that lifespan works in FastMCP server.""" @asynccontextmanager async def test_lifespan(server: FastMCP) -> AsyncIterator[dict]: """Test lifespan context that tracks startup/shutdown.""" context = {"started": False, "shutdown": False} try: context["started"] = True yield context finally: context["shutdown"] = True server = FastMCP("test", lifespan=test_lifespan) # Create memory streams for testing send_stream1, receive_stream1 = anyio.create_memory_object_stream(100) send_stream2, receive_stream2 = anyio.create_memory_object_stream(100) # Add a tool that checks lifespan context @server.tool() def check_lifespan(ctx: Context) -> bool: """Tool that checks lifespan context.""" assert isinstance(ctx.request_context.lifespan_context, dict) assert ctx.request_context.lifespan_context["started"] assert not ctx.request_context.lifespan_context["shutdown"] return True # Run server in background task async with anyio.create_task_group() as tg: async def run_server(): await server._mcp_server.run( receive_stream1, send_stream2, server._mcp_server.create_initialization_options(), raise_exceptions=True, ) tg.start_soon(run_server) # Initialize the server params = InitializeRequestParams( protocolVersion="2024-11-05", capabilities=ClientCapabilities(), 
clientInfo=Implementation(name="test-client", version="0.1.0"), ) await send_stream1.send( JSONRPCMessage( root=JSONRPCRequest( jsonrpc="2.0", id=1, method="initialize", params=TypeAdapter(InitializeRequestParams).dump_python(params), ) ) ) response = await receive_stream2.receive() # Send initialized notification await send_stream1.send( JSONRPCMessage( root=JSONRPCNotification( jsonrpc="2.0", method="notifications/initialized", ) ) ) # Call the tool to verify lifespan context await send_stream1.send( JSONRPCMessage( root=JSONRPCRequest( jsonrpc="2.0", id=2, method="tools/call", params={"name": "check_lifespan", "arguments": {}}, ) ) ) # Get response and verify response = await receive_stream2.receive() assert response.root.result["content"][0]["text"] == "true" # Cancel server task tg.cancel_scope.cancel() ================================================ File: tests/server/test_read_resource.py ================================================ from pathlib import Path from tempfile import NamedTemporaryFile import pytest from pydantic import AnyUrl, FileUrl import mcp.types as types from mcp.server.lowlevel.server import ReadResourceContents, Server @pytest.fixture def temp_file(): """Create a temporary file for testing.""" with NamedTemporaryFile(mode="w", delete=False) as f: f.write("test content") path = Path(f.name).resolve() yield path try: path.unlink() except FileNotFoundError: pass @pytest.mark.anyio async def test_read_resource_text(temp_file: Path): server = Server("test") @server.read_resource() async def read_resource(uri: AnyUrl) -> ReadResourceContents: return ReadResourceContents(content="Hello World", mime_type="text/plain") # Get the handler directly from the server handler = server.request_handlers[types.ReadResourceRequest] # Create a request request = types.ReadResourceRequest( method="resources/read", params=types.ReadResourceRequestParams(uri=FileUrl(temp_file.as_uri())), ) # Call the handler result = await handler(request) assert 
isinstance(result.root, types.ReadResourceResult) assert len(result.root.contents) == 1 content = result.root.contents[0] assert isinstance(content, types.TextResourceContents) assert content.text == "Hello World" assert content.mimeType == "text/plain" @pytest.mark.anyio async def test_read_resource_binary(temp_file: Path): server = Server("test") @server.read_resource() async def read_resource(uri: AnyUrl) -> ReadResourceContents: return ReadResourceContents( content=b"Hello World", mime_type="application/octet-stream" ) # Get the handler directly from the server handler = server.request_handlers[types.ReadResourceRequest] # Create a request request = types.ReadResourceRequest( method="resources/read", params=types.ReadResourceRequestParams(uri=FileUrl(temp_file.as_uri())), ) # Call the handler result = await handler(request) assert isinstance(result.root, types.ReadResourceResult) assert len(result.root.contents) == 1 content = result.root.contents[0] assert isinstance(content, types.BlobResourceContents) assert content.mimeType == "application/octet-stream" @pytest.mark.anyio async def test_read_resource_default_mime(temp_file: Path): server = Server("test") @server.read_resource() async def read_resource(uri: AnyUrl) -> ReadResourceContents: return ReadResourceContents( content="Hello World", # No mime_type specified, should default to text/plain ) # Get the handler directly from the server handler = server.request_handlers[types.ReadResourceRequest] # Create a request request = types.ReadResourceRequest( method="resources/read", params=types.ReadResourceRequestParams(uri=FileUrl(temp_file.as_uri())), ) # Call the handler result = await handler(request) assert isinstance(result.root, types.ReadResourceResult) assert len(result.root.contents) == 1 content = result.root.contents[0] assert isinstance(content, types.TextResourceContents) assert content.text == "Hello World" assert content.mimeType == "text/plain" ================================================ 
File: tests/server/test_session.py ================================================ import anyio import pytest from mcp.client.session import ClientSession from mcp.server import Server from mcp.server.lowlevel import NotificationOptions from mcp.server.models import InitializationOptions from mcp.server.session import ServerSession from mcp.types import ( ClientNotification, InitializedNotification, JSONRPCMessage, PromptsCapability, ResourcesCapability, ServerCapabilities, ) @pytest.mark.anyio async def test_server_session_initialize(): server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[ JSONRPCMessage ](1) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[ JSONRPCMessage ](1) async def run_client(client: ClientSession): async for message in client_session.incoming_messages: if isinstance(message, Exception): raise message received_initialized = False async def run_server(): nonlocal received_initialized async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="mcp", server_version="0.1.0", capabilities=ServerCapabilities(), ), ) as server_session: async for message in server_session.incoming_messages: if isinstance(message, Exception): raise message if isinstance(message, ClientNotification) and isinstance( message.root, InitializedNotification ): received_initialized = True return try: async with ( ClientSession( server_to_client_receive, client_to_server_send ) as client_session, anyio.create_task_group() as tg, ): tg.start_soon(run_client, client_session) tg.start_soon(run_server) await client_session.initialize() except anyio.ClosedResourceError: pass assert received_initialized @pytest.mark.anyio async def test_server_capabilities(): server = Server("test") notification_options = NotificationOptions() experimental_capabilities = {} # Initially no capabilities caps = server.get_capabilities(notification_options, experimental_capabilities) 
assert caps.prompts is None assert caps.resources is None # Add a prompts handler @server.list_prompts() async def list_prompts(): return [] caps = server.get_capabilities(notification_options, experimental_capabilities) assert caps.prompts == PromptsCapability(listChanged=False) assert caps.resources is None # Add a resources handler @server.list_resources() async def list_resources(): return [] caps = server.get_capabilities(notification_options, experimental_capabilities) assert caps.prompts == PromptsCapability(listChanged=False) assert caps.resources == ResourcesCapability(subscribe=False, listChanged=False) ================================================ File: tests/server/test_stdio.py ================================================ import io import anyio import pytest from mcp.server.stdio import stdio_server from mcp.types import JSONRPCMessage, JSONRPCRequest, JSONRPCResponse @pytest.mark.anyio async def test_stdio_server(): stdin = io.StringIO() stdout = io.StringIO() messages = [ JSONRPCMessage(root=JSONRPCRequest(jsonrpc="2.0", id=1, method="ping")), JSONRPCMessage(root=JSONRPCResponse(jsonrpc="2.0", id=2, result={})), ] for message in messages: stdin.write(message.model_dump_json(by_alias=True, exclude_none=True) + "\n") stdin.seek(0) async with stdio_server( stdin=anyio.AsyncFile(stdin), stdout=anyio.AsyncFile(stdout) ) as (read_stream, write_stream): received_messages = [] async with read_stream: async for message in read_stream: if isinstance(message, Exception): raise message received_messages.append(message) if len(received_messages) == 2: break # Verify received messages assert len(received_messages) == 2 assert received_messages[0] == JSONRPCMessage( root=JSONRPCRequest(jsonrpc="2.0", id=1, method="ping") ) assert received_messages[1] == JSONRPCMessage( root=JSONRPCResponse(jsonrpc="2.0", id=2, result={}) ) # Test sending responses from the server responses = [ JSONRPCMessage(root=JSONRPCRequest(jsonrpc="2.0", id=3, method="ping")), 
JSONRPCMessage(root=JSONRPCResponse(jsonrpc="2.0", id=4, result={})), ] async with write_stream: for response in responses: await write_stream.send(response) stdout.seek(0) output_lines = stdout.readlines() assert len(output_lines) == 2 received_responses = [ JSONRPCMessage.model_validate_json(line.strip()) for line in output_lines ] assert len(received_responses) == 2 assert received_responses[0] == JSONRPCMessage( root=JSONRPCRequest(jsonrpc="2.0", id=3, method="ping") ) assert received_responses[1] == JSONRPCMessage( root=JSONRPCResponse(jsonrpc="2.0", id=4, result={}) ) ================================================ File: tests/server/fastmcp/test_func_metadata.py ================================================ from typing import Annotated import annotated_types import pytest from pydantic import BaseModel, Field from mcp.server.fastmcp.utilities.func_metadata import func_metadata class SomeInputModelA(BaseModel): pass class SomeInputModelB(BaseModel): class InnerModel(BaseModel): x: int how_many_shrimp: Annotated[int, Field(description="How many shrimp in the tank???")] ok: InnerModel y: None def complex_arguments_fn( an_int: int, must_be_none: None, must_be_none_dumb_annotation: Annotated[None, "blah"], list_of_ints: list[int], # list[str] | str is an interesting case because if it comes in as JSON like # "[\"a\", \"b\"]" then it will be naively parsed as a string. 
list_str_or_str: list[str] | str, an_int_annotated_with_field: Annotated[ int, Field(description="An int with a field") ], an_int_annotated_with_field_and_others: Annotated[ int, str, # Should be ignored, really Field(description="An int with a field"), annotated_types.Gt(1), ], an_int_annotated_with_junk: Annotated[ int, "123", 456, ], field_with_default_via_field_annotation_before_nondefault_arg: Annotated[ int, Field(1) ], unannotated, my_model_a: SomeInputModelA, my_model_a_forward_ref: "SomeInputModelA", my_model_b: SomeInputModelB, an_int_annotated_with_field_default: Annotated[ int, Field(1, description="An int with a field"), ], unannotated_with_default=5, my_model_a_with_default: SomeInputModelA = SomeInputModelA(), # noqa: B008 an_int_with_default: int = 1, must_be_none_with_default: None = None, an_int_with_equals_field: int = Field(1, ge=0), int_annotated_with_default: Annotated[int, Field(description="hey")] = 5, ) -> str: _ = ( an_int, must_be_none, must_be_none_dumb_annotation, list_of_ints, list_str_or_str, an_int_annotated_with_field, an_int_annotated_with_field_and_others, an_int_annotated_with_junk, field_with_default_via_field_annotation_before_nondefault_arg, unannotated, an_int_annotated_with_field_default, unannotated_with_default, my_model_a, my_model_a_forward_ref, my_model_b, my_model_a_with_default, an_int_with_default, must_be_none_with_default, an_int_with_equals_field, int_annotated_with_default, ) return "ok!" 
@pytest.mark.anyio async def test_complex_function_runtime_arg_validation_non_json(): """Test that basic non-JSON arguments are validated correctly""" meta = func_metadata(complex_arguments_fn) # Test with minimum required arguments result = await meta.call_fn_with_arg_validation( complex_arguments_fn, fn_is_async=False, arguments_to_validate={ "an_int": 1, "must_be_none": None, "must_be_none_dumb_annotation": None, "list_of_ints": [1, 2, 3], "list_str_or_str": "hello", "an_int_annotated_with_field": 42, "an_int_annotated_with_field_and_others": 5, "an_int_annotated_with_junk": 100, "unannotated": "test", "my_model_a": {}, "my_model_a_forward_ref": {}, "my_model_b": {"how_many_shrimp": 5, "ok": {"x": 1}, "y": None}, }, arguments_to_pass_directly=None, ) assert result == "ok!" # Test with invalid types with pytest.raises(ValueError): await meta.call_fn_with_arg_validation( complex_arguments_fn, fn_is_async=False, arguments_to_validate={"an_int": "not an int"}, arguments_to_pass_directly=None, ) @pytest.mark.anyio async def test_complex_function_runtime_arg_validation_with_json(): """Test that JSON string arguments are parsed and validated correctly""" meta = func_metadata(complex_arguments_fn) result = await meta.call_fn_with_arg_validation( complex_arguments_fn, fn_is_async=False, arguments_to_validate={ "an_int": 1, "must_be_none": None, "must_be_none_dumb_annotation": None, "list_of_ints": "[1, 2, 3]", # JSON string "list_str_or_str": '["a", "b", "c"]', # JSON string "an_int_annotated_with_field": 42, "an_int_annotated_with_field_and_others": "5", # JSON string "an_int_annotated_with_junk": 100, "unannotated": "test", "my_model_a": "{}", # JSON string "my_model_a_forward_ref": "{}", # JSON string "my_model_b": '{"how_many_shrimp": 5, "ok": {"x": 1}, "y": null}', }, arguments_to_pass_directly=None, ) assert result == "ok!" def test_str_vs_list_str(): """Test handling of string vs list[str] type annotations. 
This is tricky as '"hello"' can be parsed as a JSON string or a Python string. We want to make sure it's kept as a python string. """ def func_with_str_types(str_or_list: str | list[str]): return str_or_list meta = func_metadata(func_with_str_types) # Test string input for union type result = meta.pre_parse_json({"str_or_list": "hello"}) assert result["str_or_list"] == "hello" # Test string input that contains valid JSON for union type # We want to see here that the JSON-vali string is NOT parsed as JSON, but rather # kept as a raw string result = meta.pre_parse_json({"str_or_list": '"hello"'}) assert result["str_or_list"] == '"hello"' # Test list input for union type result = meta.pre_parse_json({"str_or_list": '["hello", "world"]'}) assert result["str_or_list"] == ["hello", "world"] def test_skip_names(): """Test that skipped parameters are not included in the model""" def func_with_many_params( keep_this: int, skip_this: str, also_keep: float, also_skip: bool ): return keep_this, skip_this, also_keep, also_skip # Skip some parameters meta = func_metadata(func_with_many_params, skip_names=["skip_this", "also_skip"]) # Check model fields assert "keep_this" in meta.arg_model.model_fields assert "also_keep" in meta.arg_model.model_fields assert "skip_this" not in meta.arg_model.model_fields assert "also_skip" not in meta.arg_model.model_fields # Validate that we can call with only non-skipped parameters model: BaseModel = meta.arg_model.model_validate({"keep_this": 1, "also_keep": 2.5}) # type: ignore assert model.keep_this == 1 # type: ignore assert model.also_keep == 2.5 # type: ignore @pytest.mark.anyio async def test_lambda_function(): """Test lambda function schema and validation""" fn = lambda x, y=5: x # noqa: E731 meta = func_metadata(lambda x, y=5: x) # Test schema assert meta.arg_model.model_json_schema() == { "properties": { "x": {"title": "x", "type": "string"}, "y": {"default": 5, "title": "y", "type": "string"}, }, "required": ["x"], "title": 
"<lambda>Arguments", "type": "object", } async def check_call(args): return await meta.call_fn_with_arg_validation( fn, fn_is_async=False, arguments_to_validate=args, arguments_to_pass_directly=None, ) # Basic calls assert await check_call({"x": "hello"}) == "hello" assert await check_call({"x": "hello", "y": "world"}) == "hello" assert await check_call({"x": '"hello"'}) == '"hello"' # Missing required arg with pytest.raises(ValueError): await check_call({"y": "world"}) def test_complex_function_json_schema(): """Test JSON schema generation for complex function arguments. Note: Different versions of pydantic output slightly different JSON Schema formats for model fields with defaults. The format changed in 2.9.0: 1. Before 2.9.0: { "allOf": [{"$ref": "#/$defs/Model"}], "default": {} } 2. Since 2.9.0: { "$ref": "#/$defs/Model", "default": {} } Both formats are valid and functionally equivalent. This test accepts either format to ensure compatibility across our supported pydantic versions. This change in format does not affect runtime behavior since: 1. Both schemas validate the same way 2. The actual model classes and validation logic are unchanged 3. 
func_metadata uses model_validate/model_dump, not the schema directly """ meta = func_metadata(complex_arguments_fn) actual_schema = meta.arg_model.model_json_schema() # Create a copy of the actual schema to normalize normalized_schema = actual_schema.copy() # Normalize the my_model_a_with_default field to handle both pydantic formats if "allOf" in actual_schema["properties"]["my_model_a_with_default"]: normalized_schema["properties"]["my_model_a_with_default"] = { "$ref": "#/$defs/SomeInputModelA", "default": {}, } assert normalized_schema == { "$defs": { "InnerModel": { "properties": {"x": {"title": "X", "type": "integer"}}, "required": ["x"], "title": "InnerModel", "type": "object", }, "SomeInputModelA": { "properties": {}, "title": "SomeInputModelA", "type": "object", }, "SomeInputModelB": { "properties": { "how_many_shrimp": { "description": "How many shrimp in the tank???", "title": "How Many Shrimp", "type": "integer", }, "ok": {"$ref": "#/$defs/InnerModel"}, "y": {"title": "Y", "type": "null"}, }, "required": ["how_many_shrimp", "ok", "y"], "title": "SomeInputModelB", "type": "object", }, }, "properties": { "an_int": {"title": "An Int", "type": "integer"}, "must_be_none": {"title": "Must Be None", "type": "null"}, "must_be_none_dumb_annotation": { "title": "Must Be None Dumb Annotation", "type": "null", }, "list_of_ints": { "items": {"type": "integer"}, "title": "List Of Ints", "type": "array", }, "list_str_or_str": { "anyOf": [ {"items": {"type": "string"}, "type": "array"}, {"type": "string"}, ], "title": "List Str Or Str", }, "an_int_annotated_with_field": { "description": "An int with a field", "title": "An Int Annotated With Field", "type": "integer", }, "an_int_annotated_with_field_and_others": { "description": "An int with a field", "exclusiveMinimum": 1, "title": "An Int Annotated With Field And Others", "type": "integer", }, "an_int_annotated_with_junk": { "title": "An Int Annotated With Junk", "type": "integer", }, 
"field_with_default_via_field_annotation_before_nondefault_arg": { "default": 1, "title": "Field With Default Via Field Annotation Before Nondefault Arg", "type": "integer", }, "unannotated": {"title": "unannotated", "type": "string"}, "my_model_a": {"$ref": "#/$defs/SomeInputModelA"}, "my_model_a_forward_ref": {"$ref": "#/$defs/SomeInputModelA"}, "my_model_b": {"$ref": "#/$defs/SomeInputModelB"}, "an_int_annotated_with_field_default": { "default": 1, "description": "An int with a field", "title": "An Int Annotated With Field Default", "type": "integer", }, "unannotated_with_default": { "default": 5, "title": "unannotated_with_default", "type": "string", }, "my_model_a_with_default": { "$ref": "#/$defs/SomeInputModelA", "default": {}, }, "an_int_with_default": { "default": 1, "title": "An Int With Default", "type": "integer", }, "must_be_none_with_default": { "default": None, "title": "Must Be None With Default", "type": "null", }, "an_int_with_equals_field": { "default": 1, "minimum": 0, "title": "An Int With Equals Field", "type": "integer", }, "int_annotated_with_default": { "default": 5, "description": "hey", "title": "Int Annotated With Default", "type": "integer", }, }, "required": [ "an_int", "must_be_none", "must_be_none_dumb_annotation", "list_of_ints", "list_str_or_str", "an_int_annotated_with_field", "an_int_annotated_with_field_and_others", "an_int_annotated_with_junk", "unannotated", "my_model_a", "my_model_a_forward_ref", "my_model_b", ], "title": "complex_arguments_fnArguments", "type": "object", } ================================================ File: tests/server/fastmcp/test_parameter_descriptions.py ================================================ """Test that parameter descriptions are properly exposed through list_tools""" import pytest from pydantic import Field from mcp.server.fastmcp import FastMCP @pytest.mark.anyio async def test_parameter_descriptions(): mcp = FastMCP("Test Server") @mcp.tool() def greet( name: str = 
Field(description="The name to greet"), title: str = Field(description="Optional title", default=""), ) -> str: """A greeting tool""" return f"Hello {title} {name}" tools = await mcp.list_tools() assert len(tools) == 1 tool = tools[0] # Check that parameter descriptions are present in the schema properties = tool.inputSchema["properties"] assert "name" in properties assert properties["name"]["description"] == "The name to greet" assert "title" in properties assert properties["title"]["description"] == "Optional title" ================================================ File: tests/server/fastmcp/test_server.py ================================================ import base64 from pathlib import Path from typing import TYPE_CHECKING, Union import pytest from pydantic import AnyUrl from mcp.server.fastmcp import Context, FastMCP from mcp.server.fastmcp.prompts.base import EmbeddedResource, Message, UserMessage from mcp.server.fastmcp.resources import FileResource, FunctionResource from mcp.server.fastmcp.utilities.types import Image from mcp.shared.exceptions import McpError from mcp.shared.memory import ( create_connected_server_and_client_session as client_session, ) from mcp.types import ( BlobResourceContents, ImageContent, TextContent, TextResourceContents, ) if TYPE_CHECKING: from mcp.server.fastmcp import Context class TestServer: @pytest.mark.anyio async def test_create_server(self): mcp = FastMCP(instructions="Server instructions") assert mcp.name == "FastMCP" assert mcp.instructions == "Server instructions" @pytest.mark.anyio async def test_non_ascii_description(self): """Test that FastMCP handles non-ASCII characters in descriptions correctly""" mcp = FastMCP() @mcp.tool( description=( "🌟 This tool uses emojis and UTF-8 characters: á é í ó ú ñ 漢字 🎉" ) ) def hello_world(name: str = "世界") -> str: return f"¡Hola, {name}! 
👋" async with client_session(mcp._mcp_server) as client: tools = await client.list_tools() assert len(tools.tools) == 1 tool = tools.tools[0] assert tool.description is not None assert "🌟" in tool.description assert "漢字" in tool.description assert "🎉" in tool.description result = await client.call_tool("hello_world", {}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "¡Hola, 世界! 👋" == content.text @pytest.mark.anyio async def test_add_tool_decorator(self): mcp = FastMCP() @mcp.tool() def add(x: int, y: int) -> int: return x + y assert len(mcp._tool_manager.list_tools()) == 1 @pytest.mark.anyio async def test_add_tool_decorator_incorrect_usage(self): mcp = FastMCP() with pytest.raises(TypeError, match="The @tool decorator was used incorrectly"): @mcp.tool # Missing parentheses #type: ignore def add(x: int, y: int) -> int: return x + y @pytest.mark.anyio async def test_add_resource_decorator(self): mcp = FastMCP() @mcp.resource("r://{x}") def get_data(x: str) -> str: return f"Data: {x}" assert len(mcp._resource_manager._templates) == 1 @pytest.mark.anyio async def test_add_resource_decorator_incorrect_usage(self): mcp = FastMCP() with pytest.raises( TypeError, match="The @resource decorator was used incorrectly" ): @mcp.resource # Missing parentheses #type: ignore def get_data(x: str) -> str: return f"Data: {x}" def tool_fn(x: int, y: int) -> int: return x + y def error_tool_fn() -> None: raise ValueError("Test error") def image_tool_fn(path: str) -> Image: return Image(path) def mixed_content_tool_fn() -> list[Union[TextContent, ImageContent]]: return [ TextContent(type="text", text="Hello"), ImageContent(type="image", data="abc", mimeType="image/png"), ] class TestServerTools: @pytest.mark.anyio async def test_add_tool(self): mcp = FastMCP() mcp.add_tool(tool_fn) mcp.add_tool(tool_fn) assert len(mcp._tool_manager.list_tools()) == 1 @pytest.mark.anyio async def test_list_tools(self): mcp = FastMCP() 
mcp.add_tool(tool_fn) async with client_session(mcp._mcp_server) as client: tools = await client.list_tools() assert len(tools.tools) == 1 @pytest.mark.anyio async def test_call_tool(self): mcp = FastMCP() mcp.add_tool(tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("my_tool", {"arg1": "value"}) assert not hasattr(result, "error") assert len(result.content) > 0 @pytest.mark.anyio async def test_tool_exception_handling(self): mcp = FastMCP() mcp.add_tool(error_tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("error_tool_fn", {}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "Test error" in content.text assert result.isError is True @pytest.mark.anyio async def test_tool_error_handling(self): mcp = FastMCP() mcp.add_tool(error_tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("error_tool_fn", {}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "Test error" in content.text assert result.isError is True @pytest.mark.anyio async def test_tool_error_details(self): """Test that exception details are properly formatted in the response""" mcp = FastMCP() mcp.add_tool(error_tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("error_tool_fn", {}) content = result.content[0] assert isinstance(content, TextContent) assert isinstance(content.text, str) assert "Test error" in content.text assert result.isError is True @pytest.mark.anyio async def test_tool_return_value_conversion(self): mcp = FastMCP() mcp.add_tool(tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("tool_fn", {"x": 1, "y": 2}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert content.text == "3" @pytest.mark.anyio 
async def test_tool_image_helper(self, tmp_path: Path): # Create a test image image_path = tmp_path / "test.png" image_path.write_bytes(b"fake png data") mcp = FastMCP() mcp.add_tool(image_tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("image_tool_fn", {"path": str(image_path)}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, ImageContent) assert content.type == "image" assert content.mimeType == "image/png" # Verify base64 encoding decoded = base64.b64decode(content.data) assert decoded == b"fake png data" @pytest.mark.anyio async def test_tool_mixed_content(self): mcp = FastMCP() mcp.add_tool(mixed_content_tool_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("mixed_content_tool_fn", {}) assert len(result.content) == 2 content1 = result.content[0] content2 = result.content[1] assert isinstance(content1, TextContent) assert content1.text == "Hello" assert isinstance(content2, ImageContent) assert content2.mimeType == "image/png" assert content2.data == "abc" @pytest.mark.anyio async def test_tool_mixed_list_with_image(self, tmp_path: Path): """Test that lists containing Image objects and other types are handled correctly""" # Create a test image image_path = tmp_path / "test.png" image_path.write_bytes(b"test image data") def mixed_list_fn() -> list: return [ "text message", Image(image_path), {"key": "value"}, TextContent(type="text", text="direct content"), ] mcp = FastMCP() mcp.add_tool(mixed_list_fn) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("mixed_list_fn", {}) assert len(result.content) == 4 # Check text conversion content1 = result.content[0] assert isinstance(content1, TextContent) assert content1.text == "text message" # Check image conversion content2 = result.content[1] assert isinstance(content2, ImageContent) assert content2.mimeType == "image/png" assert 
base64.b64decode(content2.data) == b"test image data" # Check dict conversion content3 = result.content[2] assert isinstance(content3, TextContent) assert '"key": "value"' in content3.text # Check direct TextContent content4 = result.content[3] assert isinstance(content4, TextContent) assert content4.text == "direct content" class TestServerResources: @pytest.mark.anyio async def test_text_resource(self): mcp = FastMCP() def get_text(): return "Hello, world!" resource = FunctionResource( uri=AnyUrl("resource://test"), name="test", fn=get_text ) mcp.add_resource(resource) async with client_session(mcp._mcp_server) as client: result = await client.read_resource(AnyUrl("resource://test")) assert isinstance(result.contents[0], TextResourceContents) assert result.contents[0].text == "Hello, world!" @pytest.mark.anyio async def test_binary_resource(self): mcp = FastMCP() def get_binary(): return b"Binary data" resource = FunctionResource( uri=AnyUrl("resource://binary"), name="binary", fn=get_binary, mime_type="application/octet-stream", ) mcp.add_resource(resource) async with client_session(mcp._mcp_server) as client: result = await client.read_resource(AnyUrl("resource://binary")) assert isinstance(result.contents[0], BlobResourceContents) assert result.contents[0].blob == base64.b64encode(b"Binary data").decode() @pytest.mark.anyio async def test_file_resource_text(self, tmp_path: Path): mcp = FastMCP() # Create a text file text_file = tmp_path / "test.txt" text_file.write_text("Hello from file!") resource = FileResource( uri=AnyUrl("file://test.txt"), name="test.txt", path=text_file ) mcp.add_resource(resource) async with client_session(mcp._mcp_server) as client: result = await client.read_resource(AnyUrl("file://test.txt")) assert isinstance(result.contents[0], TextResourceContents) assert result.contents[0].text == "Hello from file!" 
@pytest.mark.anyio async def test_file_resource_binary(self, tmp_path: Path): mcp = FastMCP() # Create a binary file binary_file = tmp_path / "test.bin" binary_file.write_bytes(b"Binary file data") resource = FileResource( uri=AnyUrl("file://test.bin"), name="test.bin", path=binary_file, mime_type="application/octet-stream", ) mcp.add_resource(resource) async with client_session(mcp._mcp_server) as client: result = await client.read_resource(AnyUrl("file://test.bin")) assert isinstance(result.contents[0], BlobResourceContents) assert ( result.contents[0].blob == base64.b64encode(b"Binary file data").decode() ) class TestServerResourceTemplates: @pytest.mark.anyio async def test_resource_with_params(self): """Test that a resource with function parameters raises an error if the URI parameters don't match""" mcp = FastMCP() with pytest.raises(ValueError, match="Mismatch between URI parameters"): @mcp.resource("resource://data") def get_data_fn(param: str) -> str: return f"Data: {param}" @pytest.mark.anyio async def test_resource_with_uri_params(self): """Test that a resource with URI parameters is automatically a template""" mcp = FastMCP() with pytest.raises(ValueError, match="Mismatch between URI parameters"): @mcp.resource("resource://{param}") def get_data() -> str: return "Data" @pytest.mark.anyio async def test_resource_with_untyped_params(self): """Test that a resource with untyped parameters raises an error""" mcp = FastMCP() @mcp.resource("resource://{param}") def get_data(param) -> str: return "Data" @pytest.mark.anyio async def test_resource_matching_params(self): """Test that a resource with matching URI and function parameters works""" mcp = FastMCP() @mcp.resource("resource://{name}/data") def get_data(name: str) -> str: return f"Data for {name}" async with client_session(mcp._mcp_server) as client: result = await client.read_resource(AnyUrl("resource://test/data")) assert isinstance(result.contents[0], TextResourceContents) assert 
result.contents[0].text == "Data for test" @pytest.mark.anyio async def test_resource_mismatched_params(self): """Test that mismatched parameters raise an error""" mcp = FastMCP() with pytest.raises(ValueError, match="Mismatch between URI parameters"): @mcp.resource("resource://{name}/data") def get_data(user: str) -> str: return f"Data for {user}" @pytest.mark.anyio async def test_resource_multiple_params(self): """Test that multiple parameters work correctly""" mcp = FastMCP() @mcp.resource("resource://{org}/{repo}/data") def get_data(org: str, repo: str) -> str: return f"Data for {org}/{repo}" async with client_session(mcp._mcp_server) as client: result = await client.read_resource( AnyUrl("resource://cursor/fastmcp/data") ) assert isinstance(result.contents[0], TextResourceContents) assert result.contents[0].text == "Data for cursor/fastmcp" @pytest.mark.anyio async def test_resource_multiple_mismatched_params(self): """Test that mismatched parameters raise an error""" mcp = FastMCP() with pytest.raises(ValueError, match="Mismatch between URI parameters"): @mcp.resource("resource://{org}/{repo}/data") def get_data_mismatched(org: str, repo_2: str) -> str: return f"Data for {org}" """Test that a resource with no parameters works as a regular resource""" mcp = FastMCP() @mcp.resource("resource://static") def get_static_data() -> str: return "Static data" async with client_session(mcp._mcp_server) as client: result = await client.read_resource(AnyUrl("resource://static")) assert isinstance(result.contents[0], TextResourceContents) assert result.contents[0].text == "Static data" @pytest.mark.anyio async def test_template_to_resource_conversion(self): """Test that templates are properly converted to resources when accessed""" mcp = FastMCP() @mcp.resource("resource://{name}/data") def get_data(name: str) -> str: return f"Data for {name}" # Should be registered as a template assert len(mcp._resource_manager._templates) == 1 assert len(await mcp.list_resources()) == 0 
# When accessed, should create a concrete resource resource = await mcp._resource_manager.get_resource("resource://test/data") assert isinstance(resource, FunctionResource) result = await resource.read() assert result == "Data for test" class TestContextInjection: """Test context injection in tools.""" @pytest.mark.anyio async def test_context_detection(self): """Test that context parameters are properly detected.""" mcp = FastMCP() def tool_with_context(x: int, ctx: Context) -> str: return f"Request {ctx.request_id}: {x}" tool = mcp._tool_manager.add_tool(tool_with_context) assert tool.context_kwarg == "ctx" @pytest.mark.anyio async def test_context_injection(self): """Test that context is properly injected into tool calls.""" mcp = FastMCP() def tool_with_context(x: int, ctx: Context) -> str: assert ctx.request_id is not None return f"Request {ctx.request_id}: {x}" mcp.add_tool(tool_with_context) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("tool_with_context", {"x": 42}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "Request" in content.text assert "42" in content.text @pytest.mark.anyio async def test_async_context(self): """Test that context works in async functions.""" mcp = FastMCP() async def async_tool(x: int, ctx: Context) -> str: assert ctx.request_id is not None return f"Async request {ctx.request_id}: {x}" mcp.add_tool(async_tool) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("async_tool", {"x": 42}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "Async request" in content.text assert "42" in content.text @pytest.mark.anyio async def test_context_logging(self): from unittest.mock import patch import mcp.server.session """Test that context logging methods work.""" mcp = FastMCP() async def logging_tool(msg: str, ctx: Context) -> str: await 
ctx.debug("Debug message") await ctx.info("Info message") await ctx.warning("Warning message") await ctx.error("Error message") return f"Logged messages for {msg}" mcp.add_tool(logging_tool) with patch("mcp.server.session.ServerSession.send_log_message") as mock_log: async with client_session(mcp._mcp_server) as client: result = await client.call_tool("logging_tool", {"msg": "test"}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "Logged messages for test" in content.text assert mock_log.call_count == 4 mock_log.assert_any_call( level="debug", data="Debug message", logger=None ) mock_log.assert_any_call(level="info", data="Info message", logger=None) mock_log.assert_any_call( level="warning", data="Warning message", logger=None ) mock_log.assert_any_call( level="error", data="Error message", logger=None ) @pytest.mark.anyio async def test_optional_context(self): """Test that context is optional.""" mcp = FastMCP() def no_context(x: int) -> int: return x * 2 mcp.add_tool(no_context) async with client_session(mcp._mcp_server) as client: result = await client.call_tool("no_context", {"x": 21}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert content.text == "42" @pytest.mark.anyio async def test_context_resource_access(self): """Test that context can access resources.""" mcp = FastMCP() @mcp.resource("test://data") def test_resource() -> str: return "resource data" @mcp.tool() async def tool_with_resource(ctx: Context) -> str: r = await ctx.read_resource("test://data") return f"Read resource: {r.content} with mime type {r.mime_type}" async with client_session(mcp._mcp_server) as client: result = await client.call_tool("tool_with_resource", {}) assert len(result.content) == 1 content = result.content[0] assert isinstance(content, TextContent) assert "Read resource: resource data" in content.text class TestServerPrompts: """Test prompt functionality in 
FastMCP server.""" @pytest.mark.anyio async def test_prompt_decorator(self): """Test that the prompt decorator registers prompts correctly.""" mcp = FastMCP() @mcp.prompt() def fn() -> str: return "Hello, world!" prompts = mcp._prompt_manager.list_prompts() assert len(prompts) == 1 assert prompts[0].name == "fn" # Don't compare functions directly since validate_call wraps them content = await prompts[0].render() assert isinstance(content[0].content, TextContent) assert content[0].content.text == "Hello, world!" @pytest.mark.anyio async def test_prompt_decorator_with_name(self): """Test prompt decorator with custom name.""" mcp = FastMCP() @mcp.prompt(name="custom_name") def fn() -> str: return "Hello, world!" prompts = mcp._prompt_manager.list_prompts() assert len(prompts) == 1 assert prompts[0].name == "custom_name" content = await prompts[0].render() assert isinstance(content[0].content, TextContent) assert content[0].content.text == "Hello, world!" @pytest.mark.anyio async def test_prompt_decorator_with_description(self): """Test prompt decorator with custom description.""" mcp = FastMCP() @mcp.prompt(description="A custom description") def fn() -> str: return "Hello, world!" prompts = mcp._prompt_manager.list_prompts() assert len(prompts) == 1 assert prompts[0].description == "A custom description" content = await prompts[0].render() assert isinstance(content[0].content, TextContent) assert content[0].content.text == "Hello, world!" def test_prompt_decorator_error(self): """Test error when decorator is used incorrectly.""" mcp = FastMCP() with pytest.raises(TypeError, match="decorator was used incorrectly"): @mcp.prompt # type: ignore def fn() -> str: return "Hello, world!" @pytest.mark.anyio async def test_list_prompts(self): """Test listing prompts through MCP protocol.""" mcp = FastMCP() @mcp.prompt() def fn(name: str, optional: str = "default") -> str: return f"Hello, {name}!" 
async with client_session(mcp._mcp_server) as client: result = await client.list_prompts() assert result.prompts is not None assert len(result.prompts) == 1 prompt = result.prompts[0] assert prompt.name == "fn" assert prompt.arguments is not None assert len(prompt.arguments) == 2 assert prompt.arguments[0].name == "name" assert prompt.arguments[0].required is True assert prompt.arguments[1].name == "optional" assert prompt.arguments[1].required is False @pytest.mark.anyio async def test_get_prompt(self): """Test getting a prompt through MCP protocol.""" mcp = FastMCP() @mcp.prompt() def fn(name: str) -> str: return f"Hello, {name}!" async with client_session(mcp._mcp_server) as client: result = await client.get_prompt("fn", {"name": "World"}) assert len(result.messages) == 1 message = result.messages[0] assert message.role == "user" content = message.content assert isinstance(content, TextContent) assert content.text == "Hello, World!" @pytest.mark.anyio async def test_get_prompt_with_resource(self): """Test getting a prompt that returns resource content.""" mcp = FastMCP() @mcp.prompt() def fn() -> Message: return UserMessage( content=EmbeddedResource( type="resource", resource=TextResourceContents( uri=AnyUrl("file://file.txt"), text="File contents", mimeType="text/plain", ), ) ) async with client_session(mcp._mcp_server) as client: result = await client.get_prompt("fn") assert len(result.messages) == 1 message = result.messages[0] assert message.role == "user" content = message.content assert isinstance(content, EmbeddedResource) resource = content.resource assert isinstance(resource, TextResourceContents) assert resource.text == "File contents" assert resource.mimeType == "text/plain" @pytest.mark.anyio async def test_get_unknown_prompt(self): """Test error when getting unknown prompt.""" mcp = FastMCP() async with client_session(mcp._mcp_server) as client: with pytest.raises(McpError, match="Unknown prompt"): await client.get_prompt("unknown") 
@pytest.mark.anyio async def test_get_prompt_missing_args(self): """Test error when required arguments are missing.""" mcp = FastMCP() @mcp.prompt() def prompt_fn(name: str) -> str: return f"Hello, {name}!" async with client_session(mcp._mcp_server) as client: with pytest.raises(McpError, match="Missing required arguments"): await client.get_prompt("prompt_fn") ================================================ File: tests/server/fastmcp/test_tool_manager.py ================================================ import json import logging from typing import Optional import pytest from pydantic import BaseModel from mcp.server.fastmcp.exceptions import ToolError from mcp.server.fastmcp.tools import ToolManager class TestAddTools: def test_basic_function(self): """Test registering and running a basic function.""" def add(a: int, b: int) -> int: """Add two numbers.""" return a + b manager = ToolManager() manager.add_tool(add) tool = manager.get_tool("add") assert tool is not None assert tool.name == "add" assert tool.description == "Add two numbers." assert tool.is_async is False assert tool.parameters["properties"]["a"]["type"] == "integer" assert tool.parameters["properties"]["b"]["type"] == "integer" @pytest.mark.anyio async def test_async_function(self): """Test registering and running an async function.""" async def fetch_data(url: str) -> str: """Fetch data from URL.""" return f"Data from {url}" manager = ToolManager() manager.add_tool(fetch_data) tool = manager.get_tool("fetch_data") assert tool is not None assert tool.name == "fetch_data" assert tool.description == "Fetch data from URL." 
assert tool.is_async is True assert tool.parameters["properties"]["url"]["type"] == "string" def test_pydantic_model_function(self): """Test registering a function that takes a Pydantic model.""" class UserInput(BaseModel): name: str age: int def create_user(user: UserInput, flag: bool) -> dict: """Create a new user.""" return {"id": 1, **user.model_dump()} manager = ToolManager() manager.add_tool(create_user) tool = manager.get_tool("create_user") assert tool is not None assert tool.name == "create_user" assert tool.description == "Create a new user." assert tool.is_async is False assert "name" in tool.parameters["$defs"]["UserInput"]["properties"] assert "age" in tool.parameters["$defs"]["UserInput"]["properties"] assert "flag" in tool.parameters["properties"] def test_add_invalid_tool(self): manager = ToolManager() with pytest.raises(AttributeError): manager.add_tool(1) # type: ignore def test_add_lambda(self): manager = ToolManager() tool = manager.add_tool(lambda x: x, name="my_tool") assert tool.name == "my_tool" def test_add_lambda_with_no_name(self): manager = ToolManager() with pytest.raises( ValueError, match="You must provide a name for lambda functions" ): manager.add_tool(lambda x: x) def test_warn_on_duplicate_tools(self, caplog): """Test warning on duplicate tools.""" def f(x: int) -> int: return x manager = ToolManager() manager.add_tool(f) with caplog.at_level(logging.WARNING): manager.add_tool(f) assert "Tool already exists: f" in caplog.text def test_disable_warn_on_duplicate_tools(self, caplog): """Test disabling warning on duplicate tools.""" def f(x: int) -> int: return x manager = ToolManager() manager.add_tool(f) manager.warn_on_duplicate_tools = False with caplog.at_level(logging.WARNING): manager.add_tool(f) assert "Tool already exists: f" not in caplog.text class TestCallTools: @pytest.mark.anyio async def test_call_tool(self): def add(a: int, b: int) -> int: """Add two numbers.""" return a + b manager = ToolManager() 
manager.add_tool(add)
        result = await manager.call_tool("add", {"a": 1, "b": 2})
        assert result == 3

    @pytest.mark.anyio
    async def test_call_async_tool(self):
        """Async tool functions are awaited by call_tool."""

        async def double(n: int) -> int:
            """Double a number."""
            return n * 2

        manager = ToolManager()
        manager.add_tool(double)
        result = await manager.call_tool("double", {"n": 5})
        assert result == 10

    @pytest.mark.anyio
    async def test_call_tool_with_default_args(self):
        """Omitted arguments fall back to the function's defaults."""

        def add(a: int, b: int = 1) -> int:
            """Add two numbers."""
            return a + b

        manager = ToolManager()
        manager.add_tool(add)
        result = await manager.call_tool("add", {"a": 1})
        assert result == 2

    @pytest.mark.anyio
    async def test_call_tool_with_missing_args(self):
        """A missing required argument raises ToolError, not TypeError."""

        def add(a: int, b: int) -> int:
            """Add two numbers."""
            return a + b

        manager = ToolManager()
        manager.add_tool(add)
        with pytest.raises(ToolError):
            await manager.call_tool("add", {"a": 1})

    @pytest.mark.anyio
    async def test_call_unknown_tool(self):
        """Calling an unregistered tool name raises ToolError."""
        manager = ToolManager()
        with pytest.raises(ToolError):
            await manager.call_tool("unknown", {"a": 1})

    @pytest.mark.anyio
    async def test_call_tool_with_list_int_input(self):
        """list[int] arguments are accepted as real lists or JSON strings."""

        def sum_vals(vals: list[int]) -> int:
            return sum(vals)

        manager = ToolManager()
        manager.add_tool(sum_vals)
        # Try both with plain list and with JSON list
        result = await manager.call_tool("sum_vals", {"vals": "[1, 2, 3]"})
        assert result == 6
        result = await manager.call_tool("sum_vals", {"vals": [1, 2, 3]})
        assert result == 6

    @pytest.mark.anyio
    async def test_call_tool_with_list_str_or_str_input(self):
        """Union-typed (list[str] | str) arguments keep their string form.

        A bare string must NOT be JSON-decoded when `str` is a valid member
        of the union.
        """

        def concat_strs(vals: list[str] | str) -> str:
            return vals if isinstance(vals, str) else "".join(vals)

        manager = ToolManager()
        manager.add_tool(concat_strs)
        # Try both with plain python object and with JSON list
        result = await manager.call_tool("concat_strs", {"vals": ["a", "b", "c"]})
        assert result == "abc"
        result = await manager.call_tool("concat_strs", {"vals": '["a", "b", "c"]'})
        assert result == "abc"
        result = await manager.call_tool("concat_strs", {"vals": "a"})
        assert result == "a"
        # '"a"' stays a literal 5-char... 4-char string, not decoded to "a".
        result = await
manager.call_tool("concat_strs", {"vals": '"a"'}) assert result == '"a"' @pytest.mark.anyio async def test_call_tool_with_complex_model(self): from mcp.server.fastmcp import Context class MyShrimpTank(BaseModel): class Shrimp(BaseModel): name: str shrimp: list[Shrimp] x: None def name_shrimp(tank: MyShrimpTank, ctx: Context) -> list[str]: return [x.name for x in tank.shrimp] manager = ToolManager() manager.add_tool(name_shrimp) result = await manager.call_tool( "name_shrimp", {"tank": {"x": None, "shrimp": [{"name": "rex"}, {"name": "gertrude"}]}}, ) assert result == ["rex", "gertrude"] result = await manager.call_tool( "name_shrimp", {"tank": '{"x": null, "shrimp": [{"name": "rex"}, {"name": "gertrude"}]}'}, ) assert result == ["rex", "gertrude"] class TestToolSchema: @pytest.mark.anyio async def test_context_arg_excluded_from_schema(self): from mcp.server.fastmcp import Context def something(a: int, ctx: Context) -> int: return a manager = ToolManager() tool = manager.add_tool(something) assert "ctx" not in json.dumps(tool.parameters) assert "Context" not in json.dumps(tool.parameters) assert "ctx" not in tool.fn_metadata.arg_model.model_fields class TestContextHandling: """Test context handling in the tool manager.""" def test_context_parameter_detection(self): """Test that context parameters are properly detected in Tool.from_function().""" from mcp.server.fastmcp import Context def tool_with_context(x: int, ctx: Context) -> str: return str(x) manager = ToolManager() tool = manager.add_tool(tool_with_context) assert tool.context_kwarg == "ctx" def tool_without_context(x: int) -> str: return str(x) tool = manager.add_tool(tool_without_context) assert tool.context_kwarg is None @pytest.mark.anyio async def test_context_injection(self): """Test that context is properly injected during tool execution.""" from mcp.server.fastmcp import Context, FastMCP def tool_with_context(x: int, ctx: Context) -> str: assert isinstance(ctx, Context) return str(x) manager = 
ToolManager() manager.add_tool(tool_with_context) mcp = FastMCP() ctx = mcp.get_context() result = await manager.call_tool("tool_with_context", {"x": 42}, context=ctx) assert result == "42" @pytest.mark.anyio async def test_context_injection_async(self): """Test that context is properly injected in async tools.""" from mcp.server.fastmcp import Context, FastMCP async def async_tool(x: int, ctx: Context) -> str: assert isinstance(ctx, Context) return str(x) manager = ToolManager() manager.add_tool(async_tool) mcp = FastMCP() ctx = mcp.get_context() result = await manager.call_tool("async_tool", {"x": 42}, context=ctx) assert result == "42" @pytest.mark.anyio async def test_context_optional(self): """Test that context is optional when calling tools.""" from mcp.server.fastmcp import Context def tool_with_context(x: int, ctx: Optional[Context] = None) -> str: return str(x) manager = ToolManager() manager.add_tool(tool_with_context) # Should not raise an error when context is not provided result = await manager.call_tool("tool_with_context", {"x": 42}) assert result == "42" @pytest.mark.anyio async def test_context_error_handling(self): """Test error handling when context injection fails.""" from mcp.server.fastmcp import Context, FastMCP def tool_with_context(x: int, ctx: Context) -> str: raise ValueError("Test error") manager = ToolManager() manager.add_tool(tool_with_context) mcp = FastMCP() ctx = mcp.get_context() with pytest.raises(ToolError, match="Error executing tool tool_with_context"): await manager.call_tool("tool_with_context", {"x": 42}, context=ctx) ================================================ File: tests/server/fastmcp/prompts/test_base.py ================================================ import pytest from pydantic import FileUrl from mcp.server.fastmcp.prompts.base import ( AssistantMessage, Message, Prompt, TextContent, UserMessage, ) from mcp.types import EmbeddedResource, TextResourceContents class TestRenderPrompt: @pytest.mark.anyio async def 
test_basic_fn(self): def fn() -> str: return "Hello, world!" prompt = Prompt.from_function(fn) assert await prompt.render() == [ UserMessage(content=TextContent(type="text", text="Hello, world!")) ] @pytest.mark.anyio async def test_async_fn(self): async def fn() -> str: return "Hello, world!" prompt = Prompt.from_function(fn) assert await prompt.render() == [ UserMessage(content=TextContent(type="text", text="Hello, world!")) ] @pytest.mark.anyio async def test_fn_with_args(self): async def fn(name: str, age: int = 30) -> str: return f"Hello, {name}! You're {age} years old." prompt = Prompt.from_function(fn) assert await prompt.render(arguments=dict(name="World")) == [ UserMessage( content=TextContent( type="text", text="Hello, World! You're 30 years old." ) ) ] @pytest.mark.anyio async def test_fn_with_invalid_kwargs(self): async def fn(name: str, age: int = 30) -> str: return f"Hello, {name}! You're {age} years old." prompt = Prompt.from_function(fn) with pytest.raises(ValueError): await prompt.render(arguments=dict(age=40)) @pytest.mark.anyio async def test_fn_returns_message(self): async def fn() -> UserMessage: return UserMessage(content="Hello, world!") prompt = Prompt.from_function(fn) assert await prompt.render() == [ UserMessage(content=TextContent(type="text", text="Hello, world!")) ] @pytest.mark.anyio async def test_fn_returns_assistant_message(self): async def fn() -> AssistantMessage: return AssistantMessage( content=TextContent(type="text", text="Hello, world!") ) prompt = Prompt.from_function(fn) assert await prompt.render() == [ AssistantMessage(content=TextContent(type="text", text="Hello, world!")) ] @pytest.mark.anyio async def test_fn_returns_multiple_messages(self): expected = [ UserMessage("Hello, world!"), AssistantMessage("How can I help you today?"), UserMessage("I'm looking for a restaurant in the center of town."), ] async def fn() -> list[Message]: return expected prompt = Prompt.from_function(fn) assert await prompt.render() == 
expected @pytest.mark.anyio async def test_fn_returns_list_of_strings(self): expected = [ "Hello, world!", "I'm looking for a restaurant in the center of town.", ] async def fn() -> list[str]: return expected prompt = Prompt.from_function(fn) assert await prompt.render() == [UserMessage(t) for t in expected] @pytest.mark.anyio async def test_fn_returns_resource_content(self): """Test returning a message with resource content.""" async def fn() -> UserMessage: return UserMessage( content=EmbeddedResource( type="resource", resource=TextResourceContents( uri=FileUrl("file://file.txt"), text="File contents", mimeType="text/plain", ), ) ) prompt = Prompt.from_function(fn) assert await prompt.render() == [ UserMessage( content=EmbeddedResource( type="resource", resource=TextResourceContents( uri=FileUrl("file://file.txt"), text="File contents", mimeType="text/plain", ), ) ) ] @pytest.mark.anyio async def test_fn_returns_mixed_content(self): """Test returning messages with mixed content types.""" async def fn() -> list[Message]: return [ UserMessage(content="Please analyze this file:"), UserMessage( content=EmbeddedResource( type="resource", resource=TextResourceContents( uri=FileUrl("file://file.txt"), text="File contents", mimeType="text/plain", ), ) ), AssistantMessage(content="I'll help analyze that file."), ] prompt = Prompt.from_function(fn) assert await prompt.render() == [ UserMessage( content=TextContent(type="text", text="Please analyze this file:") ), UserMessage( content=EmbeddedResource( type="resource", resource=TextResourceContents( uri=FileUrl("file://file.txt"), text="File contents", mimeType="text/plain", ), ) ), AssistantMessage( content=TextContent(type="text", text="I'll help analyze that file.") ), ] @pytest.mark.anyio async def test_fn_returns_dict_with_resource(self): """Test returning a dict with resource content.""" async def fn() -> dict: return { "role": "user", "content": { "type": "resource", "resource": { "uri": FileUrl("file://file.txt"), 
"text": "File contents", "mimeType": "text/plain", }, }, } prompt = Prompt.from_function(fn) assert await prompt.render() == [ UserMessage( content=EmbeddedResource( type="resource", resource=TextResourceContents( uri=FileUrl("file://file.txt"), text="File contents", mimeType="text/plain", ), ) ) ] ================================================ File: tests/server/fastmcp/prompts/test_manager.py ================================================ import pytest from mcp.server.fastmcp.prompts.base import Prompt, TextContent, UserMessage from mcp.server.fastmcp.prompts.manager import PromptManager class TestPromptManager: def test_add_prompt(self): """Test adding a prompt to the manager.""" def fn() -> str: return "Hello, world!" manager = PromptManager() prompt = Prompt.from_function(fn) added = manager.add_prompt(prompt) assert added == prompt assert manager.get_prompt("fn") == prompt def test_add_duplicate_prompt(self, caplog): """Test adding the same prompt twice.""" def fn() -> str: return "Hello, world!" manager = PromptManager() prompt = Prompt.from_function(fn) first = manager.add_prompt(prompt) second = manager.add_prompt(prompt) assert first == second assert "Prompt already exists" in caplog.text def test_disable_warn_on_duplicate_prompts(self, caplog): """Test disabling warning on duplicate prompts.""" def fn() -> str: return "Hello, world!" manager = PromptManager(warn_on_duplicate_prompts=False) prompt = Prompt.from_function(fn) first = manager.add_prompt(prompt) second = manager.add_prompt(prompt) assert first == second assert "Prompt already exists" not in caplog.text def test_list_prompts(self): """Test listing all prompts.""" def fn1() -> str: return "Hello, world!" def fn2() -> str: return "Goodbye, world!" 
manager = PromptManager()
        prompt1 = Prompt.from_function(fn1)
        prompt2 = Prompt.from_function(fn2)
        manager.add_prompt(prompt1)
        manager.add_prompt(prompt2)
        # list_prompts preserves registration order.
        prompts = manager.list_prompts()
        assert len(prompts) == 2
        assert prompts == [prompt1, prompt2]

    @pytest.mark.anyio
    async def test_render_prompt(self):
        """Test rendering a prompt."""

        def fn() -> str:
            return "Hello, world!"

        manager = PromptManager()
        prompt = Prompt.from_function(fn)
        manager.add_prompt(prompt)
        # A plain string result is wrapped in a single user TextContent message.
        messages = await manager.render_prompt("fn")
        assert messages == [
            UserMessage(content=TextContent(type="text", text="Hello, world!"))
        ]

    @pytest.mark.anyio
    async def test_render_prompt_with_args(self):
        """Test rendering a prompt with arguments."""

        def fn(name: str) -> str:
            return f"Hello, {name}!"

        manager = PromptManager()
        prompt = Prompt.from_function(fn)
        manager.add_prompt(prompt)
        messages = await manager.render_prompt("fn", arguments={"name": "World"})
        assert messages == [
            UserMessage(content=TextContent(type="text", text="Hello, World!"))
        ]

    @pytest.mark.anyio
    async def test_render_unknown_prompt(self):
        """Test rendering a non-existent prompt."""
        manager = PromptManager()
        with pytest.raises(ValueError, match="Unknown prompt: unknown"):
            await manager.render_prompt("unknown")

    @pytest.mark.anyio
    async def test_render_prompt_with_missing_args(self):
        """Test rendering a prompt with missing required arguments."""

        def fn(name: str) -> str:
            return f"Hello, {name}!"
manager = PromptManager() prompt = Prompt.from_function(fn) manager.add_prompt(prompt) with pytest.raises(ValueError, match="Missing required arguments"): await manager.render_prompt("fn") ================================================ File: tests/server/fastmcp/resources/test_file_resources.py ================================================ import os from pathlib import Path from tempfile import NamedTemporaryFile import pytest from pydantic import FileUrl from mcp.server.fastmcp.resources import FileResource @pytest.fixture def temp_file(): """Create a temporary file for testing. File is automatically cleaned up after the test if it still exists. """ content = "test content" with NamedTemporaryFile(mode="w", delete=False) as f: f.write(content) path = Path(f.name).resolve() yield path try: path.unlink() except FileNotFoundError: pass # File was already deleted by the test class TestFileResource: """Test FileResource functionality.""" def test_file_resource_creation(self, temp_file: Path): """Test creating a FileResource.""" resource = FileResource( uri=FileUrl(temp_file.as_uri()), name="test", description="test file", path=temp_file, ) assert str(resource.uri) == temp_file.as_uri() assert resource.name == "test" assert resource.description == "test file" assert resource.mime_type == "text/plain" # default assert resource.path == temp_file assert resource.is_binary is False # default def test_file_resource_str_path_conversion(self, temp_file: Path): """Test FileResource handles string paths.""" resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=Path(str(temp_file)), ) assert isinstance(resource.path, Path) assert resource.path.is_absolute() @pytest.mark.anyio async def test_read_text_file(self, temp_file: Path): """Test reading a text file.""" resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, ) content = await resource.read() assert content == "test content" assert resource.mime_type == 
"text/plain" @pytest.mark.anyio async def test_read_binary_file(self, temp_file: Path): """Test reading a file as binary.""" resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, is_binary=True, ) content = await resource.read() assert isinstance(content, bytes) assert content == b"test content" def test_relative_path_error(self): """Test error on relative path.""" with pytest.raises(ValueError, match="Path must be absolute"): FileResource( uri=FileUrl("file:///test.txt"), name="test", path=Path("test.txt"), ) @pytest.mark.anyio async def test_missing_file_error(self, temp_file: Path): """Test error when file doesn't exist.""" # Create path to non-existent file missing = temp_file.parent / "missing.txt" resource = FileResource( uri=FileUrl("file:///missing.txt"), name="test", path=missing, ) with pytest.raises(ValueError, match="Error reading file"): await resource.read() @pytest.mark.skipif( os.name == "nt", reason="File permissions behave differently on Windows" ) @pytest.mark.anyio async def test_permission_error(self, temp_file: Path): """Test reading a file without permissions.""" temp_file.chmod(0o000) # Remove all permissions try: resource = FileResource( uri=FileUrl(temp_file.as_uri()), name="test", path=temp_file, ) with pytest.raises(ValueError, match="Error reading file"): await resource.read() finally: temp_file.chmod(0o644) # Restore permissions ================================================ File: tests/server/fastmcp/resources/test_function_resources.py ================================================ import pytest from pydantic import AnyUrl, BaseModel from mcp.server.fastmcp.resources import FunctionResource class TestFunctionResource: """Test FunctionResource functionality.""" def test_function_resource_creation(self): """Test creating a FunctionResource.""" def my_func() -> str: return "test content" resource = FunctionResource( uri=AnyUrl("fn://test"), name="test", description="test function", fn=my_func, ) 
assert str(resource.uri) == "fn://test" assert resource.name == "test" assert resource.description == "test function" assert resource.mime_type == "text/plain" # default assert resource.fn == my_func @pytest.mark.anyio async def test_read_text(self): """Test reading text from a FunctionResource.""" def get_data() -> str: return "Hello, world!" resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=get_data, ) content = await resource.read() assert content == "Hello, world!" assert resource.mime_type == "text/plain" @pytest.mark.anyio async def test_read_binary(self): """Test reading binary data from a FunctionResource.""" def get_data() -> bytes: return b"Hello, world!" resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=get_data, ) content = await resource.read() assert content == b"Hello, world!" @pytest.mark.anyio async def test_json_conversion(self): """Test automatic JSON conversion of non-string results.""" def get_data() -> dict: return {"key": "value"} resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=get_data, ) content = await resource.read() assert isinstance(content, str) assert '"key": "value"' in content @pytest.mark.anyio async def test_error_handling(self): """Test error handling in FunctionResource.""" def failing_func() -> str: raise ValueError("Test error") resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=failing_func, ) with pytest.raises(ValueError, match="Error reading resource function://test"): await resource.read() @pytest.mark.anyio async def test_basemodel_conversion(self): """Test handling of BaseModel types.""" class MyModel(BaseModel): name: str resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=lambda: MyModel(name="test"), ) content = await resource.read() assert content == '{"name": "test"}' @pytest.mark.anyio async def test_custom_type_conversion(self): """Test handling of custom types.""" class CustomData: def 
__str__(self) -> str: return "custom data" def get_data() -> CustomData: return CustomData() resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=get_data, ) content = await resource.read() assert isinstance(content, str) @pytest.mark.anyio async def test_async_read_text(self): """Test reading text from async FunctionResource.""" async def get_data() -> str: return "Hello, world!" resource = FunctionResource( uri=AnyUrl("function://test"), name="test", fn=get_data, ) content = await resource.read() assert content == "Hello, world!" assert resource.mime_type == "text/plain" ================================================ File: tests/server/fastmcp/resources/test_resource_manager.py ================================================ from pathlib import Path from tempfile import NamedTemporaryFile import pytest from pydantic import AnyUrl, FileUrl from mcp.server.fastmcp.resources import ( FileResource, FunctionResource, ResourceManager, ResourceTemplate, ) @pytest.fixture def temp_file(): """Create a temporary file for testing. File is automatically cleaned up after the test if it still exists. 
""" content = "test content" with NamedTemporaryFile(mode="w", delete=False) as f: f.write(content) path = Path(f.name).resolve() yield path try: path.unlink() except FileNotFoundError: pass # File was already deleted by the test class TestResourceManager: """Test ResourceManager functionality.""" def test_add_resource(self, temp_file: Path): """Test adding a resource.""" manager = ResourceManager() resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, ) added = manager.add_resource(resource) assert added == resource assert manager.list_resources() == [resource] def test_add_duplicate_resource(self, temp_file: Path): """Test adding the same resource twice.""" manager = ResourceManager() resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, ) first = manager.add_resource(resource) second = manager.add_resource(resource) assert first == second assert manager.list_resources() == [resource] def test_warn_on_duplicate_resources(self, temp_file: Path, caplog): """Test warning on duplicate resources.""" manager = ResourceManager() resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, ) manager.add_resource(resource) manager.add_resource(resource) assert "Resource already exists" in caplog.text def test_disable_warn_on_duplicate_resources(self, temp_file: Path, caplog): """Test disabling warning on duplicate resources.""" manager = ResourceManager(warn_on_duplicate_resources=False) resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, ) manager.add_resource(resource) manager.add_resource(resource) assert "Resource already exists" not in caplog.text @pytest.mark.anyio async def test_get_resource(self, temp_file: Path): """Test getting a resource by URI.""" manager = ResourceManager() resource = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test", path=temp_file, ) manager.add_resource(resource) retrieved = await 
manager.get_resource(resource.uri) assert retrieved == resource @pytest.mark.anyio async def test_get_resource_from_template(self): """Test getting a resource through a template.""" manager = ResourceManager() def greet(name: str) -> str: return f"Hello, {name}!" template = ResourceTemplate.from_function( fn=greet, uri_template="greet://{name}", name="greeter", ) manager._templates[template.uri_template] = template resource = await manager.get_resource(AnyUrl("greet://world")) assert isinstance(resource, FunctionResource) content = await resource.read() assert content == "Hello, world!" @pytest.mark.anyio async def test_get_unknown_resource(self): """Test getting a non-existent resource.""" manager = ResourceManager() with pytest.raises(ValueError, match="Unknown resource"): await manager.get_resource(AnyUrl("unknown://test")) def test_list_resources(self, temp_file: Path): """Test listing all resources.""" manager = ResourceManager() resource1 = FileResource( uri=FileUrl(f"file://{temp_file}"), name="test1", path=temp_file, ) resource2 = FileResource( uri=FileUrl(f"file://{temp_file}2"), name="test2", path=temp_file, ) manager.add_resource(resource1) manager.add_resource(resource2) resources = manager.list_resources() assert len(resources) == 2 assert resources == [resource1, resource2] ================================================ File: tests/server/fastmcp/resources/test_resource_template.py ================================================ import json import pytest from pydantic import BaseModel from mcp.server.fastmcp.resources import FunctionResource, ResourceTemplate class TestResourceTemplate: """Test ResourceTemplate functionality.""" def test_template_creation(self): """Test creating a template from a function.""" def my_func(key: str, value: int) -> dict: return {"key": key, "value": value} template = ResourceTemplate.from_function( fn=my_func, uri_template="test://{key}/{value}", name="test", ) assert template.uri_template == "test://{key}/{value}" 
assert template.name == "test"
        assert template.mime_type == "text/plain"  # default
        test_input = {"key": "test", "value": 42}
        # The wrapped fn must behave like the original function.
        assert template.fn(**test_input) == my_func(**test_input)

    def test_template_matches(self):
        """Test matching URIs against a template."""

        def my_func(key: str, value: int) -> dict:
            return {"key": key, "value": value}

        template = ResourceTemplate.from_function(
            fn=my_func,
            uri_template="test://{key}/{value}",
            name="test",
        )
        # Valid match — placeholder values are extracted as strings.
        params = template.matches("test://foo/123")
        assert params == {"key": "foo", "value": "123"}
        # No match: missing segment, or wrong scheme.
        assert template.matches("test://foo") is None
        assert template.matches("other://foo/123") is None

    @pytest.mark.anyio
    async def test_create_resource(self):
        """Test creating a resource from a template."""

        def my_func(key: str, value: int) -> dict:
            return {"key": key, "value": value}

        template = ResourceTemplate.from_function(
            fn=my_func,
            uri_template="test://{key}/{value}",
            name="test",
        )
        resource = await template.create_resource(
            "test://foo/123",
            {"key": "foo", "value": 123},
        )
        assert isinstance(resource, FunctionResource)
        # A dict result is serialized to JSON text by the resource.
        content = await resource.read()
        assert isinstance(content, str)
        data = json.loads(content)
        assert data == {"key": "foo", "value": 123}

    @pytest.mark.anyio
    async def test_template_error(self):
        """Test error handling in template resource creation."""

        def failing_func(x: str) -> str:
            raise ValueError("Test error")

        template = ResourceTemplate.from_function(
            fn=failing_func,
            uri_template="fail://{x}",
            name="fail",
        )
        # The underlying exception is wrapped with a descriptive message.
        with pytest.raises(ValueError, match="Error creating resource from template"):
            await template.create_resource("fail://test", {"x": "test"})

    @pytest.mark.anyio
    async def test_async_text_resource(self):
        """Test creating a text resource from async function."""

        async def greet(name: str) -> str:
            return f"Hello, {name}!"
template = ResourceTemplate.from_function( fn=greet, uri_template="greet://{name}", name="greeter", ) resource = await template.create_resource( "greet://world", {"name": "world"}, ) assert isinstance(resource, FunctionResource) content = await resource.read() assert content == "Hello, world!" @pytest.mark.anyio async def test_async_binary_resource(self): """Test creating a binary resource from async function.""" async def get_bytes(value: str) -> bytes: return value.encode() template = ResourceTemplate.from_function( fn=get_bytes, uri_template="bytes://{value}", name="bytes", ) resource = await template.create_resource( "bytes://test", {"value": "test"}, ) assert isinstance(resource, FunctionResource) content = await resource.read() assert content == b"test" @pytest.mark.anyio async def test_basemodel_conversion(self): """Test handling of BaseModel types.""" class MyModel(BaseModel): key: str value: int def get_data(key: str, value: int) -> MyModel: return MyModel(key=key, value=value) template = ResourceTemplate.from_function( fn=get_data, uri_template="test://{key}/{value}", name="test", ) resource = await template.create_resource( "test://foo/123", {"key": "foo", "value": 123}, ) assert isinstance(resource, FunctionResource) content = await resource.read() assert isinstance(content, str) data = json.loads(content) assert data == {"key": "foo", "value": 123} @pytest.mark.anyio async def test_custom_type_conversion(self): """Test handling of custom types.""" class CustomData: def __init__(self, value: str): self.value = value def __str__(self) -> str: return self.value def get_data(value: str) -> CustomData: return CustomData(value) template = ResourceTemplate.from_function( fn=get_data, uri_template="test://{value}", name="test", ) resource = await template.create_resource( "test://hello", {"value": "hello"}, ) assert isinstance(resource, FunctionResource) content = await resource.read() assert content == "hello" ================================================ 
File: tests/server/fastmcp/resources/test_resources.py ================================================ import pytest from pydantic import AnyUrl from mcp.server.fastmcp.resources import FunctionResource, Resource class TestResourceValidation: """Test base Resource validation.""" def test_resource_uri_validation(self): """Test URI validation.""" def dummy_func() -> str: return "data" # Valid URI resource = FunctionResource( uri=AnyUrl("http://example.com/data"), name="test", fn=dummy_func, ) assert str(resource.uri) == "http://example.com/data" # Missing protocol with pytest.raises(ValueError, match="Input should be a valid URL"): FunctionResource( uri=AnyUrl("invalid"), name="test", fn=dummy_func, ) # Missing host with pytest.raises(ValueError, match="Input should be a valid URL"): FunctionResource( uri=AnyUrl("http://"), name="test", fn=dummy_func, ) def test_resource_name_from_uri(self): """Test name is extracted from URI if not provided.""" def dummy_func() -> str: return "data" resource = FunctionResource( uri=AnyUrl("resource://my-resource"), fn=dummy_func, ) assert resource.name == "resource://my-resource" def test_resource_name_validation(self): """Test name validation.""" def dummy_func() -> str: return "data" # Must provide either name or URI with pytest.raises(ValueError, match="Either name or uri must be provided"): FunctionResource( fn=dummy_func, ) # Explicit name takes precedence over URI resource = FunctionResource( uri=AnyUrl("resource://uri-name"), name="explicit-name", fn=dummy_func, ) assert resource.name == "explicit-name" def test_resource_mime_type(self): """Test mime type handling.""" def dummy_func() -> str: return "data" # Default mime type resource = FunctionResource( uri=AnyUrl("resource://test"), fn=dummy_func, ) assert resource.mime_type == "text/plain" # Custom mime type resource = FunctionResource( uri=AnyUrl("resource://test"), fn=dummy_func, mime_type="application/json", ) assert resource.mime_type == "application/json" 
@pytest.mark.anyio async def test_resource_read_abstract(self): """Test that Resource.read() is abstract.""" class ConcreteResource(Resource): pass with pytest.raises(TypeError, match="abstract method"): ConcreteResource(uri=AnyUrl("test://test"), name="test") # type: ignore ================================================ File: tests/server/fastmcp/servers/test_file_server.py ================================================ import json from pathlib import Path import pytest from mcp.server.fastmcp import FastMCP @pytest.fixture() def test_dir(tmp_path_factory) -> Path: """Create a temporary directory with test files.""" tmp = tmp_path_factory.mktemp("test_files") # Create test files (tmp / "example.py").write_text("print('hello world')") (tmp / "readme.md").write_text("# Test Directory\nThis is a test.") (tmp / "config.json").write_text('{"test": true}') return tmp @pytest.fixture def mcp() -> FastMCP: mcp = FastMCP() return mcp @pytest.fixture(autouse=True) def resources(mcp: FastMCP, test_dir: Path) -> FastMCP: @mcp.resource("dir://test_dir") def list_test_dir() -> list[str]: """List the files in the test directory""" return [str(f) for f in test_dir.iterdir()] @mcp.resource("file://test_dir/example.py") def read_example_py() -> str: """Read the example.py file""" try: return (test_dir / "example.py").read_text() except FileNotFoundError: return "File not found" @mcp.resource("file://test_dir/readme.md") def read_readme_md() -> str: """Read the readme.md file""" try: return (test_dir / "readme.md").read_text() except FileNotFoundError: return "File not found" @mcp.resource("file://test_dir/config.json") def read_config_json() -> str: """Read the config.json file""" try: return (test_dir / "config.json").read_text() except FileNotFoundError: return "File not found" return mcp @pytest.fixture(autouse=True) def tools(mcp: FastMCP, test_dir: Path) -> FastMCP: @mcp.tool() def delete_file(path: str) -> bool: # ensure path is in test_dir if Path(path).resolve().parent 
!= test_dir: raise ValueError(f"Path must be in test_dir: {path}") Path(path).unlink() return True return mcp @pytest.mark.anyio async def test_list_resources(mcp: FastMCP): resources = await mcp.list_resources() assert len(resources) == 4 assert [str(r.uri) for r in resources] == [ "dir://test_dir", "file://test_dir/example.py", "file://test_dir/readme.md", "file://test_dir/config.json", ] @pytest.mark.anyio async def test_read_resource_dir(mcp: FastMCP): res = await mcp.read_resource("dir://test_dir") assert res.mime_type == "text/plain" files = json.loads(res.content) assert sorted([Path(f).name for f in files]) == [ "config.json", "example.py", "readme.md", ] @pytest.mark.anyio async def test_read_resource_file(mcp: FastMCP): res = await mcp.read_resource("file://test_dir/example.py") assert res.content == "print('hello world')" @pytest.mark.anyio async def test_delete_file(mcp: FastMCP, test_dir: Path): await mcp.call_tool( "delete_file", arguments=dict(path=str(test_dir / "example.py")) ) assert not (test_dir / "example.py").exists() @pytest.mark.anyio async def test_delete_file_and_check_resources(mcp: FastMCP, test_dir: Path): await mcp.call_tool( "delete_file", arguments=dict(path=str(test_dir / "example.py")) ) res = await mcp.read_resource("file://test_dir/example.py") assert res.content == "File not found" ================================================ File: tests/shared/test_memory.py ================================================ import pytest from pydantic import AnyUrl from typing_extensions import AsyncGenerator from mcp.client.session import ClientSession from mcp.server import Server from mcp.shared.memory import ( create_connected_server_and_client_session, ) from mcp.types import ( EmptyResult, Resource, ) @pytest.fixture def mcp_server() -> Server: server = Server(name="test_server") @server.list_resources() async def handle_list_resources(): return [ Resource( uri=AnyUrl("memory://test"), name="Test Resource", description="A test 
resource", ) ] return server @pytest.fixture async def client_connected_to_server( mcp_server: Server, ) -> AsyncGenerator[ClientSession, None]: async with create_connected_server_and_client_session(mcp_server) as client_session: yield client_session @pytest.mark.anyio async def test_memory_server_and_client_connection( client_connected_to_server: ClientSession, ): """Shows how a client and server can communicate over memory streams.""" response = await client_connected_to_server.send_ping() assert isinstance(response, EmptyResult) ================================================ File: tests/shared/test_session.py ================================================ from typing import AsyncGenerator import anyio import pytest import mcp.types as types from mcp.client.session import ClientSession from mcp.server.lowlevel.server import Server from mcp.shared.exceptions import McpError from mcp.shared.memory import create_connected_server_and_client_session from mcp.types import ( CancelledNotification, CancelledNotificationParams, ClientNotification, ClientRequest, EmptyResult, ) @pytest.fixture def mcp_server() -> Server: return Server(name="test server") @pytest.fixture async def client_connected_to_server( mcp_server: Server, ) -> AsyncGenerator[ClientSession, None]: async with create_connected_server_and_client_session(mcp_server) as client_session: yield client_session @pytest.mark.anyio async def test_in_flight_requests_cleared_after_completion( client_connected_to_server: ClientSession, ): """Verify that _in_flight is empty after all requests complete.""" # Send a request and wait for response response = await client_connected_to_server.send_ping() assert isinstance(response, EmptyResult) # Verify _in_flight is empty assert len(client_connected_to_server._in_flight) == 0 @pytest.mark.anyio async def test_request_cancellation(): """Test that requests can be cancelled while in-flight.""" # The tool is already registered in the fixture ev_tool_called = anyio.Event() 
ev_cancelled = anyio.Event() request_id = None # Start the request in a separate task so we can cancel it def make_server() -> Server: server = Server(name="TestSessionServer") # Register the tool handler @server.call_tool() async def handle_call_tool(name: str, arguments: dict | None) -> list: nonlocal request_id, ev_tool_called if name == "slow_tool": request_id = server.request_context.request_id ev_tool_called.set() await anyio.sleep(10) # Long enough to ensure we can cancel return [] raise ValueError(f"Unknown tool: {name}") # Register the tool so it shows up in list_tools @server.list_tools() async def handle_list_tools() -> list[types.Tool]: return [ types.Tool( name="slow_tool", description="A slow tool that takes 10 seconds to complete", inputSchema={}, ) ] return server async def make_request(client_session): nonlocal ev_cancelled try: await client_session.send_request( ClientRequest( types.CallToolRequest( method="tools/call", params=types.CallToolRequestParams( name="slow_tool", arguments={} ), ) ), types.CallToolResult, ) pytest.fail("Request should have been cancelled") except McpError as e: # Expected - request was cancelled assert "Request cancelled" in str(e) ev_cancelled.set() async with create_connected_server_and_client_session( make_server() ) as client_session: async with anyio.create_task_group() as tg: tg.start_soon(make_request, client_session) # Wait for the request to be in-flight with anyio.fail_after(1): # Timeout after 1 second await ev_tool_called.wait() # Send cancellation notification assert request_id is not None await client_session.send_notification( ClientNotification( CancelledNotification( method="notifications/cancelled", params=CancelledNotificationParams(requestId=request_id), ) ) ) # Give cancellation time to process with anyio.fail_after(1): await ev_cancelled.wait() ================================================ File: tests/shared/test_sse.py ================================================ import multiprocessing 
import socket import time from typing import AsyncGenerator, Generator import anyio import httpx import pytest import uvicorn from pydantic import AnyUrl from starlette.applications import Starlette from starlette.requests import Request from starlette.routing import Mount, Route from mcp.client.session import ClientSession from mcp.client.sse import sse_client from mcp.server import Server from mcp.server.sse import SseServerTransport from mcp.shared.exceptions import McpError from mcp.types import ( EmptyResult, ErrorData, InitializeResult, ReadResourceResult, TextContent, TextResourceContents, Tool, ) SERVER_NAME = "test_server_for_SSE" @pytest.fixture def server_port() -> int: with socket.socket() as s: s.bind(("127.0.0.1", 0)) return s.getsockname()[1] @pytest.fixture def server_url(server_port: int) -> str: return f"http://127.0.0.1:{server_port}" # Test server implementation class ServerTest(Server): def __init__(self): super().__init__(SERVER_NAME) @self.read_resource() async def handle_read_resource(uri: AnyUrl) -> str | bytes: if uri.scheme == "foobar": return f"Read {uri.host}" elif uri.scheme == "slow": # Simulate a slow resource await anyio.sleep(2.0) return f"Slow response from {uri.host}" raise McpError( error=ErrorData( code=404, message="OOPS! 
no resource with that URI was found" ) ) @self.list_tools() async def handle_list_tools() -> list[Tool]: return [ Tool( name="test_tool", description="A test tool", inputSchema={"type": "object", "properties": {}}, ) ] @self.call_tool() async def handle_call_tool(name: str, args: dict) -> list[TextContent]: return [TextContent(type="text", text=f"Called {name}")] # Test fixtures def make_server_app() -> Starlette: """Create test Starlette app with SSE transport""" sse = SseServerTransport("/messages/") server = ServerTest() async def handle_sse(request: Request) -> None: async with sse.connect_sse( request.scope, request.receive, request._send ) as streams: await server.run( streams[0], streams[1], server.create_initialization_options() ) app = Starlette( routes=[ Route("/sse", endpoint=handle_sse), Mount("/messages/", app=sse.handle_post_message), ] ) return app def run_server(server_port: int) -> None: app = make_server_app() server = uvicorn.Server( config=uvicorn.Config( app=app, host="127.0.0.1", port=server_port, log_level="error" ) ) print(f"starting server on {server_port}") server.run() # Give server time to start while not server.started: print("waiting for server to start") time.sleep(0.5) @pytest.fixture() def server(server_port: int) -> Generator[None, None, None]: proc = multiprocessing.Process( target=run_server, kwargs={"server_port": server_port}, daemon=True ) print("starting process") proc.start() # Wait for server to be running max_attempts = 20 attempt = 0 print("waiting for server to start") while attempt < max_attempts: try: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.connect(("127.0.0.1", server_port)) break except ConnectionRefusedError: time.sleep(0.1) attempt += 1 else: raise RuntimeError( "Server failed to start after {} attempts".format(max_attempts) ) yield print("killing server") # Signal the server to stop proc.kill() proc.join(timeout=2) if proc.is_alive(): print("server process failed to terminate") 
@pytest.fixture() async def http_client(server, server_url) -> AsyncGenerator[httpx.AsyncClient, None]: """Create test client""" async with httpx.AsyncClient(base_url=server_url) as client: yield client # Tests @pytest.mark.anyio async def test_raw_sse_connection(http_client: httpx.AsyncClient) -> None: """Test the SSE connection establishment simply with an HTTP client.""" async with anyio.create_task_group(): async def connection_test() -> None: async with http_client.stream("GET", "/sse") as response: assert response.status_code == 200 assert ( response.headers["content-type"] == "text/event-stream; charset=utf-8" ) line_number = 0 async for line in response.aiter_lines(): if line_number == 0: assert line == "event: endpoint" elif line_number == 1: assert line.startswith("data: /messages/?session_id=") else: return line_number += 1 # Add timeout to prevent test from hanging if it fails with anyio.fail_after(3): await connection_test() @pytest.mark.anyio async def test_sse_client_basic_connection(server: None, server_url: str) -> None: async with sse_client(server_url + "/sse") as streams: async with ClientSession(*streams) as session: # Test initialization result = await session.initialize() assert isinstance(result, InitializeResult) assert result.serverInfo.name == SERVER_NAME # Test ping ping_result = await session.send_ping() assert isinstance(ping_result, EmptyResult) @pytest.fixture async def initialized_sse_client_session( server, server_url: str ) -> AsyncGenerator[ClientSession, None]: async with sse_client(server_url + "/sse", sse_read_timeout=0.5) as streams: async with ClientSession(*streams) as session: await session.initialize() yield session @pytest.mark.anyio async def test_sse_client_happy_request_and_response( initialized_sse_client_session: ClientSession, ) -> None: session = initialized_sse_client_session response = await session.read_resource(uri=AnyUrl("foobar://should-work")) assert len(response.contents) == 1 assert 
isinstance(response.contents[0], TextResourceContents) assert response.contents[0].text == "Read should-work" @pytest.mark.anyio async def test_sse_client_exception_handling( initialized_sse_client_session: ClientSession, ) -> None: session = initialized_sse_client_session with pytest.raises(McpError, match="OOPS! no resource with that URI was found"): await session.read_resource(uri=AnyUrl("xxx://will-not-work")) @pytest.mark.anyio @pytest.mark.skip( "this test highlights a possible bug in SSE read timeout exception handling" ) async def test_sse_client_timeout( initialized_sse_client_session: ClientSession, ) -> None: session = initialized_sse_client_session # sanity check that normal, fast responses are working response = await session.read_resource(uri=AnyUrl("foobar://1")) assert isinstance(response, ReadResourceResult) with anyio.move_on_after(3): with pytest.raises(McpError, match="Read timed out"): response = await session.read_resource(uri=AnyUrl("slow://2")) # we should receive an error here return pytest.fail("the client should have timed out and returned an error already") ================================================ File: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 
22] **Additional context** Add any other context about the problem here. ================================================ File: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: '' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. ================================================ File: .github/workflows/check-lock.yml ================================================ name: Check uv.lock on: pull_request: paths: - "pyproject.toml" - "uv.lock" push: paths: - "pyproject.toml" - "uv.lock" jobs: check-lock: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install uv run: | curl -LsSf https://astral.sh/uv/install.sh | sh echo "$HOME/.cargo/bin" >> $GITHUB_PATH - name: Check uv.lock is up to date run: uv lock --check ================================================ File: .github/workflows/main-checks.yml ================================================ name: Main branch checks on: push: branches: - main - "v*.*.*" tags: - "v*.*.*" jobs: checks: uses: ./.github/workflows/shared.yml ================================================ File: .github/workflows/publish-pypi.yml ================================================ name: Publishing on: release: types: [published] jobs: release-build: name: Build distribution runs-on: ubuntu-latest needs: [checks] steps: - uses: actions/checkout@v4 - name: Install uv uses: astral-sh/setup-uv@v3 - name: "Set up Python" uses: actions/setup-python@v5 with: 
python-version-file: ".python-version" - name: Install the project run: uv sync --frozen --all-extras --dev - name: Build run: uv build - name: Upload artifacts uses: actions/upload-artifact@v4 with: name: release-dists path: dist/ checks: uses: ./.github/workflows/shared.yml pypi-publish: name: Upload release to PyPI runs-on: ubuntu-latest environment: release needs: - release-build permissions: id-token: write # IMPORTANT: this permission is mandatory for trusted publishing steps: - name: Retrieve release distributions uses: actions/download-artifact@v4 with: name: release-dists path: dist/ - name: Publish package distributions to PyPI uses: pypa/gh-action-pypi-publish@release/v1 ================================================ File: .github/workflows/pull-request-checks.yml ================================================ name: Pull request checks on: pull_request: jobs: checks: uses: ./.github/workflows/shared.yml ================================================ File: .github/workflows/shared.yml ================================================ name: Shared Checks on: workflow_call: jobs: format: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install uv uses: astral-sh/setup-uv@v3 with: enable-cache: true - name: "Set up Python" uses: actions/setup-python@v5 with: python-version-file: ".python-version" - name: Install the project run: uv sync --frozen --all-extras --dev - name: Run ruff format check run: uv run --frozen ruff check . 
# Shared CI jobs (continued): static type checking and the test run.
# Both jobs repeat the same setup sequence — checkout, install uv with
# caching enabled, pin Python from .python-version, then sync the locked
# dependency set — before running their respective check.
typecheck:
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Install uv
      uses: astral-sh/setup-uv@v3
      with:
        enable-cache: true
    - name: "Set up Python"
      uses: actions/setup-python@v5
      with:
        python-version-file: ".python-version"
    - name: Install the project
      run: uv sync --frozen --all-extras --dev
    - name: Run pyright
      run: uv run --frozen pyright

build:
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Install uv
      uses: astral-sh/setup-uv@v3
      with:
        enable-cache: true
    - name: "Set up Python"
      uses: actions/setup-python@v5
      with:
        python-version-file: ".python-version"
    - name: Install the project
      run: uv sync --frozen --all-extras --dev
    - name: Run pytest
      run: uv run --frozen pytest