tools.py (20.3 kB)
from __future__ import annotations as _annotations

from collections.abc import Awaitable, Callable, Sequence
from dataclasses import KW_ONLY, dataclass, field, replace
from typing import Annotated, Any, Concatenate, Generic, Literal, TypeAlias, cast

from pydantic import Discriminator, Tag
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue
from pydantic_core import SchemaValidator, core_schema
from typing_extensions import ParamSpec, Self, TypeVar

from . import _function_schema, _utils
from ._run_context import AgentDepsT, RunContext
from .exceptions import ModelRetry
from .messages import RetryPromptPart, ToolCallPart, ToolReturn

__all__ = (
    'AgentDepsT',
    'DocstringFormat',
    'RunContext',
    'SystemPromptFunc',
    'ToolFuncContext',
    'ToolFuncPlain',
    'ToolFuncEither',
    'ToolParams',
    'ToolPrepareFunc',
    'ToolsPrepareFunc',
    'Tool',
    'ObjectJsonSchema',
    'ToolDefinition',
    'DeferredToolRequests',
    'DeferredToolResults',
    'ToolApproved',
    'ToolDenied',
)

ToolParams = ParamSpec('ToolParams', default=...)
"""Retrieval function param spec."""

SystemPromptFunc: TypeAlias = (
    Callable[[RunContext[AgentDepsT]], str]
    | Callable[[RunContext[AgentDepsT]], Awaitable[str]]
    | Callable[[], str]
    | Callable[[], Awaitable[str]]
)
"""A function that may or may not take `RunContext` as an argument, and may or may not be async.

Usage `SystemPromptFunc[AgentDepsT]`.
"""

ToolFuncContext: TypeAlias = Callable[Concatenate[RunContext[AgentDepsT], ToolParams], Any]
"""A tool function that takes `RunContext` as the first argument.

Usage `ToolFuncContext[AgentDepsT, ToolParams]`.
"""

ToolFuncPlain: TypeAlias = Callable[ToolParams, Any]
"""A tool function that does not take `RunContext` as the first argument.

Usage `ToolFuncPlain[ToolParams]`.
"""

ToolFuncEither: TypeAlias = ToolFuncContext[AgentDepsT, ToolParams] | ToolFuncPlain[ToolParams]
"""Either kind of tool function.

This is just a union of [`ToolFuncContext`][pydantic_ai.tools.ToolFuncContext] and
[`ToolFuncPlain`][pydantic_ai.tools.ToolFuncPlain].

Usage `ToolFuncEither[AgentDepsT, ToolParams]`.
"""

ToolPrepareFunc: TypeAlias = Callable[[RunContext[AgentDepsT], 'ToolDefinition'], Awaitable['ToolDefinition | None']]
"""Definition of a function that can prepare a tool definition at call time.

See [tool docs](../tools-advanced.md#tool-prepare) for more information.

Example — here `only_if_42` is valid as a `ToolPrepareFunc`:

```python {noqa="I001"}
from pydantic_ai import RunContext, Tool
from pydantic_ai.tools import ToolDefinition


async def only_if_42(
    ctx: RunContext[int], tool_def: ToolDefinition
) -> ToolDefinition | None:
    if ctx.deps == 42:
        return tool_def


def hitchhiker(ctx: RunContext[int], answer: str) -> str:
    return f'{ctx.deps} {answer}'


hitchhiker = Tool(hitchhiker, prepare=only_if_42)
```

Usage `ToolPrepareFunc[AgentDepsT]`.
"""

ToolsPrepareFunc: TypeAlias = Callable[
    [RunContext[AgentDepsT], list['ToolDefinition']], Awaitable['list[ToolDefinition] | None']
]
"""Definition of a function that can prepare the tool definition of all tools for each step.

This is useful if you want to customize the definition of multiple tools or
you want to register a subset of tools for a given step.
Example — here `turn_on_strict_if_openai` is valid as a `ToolsPrepareFunc`:

```python {noqa="I001"}
from dataclasses import replace

from pydantic_ai import Agent, RunContext
from pydantic_ai.tools import ToolDefinition


async def turn_on_strict_if_openai(
    ctx: RunContext[None], tool_defs: list[ToolDefinition]
) -> list[ToolDefinition] | None:
    if ctx.model.system == 'openai':
        return [replace(tool_def, strict=True) for tool_def in tool_defs]
    return tool_defs


agent = Agent('openai:gpt-4o', prepare_tools=turn_on_strict_if_openai)
```

Usage `ToolsPrepareFunc[AgentDepsT]`.
"""

DocstringFormat: TypeAlias = Literal['google', 'numpy', 'sphinx', 'auto']
"""Supported docstring formats.

* `'google'` — [Google-style](https://google.github.io/styleguide/pyguide.html#381-docstrings) docstrings.
* `'numpy'` — [Numpy-style](https://numpydoc.readthedocs.io/en/latest/format.html) docstrings.
* `'sphinx'` — [Sphinx-style](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html#the-sphinx-docstring-format) docstrings.
* `'auto'` — Automatically infer the format based on the structure of the docstring.
"""


@dataclass(kw_only=True)
class DeferredToolRequests:
    """Tool calls that require approval or external execution.

    This can be used as an agent's `output_type` and will be used as the output of the agent run
    if the model called any deferred tools.

    Results can be passed to the next agent run using a
    [`DeferredToolResults`][pydantic_ai.tools.DeferredToolResults] object with the same tool call IDs.

    See [deferred tools docs](../deferred-tools.md#deferred-tools) for more information.
    """

    calls: list[ToolCallPart] = field(default_factory=list)
    """Tool calls that require external execution."""
    approvals: list[ToolCallPart] = field(default_factory=list)
    """Tool calls that require human-in-the-loop approval."""


@dataclass(kw_only=True)
class ToolApproved:
    """Indicates that a tool call has been approved and that the tool function should be executed."""

    override_args: dict[str, Any] | None = None
    """Optional tool call arguments to use instead of the original arguments."""

    kind: Literal['tool-approved'] = 'tool-approved'


@dataclass
class ToolDenied:
    """Indicates that a tool call has been denied and that a denial message should be returned to the model."""

    message: str = 'The tool call was denied.'
    """The message to return to the model."""

    _: KW_ONLY
    kind: Literal['tool-denied'] = 'tool-denied'


def _deferred_tool_call_result_discriminator(x: Any) -> str | None:
    if isinstance(x, dict):
        if 'kind' in x:
            return cast(str, x['kind'])
        elif 'part_kind' in x:
            return cast(str, x['part_kind'])
    else:
        if hasattr(x, 'kind'):
            return cast(str, x.kind)
        elif hasattr(x, 'part_kind'):
            return cast(str, x.part_kind)
    return None


DeferredToolApprovalResult: TypeAlias = Annotated[ToolApproved | ToolDenied, Discriminator('kind')]
"""Result for a tool call that required human-in-the-loop approval."""

DeferredToolCallResult: TypeAlias = Annotated[
    Annotated[ToolReturn, Tag('tool-return')]
    | Annotated[ModelRetry, Tag('model-retry')]
    | Annotated[RetryPromptPart, Tag('retry-prompt')],
    Discriminator(_deferred_tool_call_result_discriminator),
]
"""Result for a tool call that required external execution."""

DeferredToolResult = DeferredToolApprovalResult | DeferredToolCallResult
"""Result for a tool call that required approval or external execution."""


@dataclass(kw_only=True)
class DeferredToolResults:
    """Results for deferred tool calls from a previous run that required approval or external execution.
    The tool call IDs need to match those from the
    [`DeferredToolRequests`][pydantic_ai.output.DeferredToolRequests] output object from the previous run.

    See [deferred tools docs](../deferred-tools.md#deferred-tools) for more information.
    """

    calls: dict[str, DeferredToolCallResult | Any] = field(default_factory=dict)
    """Map of tool call IDs to results for tool calls that required external execution."""
    approvals: dict[str, bool | DeferredToolApprovalResult] = field(default_factory=dict)
    """Map of tool call IDs to results for tool calls that required human-in-the-loop approval."""


A = TypeVar('A')


class GenerateToolJsonSchema(GenerateJsonSchema):
    def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue:
        json_schema = super().typed_dict_schema(schema)
        # Workaround for https://github.com/pydantic/pydantic/issues/12123
        if 'additionalProperties' not in json_schema:  # pragma: no branch
            extra = schema.get('extra_behavior') or schema.get('config', {}).get('extra_fields_behavior')
            if extra == 'allow':
                extras_schema = schema.get('extras_schema', None)
                if extras_schema is not None:
                    json_schema['additionalProperties'] = self.generate_inner(extras_schema) or True
                else:
                    json_schema['additionalProperties'] = True  # pragma: no cover
            elif extra == 'forbid':
                json_schema['additionalProperties'] = False
        return json_schema

    def _named_required_fields_schema(self, named_required_fields: Sequence[tuple[str, bool, Any]]) -> JsonSchemaValue:
        # Remove largely-useless property titles
        s = super()._named_required_fields_schema(named_required_fields)
        for p in s.get('properties', {}):
            s['properties'][p].pop('title', None)
        return s


@dataclass(init=False)
class Tool(Generic[AgentDepsT]):
    """A tool function for an agent."""

    function: ToolFuncEither[AgentDepsT]
    takes_ctx: bool
    max_retries: int | None
    name: str
    description: str | None
    prepare: ToolPrepareFunc[AgentDepsT] | None
    docstring_format: DocstringFormat
    require_parameter_descriptions: bool
    strict: bool | None
    sequential: bool
    requires_approval: bool
    metadata: dict[str, Any] | None
    function_schema: _function_schema.FunctionSchema
    """
    The base JSON schema for the tool's parameters.

    This schema may be modified by the `prepare` function or by the Model class prior to including it in an API request.
    """

    def __init__(
        self,
        function: ToolFuncEither[AgentDepsT],
        *,
        takes_ctx: bool | None = None,
        max_retries: int | None = None,
        name: str | None = None,
        description: str | None = None,
        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
        docstring_format: DocstringFormat = 'auto',
        require_parameter_descriptions: bool = False,
        schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
        strict: bool | None = None,
        sequential: bool = False,
        requires_approval: bool = False,
        metadata: dict[str, Any] | None = None,
        function_schema: _function_schema.FunctionSchema | None = None,
    ):
        """Create a new tool instance.
        Example usage:

        ```python {noqa="I001"}
        from pydantic_ai import Agent, RunContext, Tool


        async def my_tool(ctx: RunContext[int], x: int, y: int) -> str:
            return f'{ctx.deps} {x} {y}'


        agent = Agent('test', tools=[Tool(my_tool)])
        ```

        or with a custom prepare method:

        ```python {noqa="I001"}
        from pydantic_ai import Agent, RunContext, Tool
        from pydantic_ai.tools import ToolDefinition


        async def my_tool(ctx: RunContext[int], x: int, y: int) -> str:
            return f'{ctx.deps} {x} {y}'


        async def prep_my_tool(
            ctx: RunContext[int], tool_def: ToolDefinition
        ) -> ToolDefinition | None:
            # only register the tool if `deps == 42`
            if ctx.deps == 42:
                return tool_def


        agent = Agent('test', tools=[Tool(my_tool, prepare=prep_my_tool)])
        ```

        Args:
            function: The Python function to call as the tool.
            takes_ctx: Whether the function takes a [`RunContext`][pydantic_ai.tools.RunContext] first argument,
                this is inferred if unset.
            max_retries: Maximum number of retries allowed for this tool, set to the agent default if `None`.
            name: Name of the tool, inferred from the function if `None`.
            description: Description of the tool, inferred from the function if `None`.
            prepare: custom method to prepare the tool definition for each step, return `None` to omit this
                tool from a given step. This is useful if you want to customise a tool at call time,
                or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
            docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
                Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
            require_parameter_descriptions: If True, raise an error if a parameter description is missing.
                Defaults to False.
            schema_generator: The JSON schema generator class to use. Defaults to `GenerateToolJsonSchema`.
            strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering
                and tool behavior customization.
            function_schema: The function schema to use for the tool. If not provided, it will be generated.
        """
        self.function = function
        self.function_schema = function_schema or _function_schema.function_schema(
            function,
            schema_generator,
            takes_ctx=takes_ctx,
            docstring_format=docstring_format,
            require_parameter_descriptions=require_parameter_descriptions,
        )
        self.takes_ctx = self.function_schema.takes_ctx
        self.max_retries = max_retries
        self.name = name or function.__name__
        self.description = description or self.function_schema.description
        self.prepare = prepare
        self.docstring_format = docstring_format
        self.require_parameter_descriptions = require_parameter_descriptions
        self.strict = strict
        self.sequential = sequential
        self.requires_approval = requires_approval
        self.metadata = metadata

    @classmethod
    def from_schema(
        cls,
        function: Callable[..., Any],
        name: str,
        description: str | None,
        json_schema: JsonSchemaValue,
        takes_ctx: bool = False,
        sequential: bool = False,
    ) -> Self:
        """Creates a Pydantic tool from a function and a JSON schema.

        Args:
            function: The function to call.
                This will be called with keywords only, and no validation of the arguments will be performed.
            name: The unique name of the tool that clearly communicates its purpose
            description: Used to tell the model how/when/why to use the tool.
                You can provide few-shot examples as a part of the description.
            json_schema: The schema for the function arguments
            takes_ctx: An optional boolean parameter indicating whether the function
                accepts the context object as an argument.
            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.

        Returns:
            A Pydantic tool that calls the function
        """
        function_schema = _function_schema.FunctionSchema(
            function=function,
            description=description,
            validator=SchemaValidator(schema=core_schema.any_schema()),
            json_schema=json_schema,
            takes_ctx=takes_ctx,
            is_async=_utils.is_async_callable(function),
        )

        return cls(
            function,
            takes_ctx=takes_ctx,
            name=name,
            description=description,
            function_schema=function_schema,
            sequential=sequential,
        )

    @property
    def tool_def(self):
        return ToolDefinition(
            name=self.name,
            description=self.description,
            parameters_json_schema=self.function_schema.json_schema,
            strict=self.strict,
            sequential=self.sequential,
            metadata=self.metadata,
        )

    async def prepare_tool_def(self, ctx: RunContext[AgentDepsT]) -> ToolDefinition | None:
        """Get the tool definition.

        By default, this method creates a tool definition, then either returns it, or calls `self.prepare`
        if it's set.

        Returns:
            return a `ToolDefinition` or `None` if the tools should not be registered for this run.
        """
        base_tool_def = self.tool_def

        if self.requires_approval and not ctx.tool_call_approved:
            base_tool_def = replace(base_tool_def, kind='unapproved')

        if self.prepare is not None:
            return await self.prepare(ctx, base_tool_def)
        else:
            return base_tool_def


ObjectJsonSchema: TypeAlias = dict[str, Any]
"""Type representing JSON schema of an object, e.g. where `"type": "object"`.

This type is used to define tools parameters (aka arguments) in [ToolDefinition][pydantic_ai.tools.ToolDefinition].

With PEP-728 this should be a TypedDict with `type: Literal['object']`, and `extra_parts=Any`
"""

ToolKind: TypeAlias = Literal['function', 'output', 'external', 'unapproved']
"""Kind of tool."""


@dataclass(repr=False, kw_only=True)
class ToolDefinition:
    """Definition of a tool passed to a model.

    This is used for both function tools and output tools.
    """

    name: str
    """The name of the tool."""

    parameters_json_schema: ObjectJsonSchema = field(default_factory=lambda: {'type': 'object', 'properties': {}})
    """The JSON schema for the tool's parameters."""

    description: str | None = None
    """The description of the tool."""

    outer_typed_dict_key: str | None = None
    """The key in the outer [TypedDict] that wraps an output tool.

    This will only be set for output tools which don't have an `object` JSON schema.
    """

    strict: bool | None = None
    """Whether to enforce (vendor-specific) strict JSON schema validation for tool calls.

    Setting this to `True` while using a supported model generally imposes some restrictions on the tool's
    JSON schema in exchange for guaranteeing the API responses strictly match that schema.

    When `False`, the model may be free to generate other properties or types (depending on the vendor).

    When `None` (the default), the value will be inferred based on the compatibility of the parameters_json_schema.

    Note: this is currently only supported by OpenAI models.
""" sequential: bool = False """Whether this tool requires a sequential/serial execution environment.""" kind: ToolKind = field(default='function') """The kind of tool: - `'function'`: a tool that will be executed by Pydantic AI during an agent run and has its result returned to the model - `'output'`: a tool that passes through an output value that ends the run - `'external'`: a tool whose result will be produced outside of the Pydantic AI agent run in which it was called, because it depends on an upstream service (or user) or could take longer to generate than it's reasonable to keep the agent process running. See the [tools documentation](../deferred-tools.md#deferred-tools) for more info. - `'unapproved'`: a tool that requires human-in-the-loop approval. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. """ metadata: dict[str, Any] | None = None """Tool metadata that can be set by the toolset this tool came from. It is not sent to the model, but can be used for filtering and tool behavior customization. For MCP tools, this contains the `meta`, `annotations`, and `output_schema` fields from the tool definition. """ @property def defer(self) -> bool: """Whether calls to this tool will be deferred. See the [tools documentation](../deferred-tools.md#deferred-tools) for more info. """ return self.kind in ('external', 'unapproved') __repr__ = _utils.dataclasses_no_defaults_repr
