
mcp-run-python

Official MCP server by pydantic
test_builtin_tools.py (1.69 kB)
from __future__ import annotations

import pytest

from pydantic_ai.agent import Agent
from pydantic_ai.builtin_tools import CodeExecutionTool, WebSearchTool
from pydantic_ai.exceptions import UserError
from pydantic_ai.models import Model


@pytest.mark.parametrize('model', ('bedrock', 'mistral', 'cohere', 'huggingface', 'test'), indirect=True)
async def test_builtin_tools_not_supported_web_search(model: Model, allow_model_requests: None):
    # WebSearchTool raises a UserError on models that do not support built-in web search.
    agent = Agent(model=model, builtin_tools=[WebSearchTool()])

    with pytest.raises(UserError):
        await agent.run('What day is tomorrow?')


@pytest.mark.parametrize('model', ('bedrock', 'mistral', 'huggingface'), indirect=True)
async def test_builtin_tools_not_supported_web_search_stream(model: Model, allow_model_requests: None):
    # The same check applies when streaming.
    agent = Agent(model=model, builtin_tools=[WebSearchTool()])

    with pytest.raises(UserError):
        async with agent.run_stream('What day is tomorrow?'):
            ...  # pragma: no cover


@pytest.mark.parametrize('model', ('groq', 'openai'), indirect=True)
async def test_builtin_tools_not_supported_code_execution(model: Model, allow_model_requests: None):
    # CodeExecutionTool raises a UserError on models that do not support built-in code execution.
    agent = Agent(model=model, builtin_tools=[CodeExecutionTool()])

    with pytest.raises(UserError):
        await agent.run('What day is tomorrow?')


@pytest.mark.parametrize('model', ('groq', 'openai'), indirect=True)
async def test_builtin_tools_not_supported_code_execution_stream(model: Model, allow_model_requests: None):
    # The same check applies when streaming.
    agent = Agent(model=model, builtin_tools=[CodeExecutionTool()])

    with pytest.raises(UserError):
        async with agent.run_stream('What day is tomorrow?'):
            ...  # pragma: no cover
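For contrast with the failure cases above, here is a minimal usage sketch of a built-in tool on a model that does support it; the model string and the use of result.output are illustrative assumptions, not taken from the test file.

import asyncio

from pydantic_ai.agent import Agent
from pydantic_ai.builtin_tools import WebSearchTool

# Assumes a provider/model that supports built-in web search;
# 'anthropic:claude-sonnet-4-0' is an illustrative placeholder.
agent = Agent(model='anthropic:claude-sonnet-4-0', builtin_tools=[WebSearchTool()])


async def main() -> None:
    result = await agent.run('What day is tomorrow?')
    print(result.output)


if __name__ == '__main__':
    asyncio.run(main())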

MCP directory API

We provide all the information about MCP servers via our MCP directory API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/pydantic/pydantic-ai'
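A minimal sketch of calling the same endpoint from Python, assuming the response body is a JSON document describing the server (the exact response schema is not shown here):

import httpx


def fetch_server_metadata() -> dict:
    # GET the Glama MCP directory entry for pydantic/pydantic-ai.
    url = 'https://glama.ai/api/mcp/v1/servers/pydantic/pydantic-ai'
    response = httpx.get(url)
    response.raise_for_status()
    return response.json()


if __name__ == '__main__':
    print(fetch_server_metadata())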

If you have feedback or need assistance with the MCP directory API, please join our Discord server.