# main.py
import asyncio
import time
from mcp_agent.app import MCPApp
from mcp_agent.config import (
AzureSettings,
Settings,
LoggerSettings,
MCPSettings,
MCPServerSettings,
)
from mcp_agent.agents.agent import Agent
from mcp_agent.workflows.llm.augmented_llm_azure import AzureAugmentedLLM
# Programmatic configuration: asyncio execution engine, file-based debug
# logging, a single MCP "fetch" server launched via uvx, and Azure OpenAI
# credentials (placeholders — replace before running with these settings).
fetch_server = MCPServerSettings(
    command="uvx",
    args=["mcp-server-fetch"],
)

settings = Settings(
    execution_engine="asyncio",
    logger=LoggerSettings(type="file", level="debug"),
    mcp=MCPSettings(servers={"fetch": fetch_server}),
    azure=AzureSettings(
        api_key="changethis",
        endpoint="https://<your-resource-name>.cognitiveservices.azure.com/openai/deployments/<your-deployment-name>",
        default_model="gpt-4o-mini",
        api_version="2025-01-01-preview",
    ),
)
# Settings can either be specified programmatically,
# or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml
app = MCPApp(name="mcp_basic_agent")  # pass settings=settings to configure in code
async def example_usage():
    """Run the app, connect a fetch-capable agent, and ask an Azure LLM
    to print the first two paragraphs of the MCP introduction page."""
    async with app.run() as running_app:
        log = running_app.logger
        ctx = running_app.context
        log.info("Current config:", data=ctx.config.model_dump())

        # Agent wired to the "fetch" MCP server configured above.
        finder = Agent(
            name="finder",
            instruction="""You are an agent with the ability to fetch URLs. Your job is to identify
            the closest match to a user's request, make the appropriate tool calls,
            and return the URI and CONTENTS of the closest match.""",
            server_names=["fetch"],
        )

        async with finder:
            log.info("finder: Connected to server, calling list_tools...")
            tools = await finder.list_tools()
            log.info("Tools available:", data=tools.model_dump())

            # Attach an Azure-backed LLM to the agent and issue one request.
            llm = await finder.attach_llm(AzureAugmentedLLM)
            result = await llm.generate_str(
                message="Print the first 2 paragraphs of https://modelcontextprotocol.io/introduction",
            )
            log.info(f"First 2 paragraphs of Model Context Protocol docs: {result}")
if __name__ == "__main__":
    # time.perf_counter() is monotonic and high-resolution, unlike
    # time.time() (wall clock, subject to system clock adjustments),
    # so it is the correct clock for measuring elapsed duration.
    start = time.perf_counter()
    asyncio.run(example_usage())
    elapsed = time.perf_counter() - start
    print(f"Total run time: {elapsed:.2f}s")