{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 2. Create Your Own Agents\n",
"\n",
"This notebook expects that you're all setup (Have Claude desktop app running with an MCP Server setup in the previous notebook).\n",
"\n",
"Here we'll show you **how to pass your own Python functions to Claude** "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import fused\n",
"import json\n",
"import os\n",
"import time\n",
"from pathlib import Path"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# We still need your local paths\n",
"PATH_TO_CLAUDE_CONFIG = (\n",
" f\"{str(Path.home())}/Library/Application Support/Claude/claude_desktop_config.json\"\n",
")\n",
"\n",
"\n",
"if not os.path.exists(PATH_TO_CLAUDE_CONFIG):\n",
" # Creating the config file\n",
" os.makedirs(os.path.dirname(PATH_TO_CLAUDE_CONFIG), exist_ok=True)\n",
" with open(PATH_TO_CLAUDE_CONFIG, \"w\") as f:\n",
" json.dump({}, f)\n",
"\n",
"assert os.path.exists(PATH_TO_CLAUDE_CONFIG), (\n",
" \"Please update the PATH_TO_CLAUDE_CONFIG variable with the correct path to your Claude config file\"\n",
")"
]
},
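{
"cell_type": "markdown",
"metadata": {},
"source": [
"The path above assumes macOS. If you're on Windows, Claude Desktop keeps its config under `%APPDATA%\\Claude\\` instead. A minimal sketch you could adapt (the Windows location is our assumption based on Claude Desktop's documented default; adjust it if your install differs):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch for non-macOS setups (assumption: on Windows, Claude Desktop\n",
"# stores its config under %APPDATA%\\Claude\\ - adjust if your install differs)\n",
"import platform\n",
"\n",
"if platform.system() == \"Windows\":\n",
"    PATH_TO_CLAUDE_CONFIG = os.path.join(\n",
"        os.environ[\"APPDATA\"], \"Claude\", \"claude_desktop_config.json\"\n",
"    )"
]
},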
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Local path to the Claude app\n",
"CLAUDE_APP_PATH = \"/Applications/Claude.app\"\n",
"assert os.path.exists(CLAUDE_APP_PATH), (\n",
" \"Please update the CLAUDE_APP_PATH variable with the correct path to your Claude app\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# Change this path if you're not running this from the repo root\n",
"WORKING_DIR = os.getcwd()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"# We'll load the commons folder once again to have our helper functions\n",
"commit = \"28821ea\"\n",
"common = fused.load(\n",
" f\"https://github.com/fusedio/udfs/tree/{commit}/public/common\"\n",
").utils"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'agents': [{'name': 'get_current_time', 'udfs': ['current_utc_time']},\n",
" {'name': 'fused_docs', 'udfs': ['list_public_udfs', 'reading_fused_docs']}]}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# And see which agents we have available\n",
"json.load(open(os.path.join(WORKING_DIR, \"agents.json\")))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We'll make a simple UDF that gives us the top 5 stories from Hacker News as our basis"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"@fused.udf\n",
"def udf(story_type: str = \"top\"):\n",
" \"\"\"\n",
" Fetches top posts from Hacker News as a dataframe.\n",
"\n",
" Parameters:\n",
" story_type (str): Type of stories to fetch. Options are:\n",
" - \"top\" for top stories\n",
" - \"newest\" for latest stories\n",
"\n",
" Returns:\n",
" pandas.DataFrame: DataFrame containing HN posts with columns:\n",
" id, title, url, score, by (author), time, descendants (comments)\n",
" \"\"\"\n",
" import pandas as pd\n",
" import requests\n",
" import time\n",
" from datetime import datetime\n",
"\n",
" # Validate input\n",
" if story_type not in [\"top\", \"newest\"]:\n",
" raise ValueError('Invalid story_type. Must be \"top\" or \"newest\"')\n",
"\n",
" # Map story_type to the appropriate HN API endpoint\n",
" endpoint_map = {\"top\": \"topstories\", \"newest\": \"newstories\"}\n",
"\n",
" endpoint = endpoint_map[story_type]\n",
"\n",
" # Fetch the list of top or newest story IDs\n",
" response = requests.get(f\"https://hacker-news.firebaseio.com/v0/{endpoint}.json\")\n",
" story_ids = response.json()\n",
"\n",
" # Only doing 5 stories for now\n",
" story_ids = story_ids[:5]\n",
"\n",
" # Fetch details for each story ID\n",
" stories = []\n",
" for story_id in story_ids:\n",
" try:\n",
" # Get the story details\n",
" story_response = requests.get(\n",
" f\"https://hacker-news.firebaseio.com/v0/item/{story_id}.json\"\n",
" )\n",
" story = story_response.json()\n",
"\n",
" # Skip if not a story or missing key fields\n",
" if not story or story.get(\"type\") != \"story\" or \"title\" not in story:\n",
" continue\n",
"\n",
" # Add to our list\n",
" stories.append(\n",
" {\n",
" \"id\": story.get(\"id\"),\n",
" \"title\": story.get(\"title\"),\n",
" \"url\": story.get(\"url\", \"\"),\n",
" \"score\": story.get(\"score\", 0),\n",
" \"by\": story.get(\"by\", \"\"),\n",
" \"time\": datetime.fromtimestamp(story.get(\"time\", 0)),\n",
" \"descendants\": story.get(\"descendants\", 0),\n",
" }\n",
" )\n",
"\n",
" # Brief pause to avoid overloading the API\n",
" time.sleep(0.1)\n",
"\n",
" except Exception as e:\n",
" print(f\"Error fetching story {story_id}: {e}\")\n",
"\n",
" # Convert the list of stories to a DataFrame\n",
" df = pd.DataFrame(stories)\n",
"\n",
" # Add a timestamp for when the data was fetched\n",
" df[\"fetched_at\"] = datetime.now()\n",
" print(f\"{df=}\")\n",
"\n",
" return df"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>id</th>\n",
" <th>title</th>\n",
" <th>url</th>\n",
" <th>score</th>\n",
" <th>by</th>\n",
" <th>time</th>\n",
" <th>descendants</th>\n",
" <th>fetched_at</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>43410061</td>\n",
" <td>The Lost Art of Research as Leisure</td>\n",
" <td>https://kasurian.com/p/research-as-leisure</td>\n",
" <td>166</td>\n",
" <td>altilunium</td>\n",
" <td>2025-03-19 10:09:15</td>\n",
" <td>77</td>\n",
" <td>2025-03-19 13:25:12.563609</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>43400989</td>\n",
" <td>Two new PebbleOS watches</td>\n",
" <td>https://ericmigi.com/blog/introducing-two-new-...</td>\n",
" <td>1441</td>\n",
" <td>griffinli</td>\n",
" <td>2025-03-18 15:59:27</td>\n",
" <td>428</td>\n",
" <td>2025-03-19 13:25:12.563609</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>43406710</td>\n",
" <td>Make Ubuntu packages 90% faster by rebuilding ...</td>\n",
" <td>https://gist.github.com/jwbee/7e8b27e298de8bbb...</td>\n",
" <td>446</td>\n",
" <td>jeffbee</td>\n",
" <td>2025-03-18 23:55:17</td>\n",
" <td>258</td>\n",
" <td>2025-03-19 13:25:12.563609</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>43401245</td>\n",
" <td>Apple restricts Pebble from being awesome with...</td>\n",
" <td>https://ericmigi.com/blog/apple-restricts-pebb...</td>\n",
" <td>1556</td>\n",
" <td>griffinli</td>\n",
" <td>2025-03-18 16:23:21</td>\n",
" <td>958</td>\n",
" <td>2025-03-19 13:25:12.563609</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>43410885</td>\n",
" <td>The Origin of the Pork Taboo</td>\n",
" <td>https://archaeology.org/issues/march-april-202...</td>\n",
" <td>3</td>\n",
" <td>diodorus</td>\n",
" <td>2025-03-19 12:16:06</td>\n",
" <td>0</td>\n",
" <td>2025-03-19 13:25:12.563609</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" id title \\\n",
"0 43410061 The Lost Art of Research as Leisure \n",
"1 43400989 Two new PebbleOS watches \n",
"2 43406710 Make Ubuntu packages 90% faster by rebuilding ... \n",
"3 43401245 Apple restricts Pebble from being awesome with... \n",
"4 43410885 The Origin of the Pork Taboo \n",
"\n",
" url score by \\\n",
"0 https://kasurian.com/p/research-as-leisure 166 altilunium \n",
"1 https://ericmigi.com/blog/introducing-two-new-... 1441 griffinli \n",
"2 https://gist.github.com/jwbee/7e8b27e298de8bbb... 446 jeffbee \n",
"3 https://ericmigi.com/blog/apple-restricts-pebb... 1556 griffinli \n",
"4 https://archaeology.org/issues/march-april-202... 3 diodorus \n",
"\n",
" time descendants fetched_at \n",
"0 2025-03-19 10:09:15 77 2025-03-19 13:25:12.563609 \n",
"1 2025-03-18 15:59:27 428 2025-03-19 13:25:12.563609 \n",
"2 2025-03-18 23:55:17 258 2025-03-19 13:25:12.563609 \n",
"3 2025-03-18 16:23:21 958 2025-03-19 13:25:12.563609 \n",
"4 2025-03-19 12:16:06 0 2025-03-19 13:25:12.563609 "
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# We can run this UDF locally with `fused.run(udf)`\n",
"fused.run(udf)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating our own agent with our own UDF\n",
"\n",
"We want to be able to give this over to Claude we need to create a new agent for. We'll need a few things:\n",
"- `agent_name` -> The name of the agent we want to call. This `agent_name` can call multiple `udf_names`\n",
"- `udf_name` -> The name of our UDF. Right now we defined it as `udf`, but we can name it for example `recent_hacker_news_stories`\n",
"- `mcp_metadata` -> We also need to give some info to Claude about what our UDF does, and what parameters it have access to. This includes:\n",
" - `description`: What the UDF does\n",
" - `parameters`: A dict containing:\n",
" - `\"name\"` -> The name of the parameter. In our example above this would be `story_type`\n",
" - `\"type\"` -> The python type of the parameter. In our example it would be `str`"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'description': \"This UDF gets the latest 5 stories names, url, author & time from Hacker News. It get either get the 'top' or 'newest' stories\",\n",
" 'parameters': [{'name': 'story_type', 'type': 'int'}]}"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"hacker_news_mcp_metadata = {\n",
" \"description\": \"This UDF gets the latest 5 stories names, url, author & time from Hacker News. It get either get the 'top' or 'newest' stories\",\n",
" \"parameters\": [\n",
" {\n",
" \"name\": \"story_type\",\n",
" \"type\": \"str\",\n",
" }\n",
" ],\n",
"}\n",
"\n",
"hacker_news_mcp_metadata"
]
},
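{
"cell_type": "markdown",
"metadata": {},
"source": [
"If your UDF took more than one parameter, each one would simply get its own entry in the `parameters` list. A hypothetical sketch (the `n_stories` parameter below is made up for illustration and isn't part of our UDF):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical metadata for a UDF with two parameters (n_stories is invented here,\n",
"# purely to show the shape of the \"parameters\" list)\n",
"example_mcp_metadata = {\n",
"    \"description\": \"Fetches Hacker News stories, either 'top' or 'newest', up to n_stories items\",\n",
"    \"parameters\": [\n",
"        {\"name\": \"story_type\", \"type\": \"str\"},\n",
"        {\"name\": \"n_stories\", \"type\": \"int\"},\n",
"    ],\n",
"}"
]
},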
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"# We'll create a new Agent that can access the news\n",
"common.save_to_agent(\n",
" agent_json_path=os.path.join(WORKING_DIR, \"agents.json\"),\n",
" udf=udf,\n",
" agent_name=\"getting_the_news\",\n",
" udf_name=\"recent_hacker_news_stories\",\n",
" mcp_metadata=hacker_news_mcp_metadata,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"agents\": [\n",
" {\n",
" \"name\": \"get_current_time\",\n",
" \"udfs\": [\n",
" \"current_utc_time\"\n",
" ]\n",
" },\n",
" {\n",
" \"name\": \"fused_docs\",\n",
" \"udfs\": [\n",
" \"list_public_udfs\",\n",
" \"reading_fused_docs\"\n",
" ]\n",
" },\n",
" {\n",
" \"name\": \"getting_the_news\",\n",
" \"udfs\": [\n",
" \"recent_hacker_news_stories\"\n",
" ]\n",
" }\n",
" ]\n",
"}\n"
]
}
],
"source": [
"# let's look at the existing agents\n",
"agents = json.load(open(os.path.join(WORKING_DIR, \"agents.json\")))\n",
"print(json.dumps(agents, indent=4, sort_keys=True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We've made a utility that lets you pass multiple agents to Claude if you want, each with individual UDFs\n",
"\n",
"For now let's simply use the `getting_the_news` agent we created:"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
"agents_list = [\"getting_the_news\"]"
]
},
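{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you wanted Claude to see more than one agent at once, you could list several names here, as long as each one exists in `agents.json`. A small sketch, combining our new agent with the `fused_docs` agent from the previous notebook:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: exposing several agents at once (each name must already exist in agents.json)\n",
"multi_agent_list = [\"getting_the_news\", \"fused_docs\"]"
]
},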
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"# Finally, we can select which Agent we want to pass to Claude in our MCP server config\n",
"common.generate_local_mcp_config(\n",
" config_path=PATH_TO_CLAUDE_CONFIG,\n",
" agents_list=agents_list,\n",
" repo_path=WORKING_DIR,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Claude uses a specific config (that you passed under `PATH_TO_CLAUDE_CONFIG`) to know what to run under the hood. This is what we're editing for you each time you change the agent you want to run"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"mcpServers\": {\n",
" \"getting_the_news\": {\n",
" \"args\": [\n",
" \"run\",\n",
" \"--directory\",\n",
" \"/Users/maximelenormand/Library/CloudStorage/Dropbox/Mac/Documents/repos/fused-mcp\",\n",
" \"main.py\",\n",
" \"--runtime=local\",\n",
" \"--udf-names=recent_hacker_news_stories\",\n",
" \"--name=getting_the_news\"\n",
" ],\n",
" \"command\": \"uv\"\n",
" }\n",
" }\n",
"}\n"
]
}
],
"source": [
"# Let's read this Claude Desktop config to see what we're passing\n",
"claude_config = json.load(open(PATH_TO_CLAUDE_CONFIG))\n",
"print(json.dumps(claude_config, indent=4, sort_keys=True))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Now let's restart Claude with this new agent!"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"def restart_claude(claude_path: str = CLAUDE_APP_PATH):\n",
" app_name = claude_path.split(\"/\")[-1]\n",
"\n",
" try:\n",
" os.system(f\"pkill -f '{app_name}'\")\n",
" print(f\"Killed {app_name}\")\n",
" time.sleep(2) # Wait for shutdown\n",
" except Exception:\n",
" print(\"Claude wasn't running, so no need to kill it\")\n",
"\n",
" print(f\"Restarting {app_name}\")\n",
" os.system(f\"open -a '{claude_path}'\") # Restart Claude"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Killed Claude.app\n",
"Restarting Claude.app\n"
]
}
],
"source": [
"restart_claude()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}