parallelization-agent.ipynb
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "<center>\n", " <p style=\"text-align:center\">\n", " <img alt=\"phoenix logo\" src=\"https://storage.googleapis.com/arize-phoenix-assets/assets/phoenix-logo-light.svg\" width=\"200\"/>\n", " <br>\n", " <a href=\"https://arize.com/docs/phoenix/\">Docs</a>\n", " |\n", " <a href=\"https://github.com/Arize-ai/phoenix\">GitHub</a>\n", " |\n", " <a href=\"https://arize-ai.slack.com/join/shared_invite/zt-2w57bhem8-hq24MB6u7yE_ZF_ilOYSBw#/shared-invite/email\">Community</a>\n", " </p>\n", "</center>\n", "\n", "# Google GenAI SDK - Building a Parallelization Agent" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Install Dependencies" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install -q google-genai arize-phoenix-otel openinference-instrumentation-google-genai" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Connect to Arize Phoenix" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "from getpass import getpass\n", "\n", "from google import genai\n", "\n", "from phoenix.otel import register\n", "\n", "if \"PHOENIX_API_KEY\" not in os.environ:\n", " os.environ[\"PHOENIX_API_KEY\"] = getpass(\"🔑 Enter your Phoenix API key: \")\n", "\n", "if \"PHOENIX_COLLECTOR_ENDPOINT\" not in os.environ:\n", " os.environ[\"PHOENIX_COLLECTOR_ENDPOINT\"] = getpass(\"🔑 Enter your Phoenix Collector Endpoint\")\n", "\n", "tracer_provider = register(auto_instrument=True, project_name=\"google-genai-parallelization-agent\")\n", "tracer = tracer_provider.get_tracer(__name__)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Authenticate with Google Vertex AI" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!gcloud auth login" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Create a client using the Vertex AI API, you could also use the Google GenAI API instead here\n", "client = genai.Client(vertexai=True, project=\"<ADD YOUR GCP PROJECT ID>\", location=\"us-central1\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Parallelization" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import concurrent.futures\n", "import textwrap\n", "\n", "from opentelemetry.context import attach, detach, get_current\n", "\n", "GEMINI_MODEL_NAME = \"gemini-2.0-flash-001\"\n", "\n", "\n", "def define_research_prompts():\n", " \"\"\"Define the research prompts for different topics.\"\"\"\n", " return {\n", " \"ai_research_result\": (\n", " \"Artificial Intelligence\",\n", " \"\"\"You are an AI Research Assistant.\n", "Research the latest advancements in 'Artificial Intelligence'.\n", "Summarize your key findings concisely (1-2 sentences).\n", "Focus on information readily available up to your knowledge cutoff.\n", "Output *only* the summary.\"\"\",\n", " ),\n", " \"quantum_research_result\": (\n", " \"Quantum Computing\",\n", " \"\"\"You are an AI Research Assistant specializing in physics and computing.\n", "Research the latest breakthroughs in 'Quantum Computing'.\n", "Summarize your key findings concisely (1-2 sentences).\n", "Focus on information readily available up to your knowledge cutoff.\n", "Output *only* the summary.\"\"\",\n", " ),\n", " \"biotech_research_result\": (\n", " \"Biotechnology\",\n", " \"\"\"You are an AI Research Assistant 
# Parallelization

```python
import concurrent.futures
import textwrap

from opentelemetry.context import attach, detach, get_current

GEMINI_MODEL_NAME = "gemini-2.0-flash-001"


def define_research_prompts():
    """Define the research prompts for different topics."""
    return {
        "ai_research_result": (
            "Artificial Intelligence",
            """You are an AI Research Assistant.
Research the latest advancements in 'Artificial Intelligence'.
Summarize your key findings concisely (1-2 sentences).
Focus on information readily available up to your knowledge cutoff.
Output *only* the summary.""",
        ),
        "quantum_research_result": (
            "Quantum Computing",
            """You are an AI Research Assistant specializing in physics and computing.
Research the latest breakthroughs in 'Quantum Computing'.
Summarize your key findings concisely (1-2 sentences).
Focus on information readily available up to your knowledge cutoff.
Output *only* the summary.""",
        ),
        "biotech_research_result": (
            "Biotechnology",
            """You are an AI Research Assistant specializing in life sciences.
Research the latest innovations in 'Biotechnology'.
Summarize your key findings concisely (1-2 sentences).
Focus on information readily available up to your knowledge cutoff.
Output *only* the summary.""",
        ),
    }


@tracer.chain()
def run_research_task(topic, prompt):
    """Calls the generative model and returns the text result."""
    print(f"Starting research for: {topic}...")
    try:
        response = client.models.generate_content(model=GEMINI_MODEL_NAME, contents=prompt)
        print(f"Finished research for: {topic}.")
        return response.text.strip()
    except Exception as e:
        print(f"Error during research for {topic}: {e}")
        return f"Error retrieving information for {topic}."


# This is a helper function to wrap the run_research_task function with the OTel context.
# Without this, each research subtask would be traced as a new span, and wouldn't be nested
# under the main research task span.
def context_wrapped_task(context, func, *args, **kwargs):
    token = attach(context)
    try:
        return func(*args, **kwargs)
    finally:
        detach(token)


@tracer.chain()
def execute_parallel_research(research_prompts):
    """Execute research tasks in parallel and return the results."""
    research_results = {}
    context = get_current()  # Capture the current OTel context
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Submit tasks, wrapping each with the context
        future_to_key = {
            executor.submit(context_wrapped_task, context, run_research_task, topic, prompt): key
            for key, (topic, prompt) in research_prompts.items()
        }
        # Collect results as they complete
        for future in concurrent.futures.as_completed(future_to_key):
            key = future_to_key[future]
            try:
                result = future.result()
                research_results[key] = result
            except Exception as exc:
                print(f"{key} generated an exception: {exc}")
                research_results[key] = f"Error in {key} task."
    return research_results
```
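Threads plus the explicit attach/detach wrapper are one way to keep the fanned-out spans nested under the parent. The `google-genai` SDK also exposes an async surface (`client.aio`), where that bookkeeping is unnecessary because `asyncio` tasks inherit OpenTelemetry's `contextvars`-based context automatically. A minimal sketch under that assumption, reusing the `client`, model name, and prompt dictionary defined above:

```python
import asyncio


async def execute_parallel_research_async(research_prompts):
    """Fan out the research prompts concurrently on the async client."""

    async def run_one(topic, prompt):
        # client.aio mirrors client.models with awaitable methods; the OTel
        # context flows into each task without an attach/detach wrapper.
        response = await client.aio.models.generate_content(
            model=GEMINI_MODEL_NAME, contents=prompt
        )
        return response.text.strip()

    keys = list(research_prompts)
    summaries = await asyncio.gather(
        *(run_one(topic, prompt) for topic, prompt in research_prompts.values())
    )
    return dict(zip(keys, summaries))


# In a notebook cell:
# research_results = await execute_parallel_research_async(define_research_prompts())
```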
```python
@tracer.chain()
def create_synthesis_prompt(research_results):
    """Create a prompt for synthesizing the research results."""
    return f"""You are an AI Assistant responsible for combining research findings into a structured report.

Your primary task is to synthesize the following research summaries, clearly attributing findings to their
source areas (AI, Quantum Computing, Biotechnology). Structure your response using headings for each topic.
Ensure the report is coherent and integrates the key points smoothly.

**Crucially: Your entire response MUST be grounded *exclusively* on the information provided in the
'Input Summaries' below. Do NOT add any external knowledge, facts, or details not present in these
specific summaries.**

**Input Summaries:**

* **AI Advancements:** {research_results.get("ai_research_result", "N/A")}
* **Quantum Computing:** {research_results.get("quantum_research_result", "N/A")}
* **Biotechnology:** {research_results.get("biotech_research_result", "N/A")}

Produce the final synthesized report.
"""


@tracer.chain()
def synthesize_results(synthesis_instruction):
    """Generate a synthesized report from the research results."""
    print("\n--- Starting Synthesis ---")
    try:
        synthesis_response = client.models.generate_content(
            model=GEMINI_MODEL_NAME, contents=synthesis_instruction
        )
        final_report = synthesis_response.text.strip()
        print("--- Synthesis Complete ---")
        return final_report
    except Exception as e:
        print(f"Error during synthesis: {e}")
        return "Error generating the final report."


def display_report(final_report):
    """Display the final synthesized report."""
    print("\n=== Final Synthesized Report ===\n")
    # Use textwrap for potentially long reports in notebooks
    print(textwrap.fill(final_report, width=80))


# Main execution flow
@tracer.agent()
def main():
    research_prompts = define_research_prompts()
    research_results = execute_parallel_research(research_prompts)

    print("\n--- Parallel Research Results ---")
    for key, result in research_results.items():
        print(f"{key}: {result}")

    synthesis_instruction = create_synthesis_prompt(research_results)
    final_report = synthesize_results(synthesis_instruction)
    display_report(final_report)
    return final_report


main()
```
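Once `main()` finishes, the agent, chain, and LLM spans land in the `google-genai-parallelization-agent` project in Phoenix, with the three research chains sitting side by side under `execute_parallel_research`. A hedged sketch for pulling those spans programmatically with the full `arize-phoenix` client (not installed by the pip cell above, so `pip install arize-phoenix` first):

```python
import phoenix as px

# Fetch the spans recorded for this notebook's project as a pandas DataFrame.
spans_df = px.Client().get_spans_dataframe(project_name="google-genai-parallelization-agent")
print(spans_df.head())
```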
