browser_agent.py
import asyncio
import json
import os

from dotenv import load_dotenv
from pydantic import BaseModel

from browser_use import Agent, Browser, ChatOpenAI
from dedalus_labs import AsyncDedalus, DedalusRunner
from lib import extract_json

load_dotenv()


class Evaluation(BaseModel):
    website: str
    task: str
    score: int
    reasoning: str


async def browser_agent(website: str, task: str, inputs: dict = None) -> str:
    """Navigate to a website and perform a browser automation task"""
    api_key = os.getenv("OPENAI_API_KEY")
    llm = ChatOpenAI(model="gpt-4o", api_key=api_key, temperature=0)

    # Concatenate inputs to task
    full_task = task
    if inputs:
        inputs_text = "\n\nTHESE ARE YOUR INPUTS:\n"
        for key, value in inputs.items():
            inputs_text += f"- {key}: {value}\n"
        full_task = task + inputs_text

    browser = Browser(headless=False)
    try:
        await browser.start()
        await browser.new_page(website)
        agent = Agent(task=full_task, browser=browser, llm=llm)
        # Cap the run at 15 steps and 180 seconds so a stuck agent fails fast
        result = await asyncio.wait_for(agent.run(max_steps=15), timeout=180)
        return result
    except asyncio.TimeoutError:
        return "TIMEOUT: Browser agent exceeded 180 seconds"
    except Exception as e:
        return f"ERROR: {str(e)}"
    finally:
        await browser.stop()


async def scoring_agent(task: str, result: str) -> Evaluation:
    """Evaluate if the browser agent completed the task successfully"""
    client = AsyncDedalus()
    runner = DedalusRunner(client)

    evaluation = await runner.run(
        input=f"""An intern was given the following task: {task}.

This is the result of the task: {result}.

Evaluate whether the intern completed the task successfully, and give your reasons for the score you assign.

Respond with ONLY a JSON object with this structure:
{{
    "score": <number 1-10>,
    "reasoning": "<detailed feedback>"
}}""",
        model="openai/gpt-4o",
        stream=False,
    )

    json_str = extract_json(evaluation.final_output)
    json_data = json.loads(json_str)

    # Add website and task to evaluation
    json_data['website'] = ''
    json_data['task'] = task

    return Evaluation(**json_data)


async def feedback_agent(website: str, original_task: str, evaluation: Evaluation) -> str:
    """Generate an improved task based on evaluation feedback"""
    client = AsyncDedalus()
    runner = DedalusRunner(client)

    result = await runner.run(
        input=f"""You are a task improvement specialist. Based on the evaluation of a browser automation task, generate an improved version of the task that addresses the issues identified.

Website: {website}
Original Task: {original_task}
Evaluation Score: {evaluation.score}/10
Evaluation Reasoning: {evaluation.reasoning}

Analyze the reasoning and create an improved task that:
1. Addresses the specific issues mentioned in the reasoning
2. Is clearer and more actionable

Don't be overly specific or complex. Keep it simple and concise.

Respond with ONLY the improved task description as plain text, no JSON or extra formatting.""",
        model="openai/gpt-5",
        stream=False,
    )

    improved_task = str(result).strip()
    return improved_task
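Note that scoring_agent depends on extract_json from the project's lib module, which is not shown on this page. A helper like that typically strips markdown fences or surrounding prose so that json.loads receives a clean object; the version below is a hypothetical stand-in written under that assumption, not the project's actual implementation.

import re


def extract_json(text: str) -> str:
    """Hypothetical stand-in for lib.extract_json: return the first JSON object in a model reply."""
    # Prefer a fenced ```json block if the model wrapped its answer in one
    fenced = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
    if fenced:
        return fenced.group(1)
    # Otherwise fall back to the outermost pair of braces
    start, end = text.find("{"), text.rfind("}")
    if start != -1 and end > start:
        return text[start:end + 1]
    raise ValueError("No JSON object found in model output")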

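The three coroutines compose naturally into an evaluate-and-improve loop: run the browser agent, score its output, and feed the reasoning back to tighten the task. The file itself does not include an entry point, so the driver below is only a minimal sketch; the main function, the example website and task, the retry budget of 3, and the passing threshold of 8 are all assumptions rather than part of the original code.

import asyncio


async def main() -> None:
    # Hypothetical driver, not part of browser_agent.py; assumes the three
    # coroutines above are defined in (or imported into) the same module.
    website = "https://example.com"              # assumed example target
    task = "Find the page title and report it."  # assumed example task

    for attempt in range(3):                     # assumed retry budget
        result = await browser_agent(website, task)
        evaluation = await scoring_agent(task, str(result))
        print(f"Attempt {attempt + 1}: {evaluation.score}/10 - {evaluation.reasoning}")

        if evaluation.score >= 8:                # assumed passing threshold
            break
        # Ask the feedback agent for a clearer task and try again
        task = await feedback_agent(website, task, evaluation)


if __name__ == "__main__":
    asyncio.run(main())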