
ShallowCodeResearch_agent_research_request

Research coding requests by generating and executing code, then providing a clean summary of results through a multi-agent workflow.

Instructions

This function researches a coding request from the user, generates code, executes it, and returns a clean summary of the results. It is an MCP server function that responds to coding research requests from users. Returns: a tuple containing the JSON result from the orchestrator and a clean, human-readable summary.

Input Schema

| Name | Required | Description | Default |
| --- | --- | --- | --- |
| user_request | No | The user's request or question to be processed | |
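
If you want to exercise the endpoint behind this tool programmatically, one option is the gradio_client library. This is an illustrative sketch only: the Space ID and api_name below are assumptions inferred from the repository and function names, not confirmed values.

```python
# Illustrative only: calling the underlying Gradio endpoint with gradio_client.
# Space ID and api_name are assumptions; adjust them to match the actual deployment.
from gradio_client import Client

client = Client("CodeHalwell/gradio-mcp-agent-hack")  # assumed Hugging Face Space ID
json_result, clean_summary = client.predict(
    "How do I fit a linear regression with scikit-learn?",  # user_request
    api_name="/agent_research_request",                     # assumed endpoint name
)
print(clean_summary)
```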

Implementation Reference

  • app.py:658-721 (handler)
    Primary MCP tool handler for 'ShallowCodeResearch_agent_research_request'. Calls orchestrator, formats JSON result and clean markdown summary with sections for summary, code, execution, citations.
    def agent_research_request(user_request):
        """
        This function researches a coding request from the user, generates code, executes it,
        and returns a clean summary of the results. This is an mcp server function that responds
        to research coding requests from users.

        Args:
            user_request (str): The user's request or question to be processed

        Returns:
            tuple: A tuple containing the JSON result from the orchestrator and a clean summary
        """
        # Get the full response (which is a tuple)
        orchestrator_result = agent_orchestrator(user_request)

        # Extract the JSON result (first element of tuple)
        if isinstance(orchestrator_result, tuple) and len(orchestrator_result) > 0:
            json_result = orchestrator_result[0]
        else:
            json_result = orchestrator_result

        # Extract and format the clean output
        clean_summary = ""

        if isinstance(json_result, dict):
            if 'final_summary' in json_result:
                clean_summary += f"## 📋 Summary\n{json_result['final_summary']}\n\n"

            if 'code_string' in json_result and json_result['code_string']:
                clean_summary += f"## 💻 Generated Code\n```python\n{json_result['code_string']}\n```\n\n"

            if 'execution_output' in json_result and json_result['execution_output']:
                clean_summary += f"## ▶️ Execution Result\n```\n{json_result['execution_output']}\n```\n\n"

            if 'code_output' in json_result and json_result['code_output']:
                # Handle both string and dict formats for code_output
                code_output = json_result['code_output']
                if isinstance(code_output, dict):
                    output = code_output.get('output', '')
                else:
                    output = str(code_output)
                if output:
                    clean_summary += f"## ▶️ Code Output\n```\n{output}\n```\n\n"

            if 'citations' in json_result and json_result['citations']:
                clean_summary += "## 📚 Sources\n"
                for i, citation in enumerate(json_result['citations'], 1):
                    clean_summary += f"{i}. {citation}\n"
                clean_summary += "\n"

            if 'sub_questions' in json_result:
                clean_summary += "## 🔍 Research Questions Explored\n"
                for i, q in enumerate(json_result['sub_questions'], 1):
                    clean_summary += f"{i}. {q}\n"

            # If we have sub-summaries, show them too
            if 'sub_summaries' in json_result and json_result['sub_summaries']:
                clean_summary += "\n## 📖 Research Summaries\n"
                for i, summary in enumerate(json_result['sub_summaries'], 1):
                    clean_summary += f"### {i}. {summary}...\n"

        if not clean_summary:
            clean_summary = "## ⚠️ Processing Complete\nThe request was processed but no detailed results were generated."

        return json_result, clean_summary
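
A minimal usage sketch of the handler above, assuming app.py's module-level orchestrator and its agents are already configured (API keys, model selection, etc.); the request string is just an example:

```python
# Minimal usage sketch; assumes the module-level orchestrator in app.py is configured.
json_result, clean_summary = agent_research_request(
    "Write Python code that computes a 7-day moving average of a list of prices"
)

print(clean_summary)               # markdown with Summary / Generated Code / Execution sections
print(json_result.get("status"))   # "success" on a completed run, absent/error keys otherwise
```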
  • Core implementation logic in OrchestratorAgent.orchestrate(). Coordinates question enhancement, web search, code generation, execution, citations, and final summary using other agents.
    def orchestrate(self, user_request: str, timeout: int = 180) -> tuple[Dict[str, Any], str]:
        """
        Orchestrate the complete workflow: enhance question → search → generate code → execute.

        Manages the full AI-assisted workflow by coordinating all agents to provide comprehensive
        research, code generation, and execution. Returns both structured data and natural
        language summaries of the complete process.

        Args:
            user_request (str): The user's original request or question
            timeout (int): Maximum execution time in seconds (default: 180s/3min)

        Returns:
            tuple[Dict[str, Any], str]: A tuple containing the complete result dictionary
            and a natural language summary of the process
        """
        try:
            # Validate input
            user_request = validate_user_input(
                user_request,
                field_name="User request",
                max_length=10000,
                min_length=5
            )

            logger.info(f"Starting orchestration for: {user_request[:100]}...")

            # Step 1: Enhance the question
            logger.info("Step 1: Enhancing question...")
            enhanced_result = self.question_enhancer.enhance_question(user_request, num_questions=3)
            sub_questions = enhanced_result.get('sub_questions', [user_request])

            # Step 2: Search for information
            logger.info("Step 2: Searching for information...")
            search_results = []
            search_summaries = []

            for i, question in enumerate(sub_questions[:2]):  # Limit to 2 questions to avoid too many searches
                logger.info(f"Processing question {i+1}: {question}")
                try:
                    search_result = self.web_search.search(question)
                    logger.info(f"Search result for question {i+1}: {search_result}")

                    # Extract results and summary regardless of status key
                    results = search_result.get('results', [])
                    summary = search_result.get('tavily_answer', search_result.get('summary', ''))

                    if results or summary:
                        # Treat as success if any results or summary found
                        logger.info(f"Question {i+1} - Found {len(results)} results")
                        logger.info(f"Question {i+1} - Summary: {summary[:100]}...")

                        # Add to collections
                        search_results.extend(results)
                        search_summaries.append(summary)

                        logger.info(f"Question {i+1} - Successfully added {len(results)} results to collection")
                        logger.info(f"Question {i+1} - Current total search_results: {len(search_results)}")
                        logger.info(f"Question {i+1} - Current total search_summaries: {len(search_summaries)}")
                    else:
                        error_msg = search_result.get('error', 'Unknown error or no results returned')
                        logger.warning(f"Search failed for question {i+1}: {error_msg}")
                except Exception as e:
                    logger.error(f"Exception during search for question '{question}': {e}")
                    import traceback
                    logger.error(f"Traceback: {traceback.format_exc()}")

            logger.info(f"Total search results collected: {len(search_results)}")
            logger.info(f"Total search summaries: {len(search_summaries)}")
            for i, result in enumerate(search_results[:3]):
                logger.info(f"Search result {i+1}: {result.get('title', 'No title')[:50]}...")

            # Step 3: Create grounded context
            logger.info("Step 3: Creating grounded context...")
            grounded_context = ""
            if search_results:
                # Combine search results into context
                context_parts = []
                for result in search_results[:5]:  # Limit to top 5 results
                    context_parts.append(f"Title: {result.get('title', 'N/A')}")
                    context_parts.append(f"Content: {result.get('content', 'N/A')}")
                    context_parts.append(f"URL: {result.get('url', 'N/A')}")
                    context_parts.append("---")
                grounded_context = "\n".join(context_parts)

            # If no search results, use a generic context
            if not grounded_context:
                grounded_context = f"User request: {user_request}\nNo additional web search context available."

            # Step 4: Generate code
            logger.info("Step 4: Generating code...")
            logger.info(f"Grounded context length: {len(grounded_context)}")

            code_result, code_summary = self.code_generator.generate_code(user_request, grounded_context)
            logger.info(f"Code generation result: {code_result}")
            logger.info(f"Code generation summary: {code_summary[:200]}...")

            code_string = ""
            if code_result.get('status') == 'success':
                # Use raw_output (string) for display, generated_code (compiled) for execution
                code_string = code_summary  # This is the raw string output
                logger.info(f"Successfully extracted code_string with length: {len(code_string)}")
                logger.info(f"Code preview: {code_string[:200]}...")
            else:
                logger.warning(f"Code generation failed: {code_result.get('error', 'Unknown error')}")

            # Step 5: Execute code if available
            execution_output = ""
            if code_string:
                logger.info("Step 5: Executing code...")
                try:
                    # Use async execution for better performance
                    execution_output = asyncio.run(self.code_runner.run_code_async(code_string))
                except Exception as e:
                    execution_output = f"Execution failed: {str(e)}"
                    logger.warning(f"Code execution failed: {e}")

            # Step 6: Format citations
            logger.info("Step 6: Formatting citations...")
            citations = []
            for result in search_results:
                if result.get('url'):
                    citations.append(f"{result.get('title', 'Untitled')} - {result.get('url')}")

            # Compile final result
            logger.info("=== PRE-FINAL RESULT DEBUG ===")
            logger.info(f"search_results length: {len(search_results)}")
            logger.info(f"search_summaries length: {len(search_summaries)}")
            logger.info(f"code_string length: {len(code_string)}")
            logger.info(f"execution_output length: {len(execution_output)}")
            logger.info(f"citations length: {len(citations)}")
            logger.info("=== GENERATING EXECUTIVE SUMMARY ===")

            # Sample first search result
            if search_results:
                logger.info(f"First search result: {search_results[0]}")

            prompt = f"""
            The user asked about {user_request} which yielded this summary: {search_summaries}

            During the orchestration, you generated the following code: {code_string}

            The code was executed in a secure sandbox environment, and the output was
            <executed_code>{execution_output}</executed_code>.

            If there was no output in the executed_code tags, please state how to answer the user's
            request showing the code required. State that the code you are giving them has not been
            executed, and that they should run it in their own environment.

            Please provide a short and concise summary of the code that you wrote, including the user
            request, the summaries provided and the code generated. Explain how the code addresses the
            user's request, what it does, and any important details about its execution.

            Touch upon the other methods available that were found in the search results, and how they
            relate to the user's request.

            Please return the result in natural language only, without any code blocks, unless as stated
            above, there was no code executed in the sandbox and then you should give them the code as a
            code block.

            References to code can be made to explain why particular code has been used regardless of
            sandbox execution, e.g. discuss why the LinearRegression module was used from scikit-learn etc.

            If no code was generated, apologise and state clearly that the code generation failed in the
            sandbox; this could be due to restrictions or the code being too complex for the sandbox to
            handle.

            Note, if appropriate, indicate how the code can be modified to include human input etc. as
            this is a banned keyword in the sandbox.

            The response should be directed at the user, in a friendly and helpful manner, as if you
            were a human assistant helping the user with their request.

            **Summary Requirements:**
            - The summary should be concise, no more than 500 words.
            - It should clearly explain how the code addresses the user's request.
            - It should only include code if there was no execution output, and then it should be in a
              code block. (If there is executed_code, this will be returned by another process and
              therefore you don't need to include it here.)
            - The summary should be written in a friendly and helpful tone, as if you were a human
              assistant helping the user with their request.
            """

            messages = [{"role": "user", "content": prompt}]

            logger.info(f"LLM provider is: {api_config.llm_provider}, model used: {model_config.get_model_for_provider('llm_processor', api_config.llm_provider)}")

            # Last call to LLM to summarize the entire orchestration
            overall_summary = make_llm_completion(
                model=model_config.get_model_for_provider("llm_processor", api_config.llm_provider),
                messages=messages,
                temperature=app_config.llm_temperature
            )
            logger.info("Overall summary generated:")

            final_result = {
                "status": "success",
                "user_request": user_request,
                "sub_questions": sub_questions,
                "search_results": search_results[:5],
                "search_summaries": search_summaries,
                "code_string": code_string,
                "execution_output": execution_output,
                "citations": citations,
                "final_summary": f"{overall_summary}",
                "message": "Orchestration completed successfully"
            }

            # Create clean summary for display
            final_narrative = f"## 🎯 Request: {user_request}\n\n{overall_summary}"

            logger.info("Orchestration completed successfully")
            return final_result, final_narrative

        except (ValidationError, APIError, CodeGenerationError) as e:
            logger.error(f"Orchestration failed: {str(e)}")
            # Sanitize error message before returning
            sanitized_error = sanitize_error_message(e)
            execution_log = f"Error during orchestration: {sanitized_error}"
            return {"error": sanitized_error, "execution_log": execution_log}, sanitized_error
        except Exception as e:
            logger.error(f"Unexpected error in orchestration: {str(e)}")
            # Sanitize error message before returning
            sanitized_error = sanitize_error_message(e)
            execution_log = f"Unexpected error: {sanitized_error}"
            return {"error": f"Unexpected error: {sanitized_error}", "execution_log": execution_log}, sanitized_error
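
For orientation, the dictionary compiled at the end of a successful orchestrate() run has roughly this shape (keys taken from the code above; values abbreviated and illustrative):

```python
# Shape of the success result compiled at the end of orchestrate(); values are illustrative only.
final_result = {
    "status": "success",
    "user_request": "…original request…",
    "sub_questions": ["…", "…", "…"],                                  # from the question enhancer
    "search_results": [{"title": "…", "content": "…", "url": "…"}],    # top 5 kept
    "search_summaries": ["…"],
    "code_string": "…generated Python source…",
    "execution_output": "…sandbox output…",
    "citations": ["Title - https://example.com/…"],
    "final_summary": "…LLM-written narrative…",
    "message": "Orchestration completed successfully",
}
```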
  • app.py:956-960 (registration)
    Gradio UI registration: the button click event binds fn=agent_research_request. Because the app is launched with mcp_server=True (line ~1235), this handler is exposed as an MCP tool, likely named 'ShallowCodeResearch_agent_research_request'.
    process_btn.click(
        fn=agent_research_request,
        inputs=[input_textbox],
        outputs=[json_output, clean_output],
    )
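
A condensed sketch of how this wiring ends up exposed over MCP. It is not the app's actual layout; it assumes Gradio 5+ with the mcp extra installed and reuses the component names from the snippet above:

```python
# Condensed sketch, not the app's actual UI code. With mcp_server=True, Gradio exposes
# registered handlers as MCP tools (hence a name like 'ShallowCodeResearch_agent_research_request').
import gradio as gr

with gr.Blocks() as demo:
    input_textbox = gr.Textbox(label="Your research request")
    process_btn = gr.Button("Run research")
    json_output = gr.JSON(label="Raw orchestrator result")
    clean_output = gr.Markdown(label="Clean summary")

    process_btn.click(
        fn=agent_research_request,
        inputs=[input_textbox],
        outputs=[json_output, clean_output],
    )

demo.launch(mcp_server=True)  # requires the gradio[mcp] extra
```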
  • app.py:831-859 (handler)
    Explicit MCP tool wrapper 'research_code'; its docstring documents it as an MCP tool, and it simply delegates to agent_research_request.
    def research_code(user_request: str) -> tuple:
        """
        This function serves as an MCP (Model Context Protocol) tool that orchestrates comprehensive
        research and code generation workflows. It enhances user requests through intelligent
        processing, performs web searches for relevant information, generates appropriate code
        solutions, executes the code safely, and provides clean, actionable summaries.

        The function is designed to be used as a tool within MCP frameworks, providing autonomous
        research capabilities that combine web search, code generation, and execution in a single
        workflow.

        Args:
            user_request (str): The user's request, question, or problem statement to be processed.
                Can include coding problems, research questions, or requests for information
                gathering and analysis.

        Returns:
            tuple: A two-element tuple containing:
                - JSON result (dict): Structured data from the orchestrator containing detailed
                  research findings, generated code, execution results, and metadata about the
                  research process
                - Clean summary (str): A human-readable summary of the research findings and
                  generated solutions, formatted for easy consumption

        Example:
            >>> result, summary = research_code("How to implement a binary search in Python?")
            >>> print(summary)  # Clean explanation with code examples
            >>> print(result['code'])  # Generated code implementation

        Note:
            This function is optimized for use as an MCP tool and handles error cases gracefully,
            returning meaningful feedback even when research or code generation encounters issues.
        """
        return agent_research_request(user_request)
  • Helper wrapper agent_orchestrator, called by agent_research_request, that handles async/sync execution of OrchestratorAgent.orchestrate().
    def agent_orchestrator(user_request: str) -> tuple:
        """
        Wrapper for OrchestratorAgent with async-first approach and sync fallback.

        Provides a unified interface to the orchestrator that attempts async execution for better
        performance and falls back to synchronous execution if needed. Handles event loop
        management and thread pooling automatically.

        Args:
            user_request (str): The user's request to be processed

        Returns:
            tuple: A tuple containing the orchestration result and summary
        """
        try:
            # Try async orchestration first for better performance
            if hasattr(orchestrator, "orchestrate_async"):
                try:
                    # Check if we're in an async context
                    loop = asyncio.get_event_loop()
                    if loop.is_running():
                        # If loop is already running (like in Gradio), we need to handle this differently
                        # Use asyncio.run_coroutine_threadsafe or run in thread pool
                        import concurrent.futures

                        def run_async_in_thread():
                            # Create a new event loop for this thread
                            new_loop = asyncio.new_event_loop()
                            asyncio.set_event_loop(new_loop)
                            try:
                                return new_loop.run_until_complete(orchestrator.orchestrate_async(user_request))
                            finally:
                                new_loop.close()

                        with concurrent.futures.ThreadPoolExecutor() as executor:
                            future = executor.submit(run_async_in_thread)
                            result = future.result()
                    else:
                        # No loop running, safe to use run_until_complete
                        result = loop.run_until_complete(orchestrator.orchestrate_async(user_request))

                    logger.info("Successfully used async orchestration")
                    return result
                except RuntimeError as e:
                    if "cannot be called from a running event loop" in str(e):
                        logger.warning("Cannot use asyncio.run from running event loop, trying thread approach")
                        # Fallback: run in a separate thread
                        import concurrent.futures

                        def run_async_in_thread():
                            new_loop = asyncio.new_event_loop()
                            asyncio.set_event_loop(new_loop)
                            try:
                                return new_loop.run_until_complete(orchestrator.orchestrate_async(user_request))
                            finally:
                                new_loop.close()

                        with concurrent.futures.ThreadPoolExecutor() as executor:
                            future = executor.submit(run_async_in_thread)
                            return future.result()
                    else:
                        raise
        except Exception as e:
            logger.warning(f"Async orchestration failed: {e}. Falling back to sync.")

        # Fallback to synchronous orchestration
        logger.info("Using synchronous orchestration as fallback")
        return orchestrator.orchestrate(user_request)
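
The event-loop handling above boils down to a reusable pattern: run a coroutine to completion even if the calling thread already has a running loop. A simplified sketch of that pattern (not code from the repository):

```python
# Simplified sketch of the pattern used above: run a coroutine to completion even when the
# caller may already be inside a running event loop (as Gradio handlers often are).
import asyncio
import concurrent.futures


def run_coro_blocking(coro):
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = None

    if loop is not None and loop.is_running():
        # A loop is already running in this thread: execute the coroutine on a fresh loop
        # inside a worker thread and block until it finishes.
        def _worker():
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)
            try:
                return new_loop.run_until_complete(coro)
            finally:
                new_loop.close()

        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(_worker).result()

    # No running loop in this thread: asyncio.run is the simplest correct choice.
    return asyncio.run(coro)
```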


MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/CodeHalwell/gradio-mcp-agent-hack'
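
The same lookup from Python, using the requests library:

```python
# Equivalent of the curl command above.
import requests

resp = requests.get("https://glama.ai/api/mcp/v1/servers/CodeHalwell/gradio-mcp-agent-hack")
resp.raise_for_status()
server_info = resp.json()
print(server_info)
```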

If you have feedback or need assistance with the MCP directory API, please join our Discord server.