# code-execution
Execute code snippets and receive immediate results through the MCP server, enabling code testing and debugging within AI workflows.
## Instructions
Execute code and return the result
## Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| parameters | Yes | Tool parameters; per the adapter below, this carries `code` (the snippet to execute) and `language` (falls back to `python` if omitted). | |
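For illustration, a call to this tool might look like the following. The parameter names `code` and `language` come from the adapter shown below; the surrounding client invocation is an assumption about a typical MCP client, not part of this server.

```python
# Illustrative arguments for a "code-execution" call. The keys match what the adapter
# reads from request.parameters; how they are sent depends on the MCP client in use.
arguments = {
    "code": "print(sum(range(10)))",  # snippet to execute
    "language": "python",             # optional; the adapter falls back to "python"
}

# A typical (assumed) client-side invocation would pass these as tool arguments, e.g.:
#   await session.call_tool("code-execution", arguments=arguments)
```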
## Implementation Reference
- `CodeInterpreterAdapter` provides the core handler logic for the 'code-execution' tool, translating MCP requests into OpenAI code-interpreter parameters and OpenAI responses back into MCP format (a round-trip usage sketch follows this list).

  ```python
  class CodeInterpreterAdapter(ToolAdapter):
      """Adapter for OpenAI's code interpreter tool"""

      @property
      def tool_id(self) -> str:
          """Get the MCP tool ID"""
          return "code-execution"

      @property
      def openai_tool_type(self) -> str:
          """Get the OpenAI tool type"""
          return "code_interpreter"

      @property
      def description(self) -> str:
          """Get the tool description"""
          return "Execute code and return the result"

      async def translate_request(self, request: MCPRequest) -> dict:
          """
          Translate MCP request to OpenAI parameters

          Args:
              request: The MCP request to translate

          Returns:
              Dictionary of OpenAI parameters
          """
          # Extract code to execute
          code = request.parameters.get("code", "")
          language = request.parameters.get("language", "python")

          logger.debug(f"Translating code execution request with language: {language}")

          # Return OpenAI parameters
          return {"code": code, "language": language}

      async def translate_response(self, response: dict) -> MCPResponse:
          """
          Translate OpenAI response to MCP response

          Args:
              response: The OpenAI response to translate

          Returns:
              MCP response object
          """
          # Extract execution result
          result = response.get("result", {})

          logger.debug("Translating code execution response")

          # Format result as markdown
          content = format_code_result(result)

          # Check for errors
          error = None
          if isinstance(result, dict) and "error" in result:
              error = result["error"]

          # Return MCP response
          return MCPResponse(content=content, error=error, context={"language": response.get("language", "python")})
  ```
- `openai_tool2mcp/tools/registry.py:43-47` (registration): registers the 'code-execution' tool in `ToolRegistry`, mapping it to OpenAI's code_interpreter tool.

  ```python
  "code-execution": {
      "openai_tool": OpenAIBuiltInTools.CODE_INTERPRETER.value,
      "enabled": OpenAIBuiltInTools.CODE_INTERPRETER.value in self.enabled_tools,
      "description": "Execute code in a sandbox environment",
  },
  ```
- `openai_tool2mcp/server.py:48-60` (registration): instantiates `CodeInterpreterAdapter` and maps it to the tool ID 'code-execution' in the server's `tools_map` (the enablement sketch after this list shows the gating condition).

  ```python
  def _build_tools_map(self):
      """Build a map of tool adapters"""
      tools_map = {}

      # Register default tool adapters
      adapters = [WebSearchAdapter(), CodeInterpreterAdapter(), BrowserAdapter(), FileManagerAdapter()]

      for adapter in adapters:
          # Only register if the tool is enabled
          if adapter.openai_tool_type in self.config.tools:
              tools_map[adapter.tool_id] = adapter

      return tools_map
  ```
- `openai_tool2mcp/server.py:62-114` (handler): dynamically registers MCP tools, including 'code-execution', via the `@mcp.tool` decorator; the generic handler delegates to the adapter's translate methods and invokes OpenAI (the closure-capture sketch after this list shows why `tool_id` and `adapter` are bound as default arguments).

  ```python
  def _register_mcp_tools(self):
      """Register tools with the MCP SDK"""
      for tool_id, adapter in self.tools_map.items():
          # Define a tool handler for each adapter
          # Create a closure to properly capture the values
          def create_tool_handler(tool_id=tool_id, adapter=adapter):
              @self.mcp.tool(name=tool_id, description=adapter.description)
              async def tool_handler(**parameters):
                  """
                  MCP tool handler for OpenAI tools.
                  """
                  # Create an MCP request from the parameters
                  mcp_request = MCPRequest(parameters=parameters)

                  # Translate the request parameters using the adapter
                  translated_params = await adapter.translate_request(mcp_request)

                  # Create an OpenAI tool request
                  openai_request = mcp_to_openai.translate_request(mcp_request, tool_id)

                  # Override the parameters with the adapter-specific ones
                  openai_request.parameters = translated_params

                  try:
                      # Call OpenAI API to execute the tool
                      openai_response = await self.openai_client.invoke_tool(openai_request)

                      # Translate the OpenAI response to MCP format using the adapter
                      if openai_response.tool_outputs:
                          # Use the adapter to translate the tool-specific response
                          mcp_response = await adapter.translate_response(openai_response.tool_outputs[0].output)

                          # Add thread_id to context for state management
                          if mcp_response.context is None:
                              mcp_response.context = {}
                          mcp_response.context["thread_id"] = openai_response.thread_id

                          # Return the response content which will be used by MCP SDK
                          return mcp_response.content
                      else:
                          # Fallback to generic translation
                          mcp_response = openai_to_mcp.translate_response(openai_response)
                          return mcp_response.content
                  except Exception as e:
                      logger.error(f"Error invoking tool {tool_id}: {e!s}")
                      # Using custom exception class to fix TRY003
                      raise ToolInvocationError() from e

              return tool_handler

          # Create and register the tool handler
          create_tool_handler()
  ```
- Helper function mapping the 'code-execution' MCP tool ID to the OpenAI 'code_interpreter' type (a short usage example follows this list).

  ```python
  def map_tool_id_to_openai_type(tool_id: str) -> str:
      """
      Map MCP tool IDs to OpenAI tool types.

      Args:
          tool_id: MCP tool ID

      Returns:
          OpenAI tool type
      """
      mapping = {
          "web-search": "retrieval",
          "code-execution": "code_interpreter",
          "browser": "web_browser",
          "file-io": "file_search",
      }

      openai_type = mapping.get(tool_id, tool_id)
      logger.debug(f"Mapped MCP tool ID {tool_id} to OpenAI tool type {openai_type}")
      return openai_type
  ```
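A minimal sketch of the adapter round-trip described above. The import paths and the shape of the simulated OpenAI output (`{"result": ...}`) are assumptions based on the snippets in this section, not verified against the package layout:

```python
import asyncio

# Import paths are assumptions; adjust to the actual module layout of openai_tool2mcp.
from openai_tool2mcp.tools.code_interpreter import CodeInterpreterAdapter
from openai_tool2mcp.models import MCPRequest


async def main():
    adapter = CodeInterpreterAdapter()

    # MCP parameters -> OpenAI code_interpreter parameters
    request = MCPRequest(parameters={"code": "print(2 + 2)", "language": "python"})
    print(await adapter.translate_request(request))
    # {'code': 'print(2 + 2)', 'language': 'python'}

    # Simulated OpenAI tool output -> MCPResponse (the result shape here is an assumption)
    simulated_output = {"result": {"stdout": "4\n"}, "language": "python"}
    mcp_response = await adapter.translate_response(simulated_output)
    print(mcp_response.content)  # markdown produced by format_code_result


asyncio.run(main())
```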
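Both the registry entry and `_build_tools_map` gate registration on the same condition: the OpenAI tool type for code execution must be enabled. A small sketch of that gating, assuming `OpenAIBuiltInTools.CODE_INTERPRETER.value` is the string `"code_interpreter"` and the enabled-tools settings are plain lists of such strings:

```python
# Assumptions: CODE_INTERPRETER.value == "code_interpreter"; enabled tools are listed as strings.
enabled_tools = ["code_interpreter", "retrieval"]

# Shape the ToolRegistry entry takes when code_interpreter is enabled.
registry_entry = {
    "openai_tool": "code_interpreter",
    "enabled": "code_interpreter" in enabled_tools,  # True here
    "description": "Execute code in a sandbox environment",
}

# _build_tools_map applies the same gate, so 'code-execution' is only exposed to MCP
# clients when "code_interpreter" appears in config.tools.
assert registry_entry["enabled"] is True
```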
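`_register_mcp_tools` binds `tool_id` and `adapter` as default arguments of `create_tool_handler`. The standalone snippet below (illustrative only, no project imports) shows the late-binding pitfall that pattern avoids:

```python
# Without default-argument capture, every closure reads the loop variable's final value.
late_bound = [lambda: tool_id for tool_id in ("web-search", "code-execution")]
print([handler() for handler in late_bound])    # ['code-execution', 'code-execution']

# Binding the current value as a default argument freezes it per iteration, as the server does.
early_bound = [lambda tool_id=tool_id: tool_id for tool_id in ("web-search", "code-execution")]
print([handler() for handler in early_bound])   # ['web-search', 'code-execution']
```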
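Expected behaviour of the mapping helper: unmapped IDs fall through unchanged because of the `mapping.get(tool_id, tool_id)` default. The import path below is an assumption, since the helper's file is not named in this section:

```python
# Module path is an assumption; adjust to where map_tool_id_to_openai_type actually lives.
from openai_tool2mcp.utils import map_tool_id_to_openai_type

assert map_tool_id_to_openai_type("code-execution") == "code_interpreter"
assert map_tool_id_to_openai_type("web-search") == "retrieval"
assert map_tool_id_to_openai_type("custom-tool") == "custom-tool"  # passes through unchanged
```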