openai_chat
Send prompts to OpenAI for AI consultation via Claude Code MCP server. Get second opinions with fast startup and clear error handling.
Instructions
Send a prompt to OpenAI via Codex exec. Non-interactive, fast startup (no MCP servers loaded), with timeout. Returns clear error on quota limits. For code review, use openai_review instead.
Input Schema
Table / JSON Schema
| Name | Required | Description | Default |
|---|---|---|---|
| prompt | Yes | The prompt to send | |
| model | No | Model override (optional). Note: some models may not be available on ChatGPT Plus | |
| timeout | No | Timeout in seconds (default 90) | |
| cwd | No | Working directory for codex | |
Implementation Reference
- servers/mcp-openai/server.js:207-287 (handler) — The main handler function for the openai_chat tool. It executes the Codex CLI with the user's prompt, handles timeout, manages temporary output files, detects errors, and returns the AI response. The function uses runCodex to spawn the process, reads the output file, extracts the response, and handles various error cases including quota limits and auth failures.
async ({ prompt, model, timeout = 90, cwd }) => { const timeoutMs = timeout * 1000; const outputFile = tempFile("codex-chat"); try { log(`Chat: ${prompt.length} chars, timeout ${timeout}s`); const startTime = Date.now(); const args = [ "exec", "--sandbox", "read-only", "--ephemeral", "-o", outputFile, ]; if (model) { args.push("-m", model); } args.push("-"); const { stdout, stderr, exitCode } = await runCodex(args, { timeoutMs, stdin: prompt, cwd, }); const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); const combined = stdout + stderr; const error = detectError(combined); if (error) { log(`${error.errorType}: ${error.message}`); try { await unlink(outputFile); } catch {} return { content: [{ type: "text", text: error.message }], isError: true, }; } let outputFileContent = ""; try { outputFileContent = await readFile(outputFile, "utf-8"); } catch {} try { await unlink(outputFile); } catch {} const response = extractResponse(stdout, outputFileContent); if (!response) { log(`No response (exit: ${exitCode}, stdout: ${stdout.length}, stderr: ${stderr.length})`); return { content: [{ type: "text", text: `No response from Codex. Exit code: ${exitCode}. Output: ${combined.slice(-300)}` }], isError: true, }; } log(`OK in ${elapsed}s (${response.length} chars)`); return { content: [{ type: "text", text: response }], }; } catch (error) { try { await unlink(outputFile); } catch {} const knownError = detectError(error.message); if (knownError) { log(`${knownError.errorType}: ${knownError.message}`); return { content: [{ type: "text", text: knownError.message }], isError: true, }; } log(`Error: ${error.message}`); return { content: [{ type: "text", text: `Codex error: ${error.message}` }], isError: true, }; } } - servers/mcp-openai/server.js:186-288 (registration)Tool registration for openai_chat using mcpServer.registerTool. 
Includes the tool description, input schema with zod validation (prompt, model, timeout, cwd parameters), and the handler function that processes requests.
mcpServer.registerTool( "openai_chat", { description: "Send a prompt to OpenAI via Codex exec. Non-interactive, fast startup (no MCP servers loaded), with timeout. Returns clear error on quota limits. For code review, use openai_review instead.", inputSchema: { prompt: z.string().describe("The prompt to send"), model: z .string() .optional() .describe("Model override (optional). Note: some models may not be available on ChatGPT Plus"), timeout: z .number() .default(90) .describe("Timeout in seconds (default 90)"), cwd: z .string() .optional() .describe("Working directory for codex"), }, }, async ({ prompt, model, timeout = 90, cwd }) => { const timeoutMs = timeout * 1000; const outputFile = tempFile("codex-chat"); try { log(`Chat: ${prompt.length} chars, timeout ${timeout}s`); const startTime = Date.now(); const args = [ "exec", "--sandbox", "read-only", "--ephemeral", "-o", outputFile, ]; if (model) { args.push("-m", model); } args.push("-"); const { stdout, stderr, exitCode } = await runCodex(args, { timeoutMs, stdin: prompt, cwd, }); const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); const combined = stdout + stderr; const error = detectError(combined); if (error) { log(`${error.errorType}: ${error.message}`); try { await unlink(outputFile); } catch {} return { content: [{ type: "text", text: error.message }], isError: true, }; } let outputFileContent = ""; try { outputFileContent = await readFile(outputFile, "utf-8"); } catch {} try { await unlink(outputFile); } catch {} const response = extractResponse(stdout, outputFileContent); if (!response) { log(`No response (exit: ${exitCode}, stdout: ${stdout.length}, stderr: ${stderr.length})`); return { content: [{ type: "text", text: `No response from Codex. Exit code: ${exitCode}. 
Output: ${combined.slice(-300)}` }], isError: true, }; } log(`OK in ${elapsed}s (${response.length} chars)`); return { content: [{ type: "text", text: response }], }; } catch (error) { try { await unlink(outputFile); } catch {} const knownError = detectError(error.message); if (knownError) { log(`${knownError.errorType}: ${knownError.message}`); return { content: [{ type: "text", text: knownError.message }], isError: true, }; } log(`Error: ${error.message}`); return { content: [{ type: "text", text: `Codex error: ${error.message}` }], isError: true, }; } } ); - servers/mcp-openai/server.js:191-205 (schema)Input schema definition for openai_chat using zod. Defines the expected parameters: prompt (required string), model (optional string for model override), timeout (optional number, default 90 seconds), and cwd (optional working directory string).
inputSchema: { prompt: z.string().describe("The prompt to send"), model: z .string() .optional() .describe("Model override (optional). Note: some models may not be available on ChatGPT Plus"), timeout: z .number() .default(90) .describe("Timeout in seconds (default 90)"), cwd: z .string() .optional() .describe("Working directory for codex"), }, - servers/mcp-openai/server.js:112-177 (helper)The runCodex helper function that spawns the codex CLI process with timeout support, manages stdin/stdout/stderr streams, handles process cleanup, and returns a promise with the process output. Used by the openai_chat handler to execute the actual AI requests.
function runCodex(args, options = {}) { const { timeoutMs = 90000, stdin: stdinData, cwd } = options; return new Promise((resolve, reject) => { const proc = spawn("codex", args, { cwd: cwd || process.cwd(), stdio: ["pipe", "pipe", "pipe"], env: { ...process.env, CODEX_HOME, }, }); let stdout = ""; let stderr = ""; let killed = false; let killTimer; const timer = setTimeout(() => { killed = true; proc.kill("SIGTERM"); killTimer = setTimeout(() => { try { if (!proc.killed) proc.kill("SIGKILL"); } catch {} }, 5000); }, timeoutMs); proc.stdout.on("data", (data) => { stdout += data.toString(); if (stdout.length > MAX_BUFFER) { killed = true; proc.kill("SIGTERM"); } }); proc.stderr.on("data", (data) => { stderr += data.toString(); if (stderr.length > MAX_BUFFER) { killed = true; proc.kill("SIGTERM"); } }); if (stdinData) { proc.stdin.write(stdinData); proc.stdin.end(); } else { proc.stdin.end(); } proc.on("close", (exitCode) => { clearTimeout(timer); clearTimeout(killTimer); if (killed) { reject(new Error(`Process killed after ${timeoutMs / 1000}s timeout. Partial output: ${(stdout + stderr).slice(-200)}`)); } else { resolve({ stdout, stderr, exitCode }); } }); proc.on("error", (err) => { clearTimeout(timer); clearTimeout(killTimer); reject(err); }); }); } - servers/mcp-openai/server.js:41-71 (helper)The detectError helper function that parses codex CLI output for known error patterns including quota limits, model not supported, and auth expiration. Returns structured error information used by openai_chat handler to provide clear error messages.
/**
 * Parse codex CLI output for known failure patterns.
 *
 * Detection is case-insensitive: the output is lowercased once and checked
 * against known substrings, in priority order (quota, unsupported model,
 * expired auth).
 *
 * @param {string} output - Combined stdout/stderr from the codex process.
 * @returns {{isError: true, errorType: string, message: string} | null}
 *   Structured error info, or null when no known pattern is present.
 */
function detectError(output) {
  const combined = output.toLowerCase();

  // Quota exhausted. ("hit your usage limit" is subsumed by "usage limit",
  // so a single substring check suffices.)
  if (combined.includes("usage limit")) {
    // `i` flag fix: the CLI prints "Try again at ..." with a capital T, but
    // the regex previously ran case-sensitively against the raw output and
    // silently fell back to "unknown". Capture ends at the first '.' or
    // newline after the date.
    const match = output.match(/try again at (.+?)[.\n]/i);
    const resetDate = match ? match[1] : "unknown";
    return {
      isError: true,
      errorType: "QUOTA_EXCEEDED",
      message: `Codex usage limit reached. Credits reset at: ${resetDate}. Use a fallback provider.`,
    };
  }

  // Model override rejected for ChatGPT-plan accounts.
  if (combined.includes("not supported when using codex with a chatgpt account")) {
    return {
      isError: true,
      errorType: "MODEL_NOT_SUPPORTED",
      message: "This model is not available with ChatGPT Plus. Use the default model.",
    };
  }

  // Heuristic: any mention of "auth" together with expiry/login wording.
  if (combined.includes("auth") && (combined.includes("expired") || combined.includes("login"))) {
    return {
      isError: true,
      errorType: "AUTH_EXPIRED",
      message: "Codex auth token expired. Run 'codex login' to re-authenticate.",
    };
  }

  return null;
}