openai_chat
Send a prompt to OpenAI through Codex for non-interactive queries with a 180-second timeout. Returns clear error messages when quota limits are exceeded.
Instructions
Send a prompt to OpenAI via Codex exec. Non-interactive, fast startup (no MCP servers loaded), 180s default timeout. Returns clear error on quota limits. For code review, use openai_review instead.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| prompt | Yes | The prompt to send | |
| model | No | Model override (optional). Note: some models may not be available on ChatGPT Plus | |
| timeout | No | Timeout in seconds (default 180) | |
| cwd | No | Working directory for codex | |
Implementation Reference
- servers/mcp-openai/server.js:186-288 (registration): Registration of the 'openai_chat' tool via mcpServer.registerTool(), including input schema (prompt, model, timeout, cwd) and the handler callback.
mcpServer.registerTool( "openai_chat", { description: "Send a prompt to OpenAI via Codex exec. Non-interactive, fast startup (no MCP servers loaded), 180s default timeout. Returns clear error on quota limits. For code review, use openai_review instead.", inputSchema: { prompt: z.string().describe("The prompt to send"), model: z .string() .optional() .describe("Model override (optional). Note: some models may not be available on ChatGPT Plus"), timeout: z .number() .default(180) .describe("Timeout in seconds (default 180)"), cwd: z .string() .optional() .describe("Working directory for codex"), }, }, async ({ prompt, model, timeout = 180, cwd }) => { const timeoutMs = timeout * 1000; const outputFile = tempFile("codex-chat"); try { log(`Chat: ${prompt.length} chars, timeout ${timeout}s`); const startTime = Date.now(); const args = [ "exec", "--sandbox", "read-only", "--ephemeral", "-o", outputFile, ]; if (model) { args.push("-m", model); } args.push("-"); const { stdout, stderr, exitCode } = await runCodex(args, { timeoutMs, stdin: prompt, cwd, }); const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); const combined = stdout + stderr; const error = detectError(combined); if (error) { log(`${error.errorType}: ${error.message}`); try { await unlink(outputFile); } catch {} return { content: [{ type: "text", text: error.message }], isError: true, }; } let outputFileContent = ""; try { outputFileContent = await readFile(outputFile, "utf-8"); } catch {} try { await unlink(outputFile); } catch {} const response = extractResponse(stdout, outputFileContent); if (!response) { log(`No response (exit: ${exitCode}, stdout: ${stdout.length}, stderr: ${stderr.length})`); return { content: [{ type: "text", text: `No response from Codex. Exit code: ${exitCode}. 
Output: ${combined.slice(-300)}` }], isError: true, }; } log(`OK in ${elapsed}s (${response.length} chars)`); return { content: [{ type: "text", text: response }], }; } catch (error) { try { await unlink(outputFile); } catch {} const knownError = detectError(error.message); if (knownError) { log(`${knownError.errorType}: ${knownError.message}`); return { content: [{ type: "text", text: knownError.message }], isError: true, }; } log(`Error: ${error.message}`); return { content: [{ type: "text", text: `Codex error: ${error.message}` }], isError: true, }; } } ); - servers/mcp-openai/server.js:207-287 (handler)Async handler function for 'openai_chat' tool. Sends prompt to OpenAI via Codex CLI exec in non-interactive sandbox mode, handles timeouts, error detection, and output extraction.
async ({ prompt, model, timeout = 180, cwd }) => { const timeoutMs = timeout * 1000; const outputFile = tempFile("codex-chat"); try { log(`Chat: ${prompt.length} chars, timeout ${timeout}s`); const startTime = Date.now(); const args = [ "exec", "--sandbox", "read-only", "--ephemeral", "-o", outputFile, ]; if (model) { args.push("-m", model); } args.push("-"); const { stdout, stderr, exitCode } = await runCodex(args, { timeoutMs, stdin: prompt, cwd, }); const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); const combined = stdout + stderr; const error = detectError(combined); if (error) { log(`${error.errorType}: ${error.message}`); try { await unlink(outputFile); } catch {} return { content: [{ type: "text", text: error.message }], isError: true, }; } let outputFileContent = ""; try { outputFileContent = await readFile(outputFile, "utf-8"); } catch {} try { await unlink(outputFile); } catch {} const response = extractResponse(stdout, outputFileContent); if (!response) { log(`No response (exit: ${exitCode}, stdout: ${stdout.length}, stderr: ${stderr.length})`); return { content: [{ type: "text", text: `No response from Codex. Exit code: ${exitCode}. Output: ${combined.slice(-300)}` }], isError: true, }; } log(`OK in ${elapsed}s (${response.length} chars)`); return { content: [{ type: "text", text: response }], }; } catch (error) { try { await unlink(outputFile); } catch {} const knownError = detectError(error.message); if (knownError) { log(`${knownError.errorType}: ${knownError.message}`); return { content: [{ type: "text", text: knownError.message }], isError: true, }; } log(`Error: ${error.message}`); return { content: [{ type: "text", text: `Codex error: ${error.message}` }], isError: true, }; } } - servers/mcp-openai/server.js:188-205 (schema)Input schema for 'openai_chat': prompt (required string), model (optional string), timeout (optional number, default 180s), cwd (optional string).
{ description: "Send a prompt to OpenAI via Codex exec. Non-interactive, fast startup (no MCP servers loaded), 180s default timeout. Returns clear error on quota limits. For code review, use openai_review instead.", inputSchema: { prompt: z.string().describe("The prompt to send"), model: z .string() .optional() .describe("Model override (optional). Note: some models may not be available on ChatGPT Plus"), timeout: z .number() .default(180) .describe("Timeout in seconds (default 180)"), cwd: z .string() .optional() .describe("Working directory for codex"), }, - servers/mcp-openai/server.js:41-71 (helper)detectError() - checks output for usage limit, model not supported, and auth expired errors, returning structured error info.
function detectError(output) { const combined = output.toLowerCase(); if (combined.includes("usage limit") || combined.includes("hit your usage limit")) { const match = output.match(/try again at (.+?)[\.\n]/); const resetDate = match ? match[1] : "unknown"; return { isError: true, errorType: "QUOTA_EXCEEDED", message: `Codex usage limit reached. Credits reset at: ${resetDate}. Use a fallback provider.`, }; } if (combined.includes("not supported when using codex with a chatgpt account")) { return { isError: true, errorType: "MODEL_NOT_SUPPORTED", message: "This model is not available with ChatGPT Plus. Use the default model.", }; } if (combined.includes("auth") && (combined.includes("expired") || combined.includes("login"))) { return { isError: true, errorType: "AUTH_EXPIRED", message: "Codex auth token expired. Run 'codex login' to re-authenticate.", }; } return null; } - servers/mcp-openai/server.js:77-104 (helper)extractResponse() - extracts the AI response from codex exec output, preferring output file content over stdout parsing.
// Pull the AI answer out of a codex exec run. The structured output file
// wins when present; otherwise the answer is scraped from stdout, between
// the bare "codex" marker line and the trailing "tokens used" footer.
// Falls back to the whole trimmed stdout when no marker was seen.
function extractResponse(stdout, outputFileContent) {
  if (outputFileContent && outputFileContent.trim()) {
    return outputFileContent.trim();
  }
  const collected = [];
  let capturing = false;
  for (const row of stdout.split("\n")) {
    if (row.trim() === "codex") {
      // Marker line itself is never part of the answer.
      capturing = true;
    } else if (capturing && row.startsWith("tokens used")) {
      break;
    } else if (capturing) {
      collected.push(row);
    }
  }
  return collected.length > 0 ? collected.join("\n").trim() : stdout.trim();
}