gpt5_query
Query GPT-5 with configurable reasoning effort and verbosity levels. Supports optional web search integration and custom system prompts for flexible AI interactions.
Instructions
Query GPT-5 with optional Web Search Preview. Supports verbosity and reasoning effort.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| input | Yes | Query input object (QueryInputSchema): the user `query` string plus optional overrides for `model`, `system`, `reasoning_effort`, `verbosity`, `tool_choice`, `parallel_tool_calls`, `max_output_tokens`, and `web_search`. | (none) |
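For illustration, a sketch of the arguments an MCP client might pass when calling this tool. The values are hypothetical; the field names follow QueryInputSchema (see Implementation Reference below), and only `query` is required.

```typescript
// Hypothetical tool-call arguments for gpt5_query. Everything besides
// `query` is an optional per-call override.
const exampleArguments = {
  input: {
    query: "Summarize the latest TypeScript release notes",
    reasoning_effort: "medium",
    verbosity: "low",
    web_search: { enabled: true, search_context_size: "medium" },
  },
};
```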
Implementation Reference
- src/index.ts:43-58 (handler): Handler function that parses the input schema, calls runQuery to execute the OpenAI query, and returns the formatted response or error.

  ```typescript
  async ({ input }) => {
    const parsed = QueryInputSchema.parse(input);
    try {
      const text = await runQuery(openai, parsed as QueryInput, config);
      return {
        content: [{ type: "text" as const, text: text || "No response text available." }],
      };
    } catch (error) {
      console.error("Error calling OpenAI API:", error);
      const message = error instanceof Error ? error.message : "Unknown error";
      return {
        content: [{ type: "text" as const, text: `Error: ${message}` }],
        isError: true,
      };
    }
  }
  ```
- src/index.ts:21-37 (schema): Zod schema defining the input parameters for the gpt5_query tool, matching the QueryInput type.

  ```typescript
  const QueryInputSchema = z.object({
    query: z.string().describe("User question or instruction"),
    // Per-call overrides
    model: z.string().optional().describe("Model name, e.g. gpt-5"),
    system: z.string().optional().describe("Optional system prompt/instructions for the model"),
    reasoning_effort: z.enum(["low", "minimal", "medium", "high"]).optional(),
    verbosity: z.enum(["low", "medium", "high"]).optional(),
    tool_choice: z.enum(["auto", "none"]).optional(),
    parallel_tool_calls: z.boolean().optional(),
    max_output_tokens: z.number().int().positive().optional(),
    web_search: z
      .object({
        enabled: z.boolean().optional(),
        search_context_size: z.enum(["low", "medium", "high"]).optional(),
      })
      .optional(),
  });
  ```
- src/index.ts:39-59 (registration): Registers the 'gpt5_query' MCP tool with name, description, input schema, and handler function.

  ```typescript
  server.tool(
    "gpt5_query",
    "Query GPT-5 with optional Web Search Preview. Supports verbosity and reasoning effort.",
    { input: QueryInputSchema },
    async ({ input }) => {
      const parsed = QueryInputSchema.parse(input);
      try {
        const text = await runQuery(openai, parsed as QueryInput, config);
        return {
          content: [{ type: "text" as const, text: text || "No response text available." }],
        };
      } catch (error) {
        console.error("Error calling OpenAI API:", error);
        const message = error instanceof Error ? error.message : "Unknown error";
        return {
          content: [{ type: "text" as const, text: `Error: ${message}` }],
          isError: true,
        };
      }
    }
  );
  ```
- src/openai.ts:94-101 (helper): Main helper function implementing the tool logic: builds the OpenAI request, invokes the API via openai.responses.create, and extracts the output text (extractOutputText is not reproduced here; a hedged sketch follows after this list).

  ```typescript
  export async function runQuery(openai: OpenAI, input: QueryInput, cfg: AppConfig) {
    const req = buildOpenAIRequest(input, cfg);
    const response: unknown = await openai.responses.create(
      req as unknown as Record<string, unknown>
    );
    const text = extractOutputText(response) ?? "";
    return text || "No response text available.";
  }
  ```
- src/openai.ts:36-84 (helper): Helper function that constructs the OpenAI request object from tool input and config defaults, handling the web search tool, reasoning effort, verbosity, and other options (an example of the resulting request appears after this list).

  ```typescript
  export function buildOpenAIRequest(input: QueryInput, cfg: AppConfig): OpenAIRequest {
    const model = input.model ?? cfg.model;
    const effRaw = (input.reasoning_effort ?? cfg.reasoningEffort) as
      | "low"
      | ReasoningEffort
      | undefined;
    let reasoningEffort: ReasoningEffort | undefined = effRaw
      ? ((effRaw === "low" ? "minimal" : effRaw) as ReasoningEffort)
      : undefined;

    // Bump reasoning for web search minimal constraint
    const webEnabled = input.web_search?.enabled ?? cfg.webSearchDefaultEnabled;
    if (reasoningEffort === "minimal" && webEnabled) {
      reasoningEffort = "medium";
    }

    const verbosity: Verbosity | undefined = input.verbosity ?? cfg.defaultVerbosity;
    const searchContextSize: SearchContextSize | undefined =
      input.web_search?.search_context_size ?? cfg.webSearchContextSize;
    const toolChoice = input.tool_choice ?? "auto";
    const parallelToolCalls = input.parallel_tool_calls ?? true;

    const tools: WebSearchPreviewTool[] = [];
    if (webEnabled) {
      const webTool: WebSearchPreviewTool = { type: "web_search_preview" };
      if (searchContextSize) {
        webTool.search_context_size = searchContextSize;
      }
      tools.push(webTool);
    }

    const req: OpenAIRequest = {
      model,
      input: input.query,
      tool_choice: toolChoice,
      parallel_tool_calls: parallelToolCalls,
    } as OpenAIRequest;

    if (input.system) req.instructions = input.system;
    if (tools.length > 0) req.tools = tools;
    if (reasoningEffort) req.reasoning = { effort: reasoningEffort };
    if (verbosity) req.text = { verbosity };
    if (input.max_output_tokens) req.max_output_tokens = input.max_output_tokens;

    return req;
  }
  ```
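The extractOutputText helper used by runQuery lives in src/openai.ts but is not reproduced above. A minimal sketch of what such a helper might look like, assuming the OpenAI Responses API shape (the SDK's `output_text` convenience field, falling back to walking the `output` array); the real implementation may differ:

```typescript
// Hypothetical sketch of extractOutputText; not the actual source.
function extractOutputText(response: unknown): string | undefined {
  const r = response as {
    output_text?: string;
    output?: Array<{ type?: string; content?: Array<{ type?: string; text?: string }> }>;
  };

  // Prefer the aggregated convenience field when present.
  if (typeof r.output_text === "string" && r.output_text.length > 0) {
    return r.output_text;
  }

  // Otherwise collect text parts from message items in the output array.
  const texts =
    r.output
      ?.filter((item) => item.type === "message")
      .flatMap((item) => item.content ?? [])
      .filter((part) => part.type === "output_text" && typeof part.text === "string")
      .map((part) => part.text as string) ?? [];

  return texts.length > 0 ? texts.join("\n") : undefined;
}
```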
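To illustrate the mapping performed by buildOpenAIRequest, here is a sketch of the request it would produce for a web-search query, assuming a config whose default model is "gpt-5". The input values are hypothetical; note how `reasoning_effort: "minimal"` is bumped to "medium" because web search is enabled.

```typescript
// Hypothetical per-call input.
const exampleInput = {
  query: "What changed in the latest Node.js LTS release?",
  reasoning_effort: "minimal",
  verbosity: "low",
  web_search: { enabled: true, search_context_size: "medium" },
};

// Request produced by buildOpenAIRequest for that input, per the logic above.
const expectedRequest = {
  model: "gpt-5",
  input: exampleInput.query,
  tool_choice: "auto",            // default when not overridden
  parallel_tool_calls: true,      // default when not overridden
  tools: [{ type: "web_search_preview", search_context_size: "medium" }],
  reasoning: { effort: "medium" }, // "minimal" bumped because web search is on
  text: { verbosity: "low" },
};
```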