chat_completion
Send chat messages to AI models through OpenRouter. Supports text-only conversations and, for models that accept it, multimodal message content.
Instructions
Send messages to an OpenRouter model and get a response
Input Schema
The table below summarizes the tool's input JSON Schema.
| Name | Required | Description | Default |
|---|---|---|---|
| model | No | Model ID (optional, uses default) | server default model |
| messages | Yes | Conversation messages (at least one); each has a `role` (`system`, `user`, or `assistant`) and `content` (a string or an array of content-part objects) | |
| temperature | No | Sampling temperature, between 0 and 2 | 1 |
| max_tokens | No | Maximum number of tokens to generate (minimum 1) | unset (model default) |
Implementation Reference
- The handler function that executes the chat completion logic using the OpenAI client.
export async function handleChatCompletion( request: { params: { arguments: ChatCompletionToolRequest } }, openai: OpenAI, defaultModel?: string, ) { const { messages, model, temperature, max_tokens } = request.params.arguments; if (!messages?.length) { return { content: [{ type: 'text', text: 'Messages array cannot be empty.' }], isError: true }; } try { const completion = await openai.chat.completions.create({ model: model || defaultModel || 'nvidia/nemotron-nano-12b-v2-vl:free', messages, temperature: temperature ?? 1, ...(max_tokens && { max_tokens }), }); return { content: [{ type: 'text', text: completion.choices[0].message.content || '' }] }; } catch (error: unknown) { const msg = error instanceof Error ? error.message : String(error); return { content: [{ type: 'text', text: `API error: ${msg}` }], isError: true }; } } - src/tool-handlers.ts:46-72 (registration)Tool registration with schema definition for 'chat_completion'.
// Tool registration entry for 'chat_completion' (element of the server's tools array).
{
  name: 'chat_completion',
  description: 'Send messages to an OpenRouter model and get a response',
  inputSchema: {
    type: 'object',
    properties: {
      // Optional per-call model override; the server falls back to its default.
      model: { type: 'string', description: 'Model ID (optional, uses default)' },
      // Conversation history; at least one message is required.
      messages: {
        type: 'array',
        minItems: 1,
        items: {
          type: 'object',
          properties: {
            role: { type: 'string', enum: ['system', 'user', 'assistant'] },
            // Either a plain string or an array of content-part objects.
            content: {
              oneOf: [{ type: 'string' }, { type: 'array', items: { type: 'object' } }],
            },
          },
          required: ['role', 'content'],
        },
      },
      temperature: { type: 'number', minimum: 0, maximum: 2 },
      max_tokens: { type: 'number', minimum: 1 },
    },
    required: ['messages'],
  },
},
// src/tool-handlers.ts:136-141 (registration) — Execution dispatch for the 'chat_completion' tool in the request handler.
// Dispatch arm for the 'chat_completion' tool inside the server's tool-call
// switch: wraps the raw args into the request shape handleChatCompletion
// expects, and passes along the shared OpenAI client plus the configured
// default model.
// NOTE(review): 'args as ChatCompletionToolRequest | undefined' is an
// unchecked cast — actual validation (non-empty 'messages') happens inside
// the handler, not here.
case 'chat_completion': return handleChatCompletion( wrapToolArgs(args as ChatCompletionToolRequest | undefined), this.openai, this.defaultModel, );