chat
Sends a user prompt to a Grok model through the Grok MCP server and returns the generated response. Sampling parameters, an optional system prompt, and conversation-history handling can be configured per call.
Input Schema
Name | Required | Description | Default |
---|---|---|---|
frequency_penalty | No | Number; penalizes tokens in proportion to how often they have already appeared | null |
max_tokens | No | Integer; maximum number of tokens to generate in the response | null |
model | No | String; Grok model to use for the completion | grok-4-fast |
presence_penalty | No | Number; penalizes tokens that have already appeared at least once | null |
prompt | Yes | String; the user prompt to send to the model | |
reasoning_effort | No | String; reasoning effort level for the model | null |
stop | No | Array of strings; sequences at which generation stops | null |
system_prompt | No | String; system prompt that sets model behavior | null |
temperature | No | Number; sampling temperature | null |
top_p | No | Number; nucleus (top-p) sampling threshold | null |
use_conversation_history | No | Boolean; whether to include prior conversation turns | false |
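
For reference, a minimal arguments object that satisfies this schema might look like the following sketch; the prompt text and parameter values are illustrative, not server defaults.

```json
{
  "prompt": "Summarize the key differences between REST and gRPC.",
  "model": "grok-4-fast",
  "system_prompt": "You are a concise technical assistant.",
  "temperature": 0.7,
  "max_tokens": 512,
  "use_conversation_history": false
}
```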
Input Schema (JSON Schema)
{
"properties": {
"frequency_penalty": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"title": "Frequency Penalty"
},
"max_tokens": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"title": "Max Tokens"
},
"model": {
"default": "grok-4-fast",
"title": "Model",
"type": "string"
},
"presence_penalty": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"title": "Presence Penalty"
},
"prompt": {
"title": "Prompt",
"type": "string"
},
"reasoning_effort": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Reasoning Effort"
},
"stop": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"title": "Stop"
},
"system_prompt": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "System Prompt"
},
"temperature": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"title": "Temperature"
},
"top_p": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"title": "Top P"
},
"use_conversation_history": {
"default": false,
"title": "Use Conversation History",
"type": "boolean"
}
},
"required": [
"prompt"
],
"type": "object"
}
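
As a usage sketch, an MCP client invokes this tool with a standard `tools/call` JSON-RPC request; the `id`, prompt text, and parameter values below are illustrative.

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "chat",
    "arguments": {
      "prompt": "Explain what a vector database is in two sentences.",
      "temperature": 0.3,
      "use_conversation_history": true
    }
  }
}
```

The result returns the model's reply as a text item in the response's content array.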