/**
 * Send a chat message to the LM Studio API (OpenAI-compatible
 * /v1/chat/completions endpoint).
 *
 * @param {Object} args - Arguments for the chat message.
 * @param {string} args.model - The model to use for the chat.
 * @param {Array<Object>} args.messages - An array of message objects with role and content.
 * @param {number} [args.temperature=0.7] - The temperature for randomness in responses.
 * @param {number} [args.max_tokens=-1] - The maximum number of tokens to generate; -1 means no fixed limit.
 * @param {boolean} [args.stream=false] - Whether to stream the response (this helper does not parse streamed output).
 * @returns {Promise<Object>} - The response from the chat API, or an error object on failure.
 */
const executeFunction = async ({ model, messages, temperature = 0.7, max_tokens = -1, stream = false }) => {
  const url = 'http://127.0.0.1:1234/v1/chat/completions';
  const apiKey = process.env.LM_STUDIO_API_KEY;
  try {
    // Construct the request body.
    const body = JSON.stringify({
      model,
      messages,
      temperature,
      max_tokens,
      stream
    });

    // Set up headers; attach a bearer token only if a key is configured,
    // since a local LM Studio server typically does not require one.
    const headers = {
      'Content-Type': 'application/json'
    };
    if (apiKey) {
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    // Perform the fetch request. Note: the body is parsed below as a single
    // JSON document, so server-sent events are not consumed; leave `stream`
    // false unless the caller handles the raw stream itself.
    const response = await fetch(url, {
      method: 'POST',
      headers,
      body
    });

    // Check if the response was successful. Read the error body as text,
    // since it is not guaranteed to be valid JSON.
    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`Request failed with status ${response.status}: ${errorText}`);
    }

    // Parse and return the response data.
    return await response.json();
  } catch (error) {
    console.error('Error sending chat message:', error);
    return { error: 'An error occurred while sending the chat message.' };
  }
};
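
// Example (sketch): sending a minimal chat request. Assumes an LM Studio
// server is listening on 127.0.0.1:1234; the model identifier below is a
// placeholder for whatever model is currently loaded.
//
// const reply = await executeFunction({
//   model: 'loaded-model-identifier',
//   messages: [
//     { role: 'system', content: 'You are a helpful assistant.' },
//     { role: 'user', content: 'Hello!' }
//   ]
// });
// console.log(reply.choices?.[0]?.message?.content);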

/**
 * Tool configuration for sending chat messages to the LM Studio API.
 * @type {Object}
 */
const apiTool = {
  function: executeFunction,
  definition: {
    type: 'function',
    function: {
      name: 'chat',
      description: 'Send a chat message to the LM Studio API.',
      parameters: {
        type: 'object',
        properties: {
          model: {
            type: 'string',
            description: 'The model to use for the chat.'
          },
          messages: {
            type: 'array',
            items: {
              type: 'object',
              properties: {
                role: {
                  type: 'string',
                  description: 'The role of the message sender (e.g., system, user).'
                },
                content: {
                  type: 'string',
                  description: 'The content of the message.'
                }
              },
              required: ['role', 'content']
            },
            description: 'An array of message objects with role and content.'
          },
          temperature: {
            type: 'number',
            description: 'The temperature for randomness in responses.'
          },
          max_tokens: {
            type: 'integer',
            description: 'The maximum number of tokens to generate.'
          },
          stream: {
            type: 'boolean',
            description: 'Whether to stream the response.'
          }
        },
        required: ['model', 'messages']
      }
    }
  }
};
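
// Example (sketch): a host application might surface `apiTool.definition` to
// a model as an OpenAI-style tool, then dispatch tool calls back to
// `apiTool.function`. The wiring below is illustrative only; `toolCall` is a
// placeholder shape, not part of any specific framework.
//
// const toolCall = {
//   name: 'chat',
//   arguments: { model: 'loaded-model-identifier', messages: [{ role: 'user', content: 'Ping' }] }
// };
// if (toolCall.name === apiTool.definition.function.name) {
//   const result = await apiTool.function(toolCall.arguments);
// }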
export { apiTool };