import axios from 'axios';
import { CONFIG } from '../config.js';
import { LocalLLMInput, ToolResponse } from '../validation.js';
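
/**
 * Calls LM Studio's OpenAI-compatible /v1/chat/completions endpoint with a
 * single user message and wraps the reply (plus latency and token count) in
 * a ToolResponse.
 */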
export async function executeLMStudioLLM(input: LocalLLMInput): Promise<ToolResponse> {
  const { model, prompt, temperature = 0.7 } = input;

  try {
    const url = `${CONFIG.LMSTUDIO_BASE_URL}/v1/chat/completions`;
    const requestBody = {
      model,
      messages: [{ role: 'user', content: prompt }],
      temperature,
      max_tokens: 1000
    };

    const headers: Record<string, string> = { 'Content-Type': 'application/json' };
    // Attach an API key if one is configured; a local LM Studio server
    // typically runs without authentication.
    if (process.env.LMSTUDIO_API_KEY) {
      headers['Authorization'] = `Bearer ${process.env.LMSTUDIO_API_KEY}`;
    }

    // Time the round trip so latency can be reported with the result.
    const startTime = Date.now();
    const response = await axios.post(url, requestBody, {
      timeout: 60000,
      headers
    });
    const latencyMs = Date.now() - startTime;

    if (!response.data.choices || response.data.choices.length === 0) {
      throw new Error('No response choices from LM Studio');
    }

    const text = response.data.choices[0].message.content;
    const tokens = response.data.usage?.total_tokens ?? 0;

    const result = { text, model, latencyMs, tokens };
    return {
      content: [{
        type: 'text' as const,
        text: JSON.stringify(result, null, 2)
      }]
    };
  } catch (error: unknown) {
    // Prefer the structured message from the LM Studio error body; otherwise
    // fall back to the underlying Error message.
    const errorMessage = axios.isAxiosError(error)
      ? error.response?.data?.error?.message || error.message
      : error instanceof Error
        ? error.message
        : 'Unknown error';
    return {
      content: [{
        type: 'text' as const,
        text: `Error calling LM Studio: ${errorMessage}`
      }]
    };
  }
}
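
// Example usage: a minimal sketch, not part of this module. The model id and
// prompt below are illustrative assumptions; pass whatever model is currently
// loaded in your LM Studio server.
//
//   const response = await executeLMStudioLLM({
//     model: 'llama-3.2-1b-instruct', // hypothetical model id
//     prompt: 'Explain what a mutex is in one sentence.',
//     temperature: 0.2
//   });
//   console.log(response.content[0].text);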