/**
* LLM Service - Frontend client for LLM management and chat
* Provides typed interfaces for LLM operations
*/
// Backend base URL; VITE_API_URL is injected by Vite at build time.
// NOTE(review): `||` (not `??`) means an empty-string VITE_API_URL also
// falls back to localhost — confirm that is intended.
const API_BASE_URL = import.meta.env.VITE_API_URL || 'http://localhost:8354'
/** A model known to one of the backend LLM providers (e.g. Ollama, LM Studio). */
export interface LLMModel {
  /** Provider-scoped model identifier; used verbatim in API paths. */
  id: string
  /** Human-readable display name. */
  name: string
  /** Which backend serves this model (e.g. "ollama", "lmstudio"). */
  provider: string
  /** Context window size in tokens, when the provider reports it. */
  context_length?: number
  /** Maximum generation length in tokens, when reported. */
  max_tokens?: number
  description?: string
  /** Whether the model is currently loaded into memory. */
  loaded?: boolean
}
/** A single turn in an OpenAI-style chat transcript. */
export interface ChatMessage {
  /** Who produced the message. */
  role: 'system' | 'user' | 'assistant'
  /** The message text. */
  content: string
}
/**
 * A named chat persona with its prompt and sampling defaults.
 * Field names are snake_case to mirror the backend JSON wire format.
 */
export interface Personality {
  /** Display name of the persona. */
  name: string
  /** System prompt injected ahead of the conversation. */
  system_prompt: string
  /** Default sampling temperature for this persona. */
  temperature: number
  /** Default generation cap in tokens. */
  max_tokens: number
}
/** Payload for POST /api/llm/chat. Field names match the backend wire format. */
export interface ChatRequest {
  /** Model id to run the chat against. */
  model: string
  /** Full conversation history, oldest first. */
  messages: ChatMessage[]
  temperature?: number
  max_tokens?: number
  /** Presumably the key of a registered personality — confirm against backend. */
  personality?: string
}
/** Payload for POST /api/llm/generate (one-shot completion, no chat history). */
export interface GenerateRequest {
  /** Model id to generate with. */
  model: string
  /** Raw prompt text. */
  prompt: string
  temperature?: number
  max_tokens?: number
}
/**
 * Generic response envelope returned by most LLM endpoints.
 * NOTE(review): `data` is `any` because the payload shape varies per
 * endpoint; `unknown` would be safer but is a breaking change for
 * existing callers — consumers must narrow `data` themselves.
 */
export interface LLMResponse {
  /** True when the backend handled the request successfully. */
  success: boolean
  /** Endpoint-specific payload; shape varies — narrow before use. */
  data?: any
  /** Human-readable error description when `success` is false. */
  error?: string
  /** Server-side timestamp — presumably ISO 8601; confirm with backend. */
  timestamp?: string
}
/**
 * Frontend client for the backend LLM management API.
 *
 * All public methods are fail-soft: network and HTTP errors are logged to
 * the console and a neutral fallback (`[]`, `{}`, or `{ success: false }`)
 * is returned instead of throwing, so UI callers never need try/catch.
 */
class LLMService {
  private baseUrl: string

  constructor(baseUrl: string = API_BASE_URL) {
    this.baseUrl = baseUrl
  }

  /**
   * Fetch `path` relative to the base URL and parse the JSON body.
   * @throws Error `HTTP <status>` on any non-2xx response.
   */
  private async requestJson<T>(path: string, init?: RequestInit): Promise<T> {
    const response = await fetch(`${this.baseUrl}${path}`, init)
    if (!response.ok) throw new Error(`HTTP ${response.status}`)
    return response.json() as Promise<T>
  }

  /** POST to the API, JSON-encoding `body` when one is given. */
  private postJson(path: string, body?: unknown): Promise<LLMResponse> {
    const init: RequestInit = { method: 'POST' }
    if (body !== undefined) {
      init.headers = { 'Content-Type': 'application/json' }
      init.body = JSON.stringify(body)
    }
    return this.requestJson<LLMResponse>(path, init)
  }

  /**
   * Build the path for a per-model action. Model ids (e.g. Ollama's
   * "llama3:8b") and provider names can contain URL-significant
   * characters, so both are percent-encoded.
   */
  private modelActionPath(modelId: string, action: string, provider: string): string {
    return `/api/llm/models/${encodeURIComponent(modelId)}/${action}?provider=${encodeURIComponent(provider)}`
  }

  /**
   * List all available models, optionally filtered by provider.
   * @returns The models, or [] on any error.
   */
  async listModels(provider?: string): Promise<LLMModel[]> {
    try {
      const query = provider ? `?provider=${encodeURIComponent(provider)}` : ''
      const data = await this.requestJson<{ models?: LLMModel[] }>(`/api/llm/models${query}`)
      return data.models || []
    } catch (error) {
      console.error('Failed to list models:', error)
      return []
    }
  }

  /** List models served by Ollama ([] on error). */
  async listOllamaModels(): Promise<LLMModel[]> {
    try {
      const data = await this.requestJson<{ models?: LLMModel[] }>('/api/llm/models/ollama')
      return data.models || []
    } catch (error) {
      console.error('Failed to list Ollama models:', error)
      return []
    }
  }

  /** List models served by LM Studio ([] on error). */
  async listLMStudioModels(): Promise<LLMModel[]> {
    try {
      const data = await this.requestJson<{ models?: LLMModel[] }>('/api/llm/models/lmstudio')
      return data.models || []
    } catch (error) {
      console.error('Failed to list LM Studio models:', error)
      return []
    }
  }

  /** Ask the provider to load a model into memory. */
  async loadModel(modelId: string, provider: string = 'ollama'): Promise<LLMResponse> {
    try {
      return await this.postJson(this.modelActionPath(modelId, 'load', provider))
    } catch (error) {
      console.error('Failed to load model:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Ask the provider to unload a model from memory. */
  async unloadModel(modelId: string, provider: string = 'ollama'): Promise<LLMResponse> {
    try {
      return await this.postJson(this.modelActionPath(modelId, 'unload', provider))
    } catch (error) {
      console.error('Failed to unload model:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Ask the provider to pull/download a model. */
  async pullModel(modelId: string, provider: string = 'ollama'): Promise<LLMResponse> {
    try {
      return await this.postJson(this.modelActionPath(modelId, 'pull', provider))
    } catch (error) {
      console.error('Failed to pull model:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Currently-active models as reported by the backend ({} on error). */
  async getActiveModels(): Promise<Record<string, any>> {
    try {
      const data = await this.requestJson<{ models?: Record<string, any> }>('/api/llm/models/active')
      return data.models || {}
    } catch (error) {
      console.error('Failed to get active models:', error)
      return {}
    }
  }

  /** One-shot text generation (no chat history). */
  async generateText(request: GenerateRequest): Promise<LLMResponse> {
    try {
      return await this.postJson('/api/llm/generate', request)
    } catch (error) {
      console.error('Failed to generate text:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Multi-turn chat completion. */
  async chatCompletion(request: ChatRequest): Promise<LLMResponse> {
    try {
      return await this.postJson('/api/llm/chat', request)
    } catch (error) {
      console.error('Failed to chat:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Backend health status. */
  async getHealth(): Promise<LLMResponse> {
    try {
      return await this.requestJson<LLMResponse>('/api/llm/health')
    } catch (error) {
      console.error('Failed to get health:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Backend host/system information. */
  async getSystemInfo(): Promise<LLMResponse> {
    try {
      return await this.requestJson<LLMResponse>('/api/llm/system-info')
    } catch (error) {
      console.error('Failed to get system info:', error)
      return { success: false, error: String(error) }
    }
  }

  /** Registered personalities keyed by id ({} on error). */
  async getPersonalities(): Promise<Record<string, Personality>> {
    try {
      const data = await this.requestJson<{ personalities?: Record<string, Personality> }>('/api/llm/personalities')
      return data.personalities || {}
    } catch (error) {
      console.error('Failed to get personalities:', error)
      return {}
    }
  }

  /**
   * Register a custom personality.
   * Field names in the payload are snake_case to match the backend wire format.
   */
  async addPersonality(
    key: string,
    name: string,
    systemPrompt: string,
    temperature: number = 0.7,
    maxTokens: number = 2000
  ): Promise<LLMResponse> {
    try {
      return await this.postJson('/api/llm/personalities', {
        key,
        name,
        system_prompt: systemPrompt,
        temperature,
        max_tokens: maxTokens
      })
    } catch (error) {
      console.error('Failed to add personality:', error)
      return { success: false, error: String(error) }
    }
  }
}
/** Shared singleton instance, pointed at API_BASE_URL, for app-wide use. */
export const llmService = new LLMService()