ollama-client.ts • 3.77 kB
import fetch from 'node-fetch';

export interface OllamaModel {
  name: string;
  model: string;
  modified_at: string;
  size: number;
  digest: string;
  details: {
    parent_model: string;
    format: string;
    family: string;
    families: string[];
    parameter_size: string;
    quantization_level: string;
  };
}

export interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

export interface ChatResponse {
  model: string;
  created_at: string;
  message: ChatMessage;
  done: boolean;
  total_duration?: number;
  load_duration?: number;
  prompt_eval_duration?: number;
  eval_duration?: number;
  eval_count?: number;
}

export class OllamaClient {
  private baseUrl: string;

  constructor(baseUrl = 'http://localhost:11434') {
    this.baseUrl = baseUrl;
  }

  // List locally installed models via GET /api/tags.
  async listModels(): Promise<OllamaModel[]> {
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
      const data = await response.json() as { models: OllamaModel[] };
      return data.models || [];
    } catch (error) {
      throw new Error(`Failed to list models: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  // Send a non-streaming chat request via POST /api/chat.
  async chat(model: string, messages: ChatMessage[]): Promise<ChatResponse> {
    try {
      const response = await fetch(`${this.baseUrl}/api/chat`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model,
          messages,
          stream: false,
        }),
      });
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
      const data = await response.json() as ChatResponse;
      return data;
    } catch (error) {
      throw new Error(`Failed to chat with model: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  // Download a model to the local Ollama instance via POST /api/pull.
  async pullModel(model: string): Promise<void> {
    try {
      const response = await fetch(`${this.baseUrl}/api/pull`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          name: model,
        }),
      });
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
    } catch (error) {
      throw new Error(`Failed to pull model: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  // Remove a local model via DELETE /api/delete.
  async deleteModel(model: string): Promise<void> {
    try {
      const response = await fetch(`${this.baseUrl}/api/delete`, {
        method: 'DELETE',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          name: model,
        }),
      });
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
    } catch (error) {
      throw new Error(`Failed to delete model: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  // Single-prompt completion via POST /api/generate (non-streaming).
  async generateResponse(model: string, prompt: string): Promise<string> {
    try {
      const response = await fetch(`${this.baseUrl}/api/generate`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model,
          prompt,
          stream: false,
        }),
      });
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
      const data = await response.json() as { response: string };
      return data.response;
    } catch (error) {
      throw new Error(`Failed to generate response: ${error instanceof Error ? error.message : String(error)}`);
    }
  }
}
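For reference, a minimal usage sketch of this client. It assumes an Ollama server is running locally on the default port and that the model named below ('llama3') has already been pulled; both the model name and the import path are illustrative, not part of the original file.

// Usage sketch (assumptions: local Ollama on port 11434, a model named
// 'llama3' already pulled, and this file compiled alongside ollama-client.ts).
import { OllamaClient } from './ollama-client.js';

async function main(): Promise<void> {
  const client = new OllamaClient();

  // Show which models are installed locally.
  const models = await client.listModels();
  console.log('Installed models:', models.map((m) => m.name));

  // Run a simple non-streaming chat exchange.
  const reply = await client.chat('llama3', [
    { role: 'system', content: 'You are a concise assistant.' },
    { role: 'user', content: 'Explain what an MCP server is in one sentence.' },
  ]);
  console.log(reply.message.content);
}

main().catch((err) => console.error(err));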

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/etnlbck/ollama-mcp'
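The same request from Node/TypeScript, as a rough sketch: the response is printed as raw JSON here because its exact schema is not shown on this page.

// Sketch: fetch this server's entry from the Glama MCP directory API.
// The response shape is not documented in this snippet, so it is treated
// as opaque JSON and simply pretty-printed.
import fetch from 'node-fetch';

async function fetchServerInfo(): Promise<void> {
  const res = await fetch('https://glama.ai/api/mcp/v1/servers/etnlbck/ollama-mcp');
  if (!res.ok) {
    throw new Error(`HTTP error! status: ${res.status}`);
  }
  console.log(JSON.stringify(await res.json(), null, 2));
}

fetchServerInfo().catch((err) => console.error(err));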

If you have feedback or need assistance with the MCP directory API, please join our Discord server.