MCP Base

// Ollama Text Generation Service
// This service provides text generation using a local Ollama instance

import fetch from "node-fetch";

/**
 * Service for generating text using a local Ollama instance
 */
class OllamaTextGenerationService {
  baseUrl: string;
  model: string;
  temperature: number;
  maxTokens: number;

  /**
   * Create a new Ollama text generation service
   * @param {Object} options Configuration options
   * @param {string} options.baseUrl The base URL of the Ollama API (default: http://localhost:11434)
   * @param {string} options.model The LLM model to use (default: llama3)
   * @param {number} options.temperature The temperature for generation (default: 0.7)
   * @param {number} options.maxTokens Maximum tokens to generate (default: 2048)
   */
  constructor(
    options: {
      baseUrl?: string;
      model?: string;
      temperature?: number;
      maxTokens?: number;
    } = {}
  ) {
    this.baseUrl =
      options.baseUrl || process.env.OLLAMA_URL || "http://localhost:11434";
    this.model = options.model || process.env.OLLAMA_LLM_MODEL || "llama3";
    // Use ?? so an explicit 0 is not silently replaced by the default
    this.temperature = options.temperature ?? 0.7;
    this.maxTokens = options.maxTokens ?? 2048;

    console.log(
      `Initialized Ollama text generation service with model ${this.model}`
    );
  }

  /**
   * Generate text using the chat completion API
   * @param {Array} messages Array of message objects with role and content
   * @param {Object} options Additional options for generation
   * @returns {Promise<string>} A promise that resolves to the generated text
   */
  async generateText(
    messages: Array<{ role: string; content: string }>,
    options: {
      temperature?: number;
      maxTokens?: number;
      topP?: number;
      topK?: number;
    } = {}
  ): Promise<string> {
    try {
      // Create an AbortController so a hung request is cancelled
      const controller = new AbortController();
      const timeoutId = setTimeout(() => controller.abort(), 120000); // 2 minute timeout

      let response;
      try {
        response = await fetch(`${this.baseUrl}/api/chat`, {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            model: this.model,
            messages: messages,
            options: {
              temperature: options.temperature ?? this.temperature,
              num_predict: options.maxTokens ?? this.maxTokens,
              top_p: options.topP ?? 0.9,
              top_k: options.topK ?? 40,
            },
            stream: false,
          }),
          signal: controller.signal,
        });
      } finally {
        clearTimeout(timeoutId);
      }

      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Ollama API error (${response.status}): ${errorText}`);
      }

      const data = (await response.json()) as { message: { content: string } };
      return data.message.content;
    } catch (error) {
      console.error("Error generating text with Ollama:", error);
      const message = error instanceof Error ? error.message : String(error);
      throw new Error(`Failed to generate text: ${message}`);
    }
  }

  /**
   * Check if the Ollama service is available
   * @returns {Promise<boolean>} A promise that resolves to true if the service is available
   */
  async isAvailable(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
      return response.ok;
    } catch (error) {
      console.error("Error checking Ollama availability:", error);
      return false;
    }
  }

  /**
   * Check if the specified model is available in Ollama
   * @returns {Promise<boolean>} A promise that resolves to true if the model is available
   */
  async isModelAvailable(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
      if (!response.ok) {
        return false;
      }
      const data = (await response.json()) as {
        models: Array<{ name: string }>;
      };
      // Ollama lists models with a tag suffix (e.g. "llama3:latest"),
      // so accept both an exact match and a "model:tag" match
      return data.models.some(
        (model) =>
          model.name === this.model || model.name.startsWith(`${this.model}:`)
      );
    } catch (error) {
      console.error("Error checking model availability:", error);
      return false;
    }
  }

  /**
   * Pull the model if not already available
   * @returns {Promise<boolean>} A promise that resolves to true if successful
   */
  async pullModelIfNeeded(): Promise<boolean> {
    const isAvailable = await this.isModelAvailable();
    if (isAvailable) {
      console.log(`Model ${this.model} is already available`);
      return true;
    }

    try {
      console.log(`Pulling model ${this.model}...`);
      const response = await fetch(`${this.baseUrl}/api/pull`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model: this.model,
          stream: false, // wait for the pull to complete instead of streaming progress
        }),
      });

      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Failed to pull model: ${errorText}`);
      }

      console.log(`Successfully pulled model ${this.model}`);
      return true;
    } catch (error) {
      console.error("Error pulling model:", error);
      return false;
    }
  }
}

export default OllamaTextGenerationService;
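A minimal sketch of a call site follows, assuming the class lives in a file named OllamaTextGenerationService.ts next to the caller; the import path, the main() wrapper, and the prompt strings are placeholders, not part of the service itself. The try/catch mirrors the fact that generateText rethrows on failure rather than returning a fallback value.

import OllamaTextGenerationService from "./OllamaTextGenerationService";

async function main() {
  // Lower temperature for more deterministic output (placeholder setting)
  const service = new OllamaTextGenerationService({ temperature: 0.2 });

  try {
    const answer = await service.generateText([
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Explain MCP in one sentence." },
    ]);
    console.log(answer);
  } catch (error) {
    // generateText rethrows after logging, so handle the failure here
    console.error("Generation failed:", error);
  }
}

main();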
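Because pulling a model can take a while, a caller would typically run the availability checks once at startup rather than per request. The sketch below shows one possible sequence using only the methods defined above; the startup() helper is hypothetical.

async function startup(): Promise<OllamaTextGenerationService> {
  const ollama = new OllamaTextGenerationService();

  // Confirm the Ollama daemon is reachable before doing anything else
  if (!(await ollama.isAvailable())) {
    throw new Error("Ollama is not reachable; is the daemon running?");
  }

  // Download the configured model if it is not already present locally
  if (!(await ollama.pullModelIfNeeded())) {
    throw new Error(`Could not pull model ${ollama.model}`);
  }

  return ollama;
}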