MCP Base
by jsmiff
server/utils
// Ollama Embeddings Service
// This service provides embeddings generated by Ollama running locally
import fetch from "node-fetch";
/**
* Options for the Ollama embeddings service
*/
interface OllamaEmbeddingsOptions {
baseUrl?: string;
model?: string;
}
/**
* Service for generating embeddings using a local Ollama instance
*/
class OllamaEmbeddingsService {
baseUrl: string;
model: string;
/**
* Create a new Ollama embeddings service
* @param {OllamaEmbeddingsOptions} options Configuration options
*/
constructor(options: OllamaEmbeddingsOptions = {}) {
this.baseUrl =
options.baseUrl || process.env.OLLAMA_URL || "http://localhost:11434";
this.model = options.model || process.env.OLLAMA_EMBED_MODEL || "nomic-embed-text";
console.log(
`Initialized Ollama embeddings service with model ${this.model}`
);
}
/**
* Generate embeddings for the given text
* @param {string} text The text to generate embeddings for
* @returns {Promise<number[]>} A promise that resolves to the embedding vector
*/
async generateEmbedding(text: string): Promise<number[]> {
try {
const response = await fetch(`${this.baseUrl}/api/embeddings`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
        body: JSON.stringify({
          model: this.model,
          prompt: text,
          // nomic-embed-text produces 768-dimensional vectors by default,
          // smaller than the 1536 dimensions of OpenAI's ada-002 embeddings
        }),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(
`Ollama embeddings API error (${response.status}): ${errorText}`
);
}
      // node-fetch types json() as Promise<unknown>, so narrow the shape here;
      // Ollama responds with { "embedding": number[] }
      const data = (await response.json()) as { embedding: number[] };
      return data.embedding;
    } catch (error) {
      console.error("Error generating embeddings with Ollama:", error);
      const message = error instanceof Error ? error.message : String(error);
      throw new Error(`Failed to generate embeddings: ${message}`);
    }
}
/**
* Generate embeddings for multiple texts in batch
* @param {string[]} texts Array of texts to generate embeddings for
* @returns {Promise<number[][]>} A promise that resolves to an array of embedding vectors
*/
async generateBatchEmbeddings(texts: string[]): Promise<number[][]> {
try {
      // The /api/embeddings endpoint accepts a single prompt, so process
      // texts one at a time (a concurrent variant is sketched below)
const embeddings: number[][] = [];
for (const text of texts) {
const embedding = await this.generateEmbedding(text);
embeddings.push(embedding);
}
return embeddings;
    } catch (error) {
      console.error("Error generating batch embeddings with Ollama:", error);
      const message = error instanceof Error ? error.message : String(error);
      throw new Error(`Failed to generate batch embeddings: ${message}`);
    }
}
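  /**
   * Illustrative sketch, not part of the original service: batch embedding
   * with a small worker pool, assuming a local Ollama instance can handle a
   * few requests in parallel. The method name and default pool size are
   * choices made here for illustration, not an Ollama API.
   * @param {string[]} texts Array of texts to generate embeddings for
   * @param {number} concurrency Maximum number of in-flight requests
   * @returns {Promise<number[][]>} Embedding vectors in input order
   */
  async generateBatchEmbeddingsConcurrent(
    texts: string[],
    concurrency = 4
  ): Promise<number[][]> {
    const results: number[][] = new Array(texts.length);
    let next = 0;
    // Each worker repeatedly claims the next unprocessed index; the claim is
    // synchronous, so no two workers ever embed the same text
    const worker = async (): Promise<void> => {
      while (next < texts.length) {
        const i = next++;
        results[i] = await this.generateEmbedding(texts[i]);
      }
    };
    await Promise.all(
      Array.from({ length: Math.min(concurrency, texts.length) }, worker)
    );
    return results;
  }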
/**
* Check if the Ollama service is available
* @returns {Promise<boolean>} A promise that resolves to true if the service is available
*/
async isAvailable(): Promise<boolean> {
try {
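      // GET /api/tags lists locally installed models; any 200 response
      // means the Ollama server is reachable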
const response = await fetch(`${this.baseUrl}/api/tags`);
return response.ok;
} catch (error) {
console.error("Error checking Ollama availability:", error);
return false;
}
}
/**
* Check if the specified model is available in Ollama
* @returns {Promise<boolean>} A promise that resolves to true if the model is available
*/
async isModelAvailable(): Promise<boolean> {
try {
const response = await fetch(`${this.baseUrl}/api/tags`);
if (!response.ok) {
return false;
}
      const data = (await response.json()) as { models: { name: string }[] };
      // The API returns model names with a tag, e.g. "nomic-embed-text:latest"
      return data.models.some((model) => model.name.startsWith(this.model));
} catch (error) {
console.error("Error checking model availability:", error);
return false;
}
}
/**
* Pull the embedding model if not already available
* @returns {Promise<boolean>} A promise that resolves to true if successful
*/
async pullModelIfNeeded(): Promise<boolean> {
const isAvailable = await this.isModelAvailable();
if (isAvailable) {
console.log(`Model ${this.model} is already available`);
return true;
}
try {
console.log(`Pulling model ${this.model}...`);
const response = await fetch(`${this.baseUrl}/api/pull`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
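        // stream: false makes /api/pull respond once the download completes,
        // rather than streaming progress updates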
body: JSON.stringify({
stream: false,
name: this.model,
}),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Failed to pull model: ${errorText}`);
}
console.log(`Successfully pulled model ${this.model}`);
return true;
} catch (error) {
console.error("Error pulling model:", error);
return false;
}
}
}
export default OllamaEmbeddingsService;
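A minimal usage sketch, assuming this module lives under server/utils (the import path below is hypothetical) and that Ollama is running at the default http://localhost:11434:

// usage-example.ts (hypothetical)
import OllamaEmbeddingsService from "./ollama-embeddings";

async function main() {
  const embeddings = new OllamaEmbeddingsService();

  // Fail fast if the Ollama server is not reachable
  if (!(await embeddings.isAvailable())) {
    throw new Error("Ollama is not running or unreachable");
  }

  // Download nomic-embed-text on first run, then embed a sample string
  await embeddings.pullModelIfNeeded();
  const vector = await embeddings.generateEmbedding("hello world");
  console.log(`Embedding length: ${vector.length}`); // 768 for nomic-embed-text
}

main().catch(console.error);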