Agent MCP

HuggingFaceEmbeddingProvider.ts
// HuggingFace Embedding Provider
// Users should implement this provider to use HuggingFace embeddings (API or local)

import { BaseEmbeddingProvider } from '../BaseEmbeddingProvider.js';
import { PROVIDER_CONFIG } from '../../../core/config.js';

/**
 * HuggingFace Embedding Provider Implementation
 * Supports both API and local inference via Transformers.js
 *
 * To implement:
 *   For API: Set HF_TOKEN in environment
 *   For Local: npm install @xenova/transformers
 *   Then implement generateEmbeddingsInternal method
 */
export class HuggingFaceEmbeddingProvider extends BaseEmbeddingProvider {
  private pipeline: any = null;

  protected getProviderType(): string {
    return 'huggingface';
  }

  protected getDefaultModel(): string {
    return PROVIDER_CONFIG.HUGGINGFACE_MODEL || 'sentence-transformers/all-MiniLM-L6-v2';
  }

  protected getDefaultMaxBatchSize(): number {
    return PROVIDER_CONFIG.HUGGINGFACE_USE_LOCAL ? 10 : 100;
  }

  protected isLocalProvider(): boolean {
    return PROVIDER_CONFIG.HUGGINGFACE_USE_LOCAL || false;
  }

  async isAvailable(): Promise<boolean> {
    if (PROVIDER_CONFIG.HUGGINGFACE_USE_LOCAL) {
      // Check if Transformers.js is available
      try {
        // Check if Transformers.js module can be resolved
        const transformersModule = await import('@xenova/transformers' as string);
        return !!transformersModule;
      } catch (error) {
        console.warn('Transformers.js not installed. Run: npm install @xenova/transformers');
        return false;
      }
    } else {
      // Check if HuggingFace API key is configured
      if (!PROVIDER_CONFIG.HUGGINGFACE_API_KEY) {
        console.warn('HuggingFace API key not configured');
        return false;
      }
      return true;
    }
  }

  async warmUp(): Promise<void> {
    if (PROVIDER_CONFIG.HUGGINGFACE_USE_LOCAL) {
      console.log(`🔥 Loading HuggingFace model ${this.getModel()} locally...`);
      // TODO: Initialize the pipeline
      /*
      const { pipeline } = await import('@xenova/transformers');
      this.pipeline = await pipeline('feature-extraction', this.getModel());
      console.log('✅ Model loaded and ready');
      */
    }
  }

  protected async generateEmbeddingsInternal(texts: string[]): Promise<number[][]> {
    if (PROVIDER_CONFIG.HUGGINGFACE_USE_LOCAL) {
      // TODO: Implement local inference with Transformers.js
      /*
      if (!this.pipeline) {
        await this.warmUp();
      }
      const embeddings: number[][] = [];
      for (const text of texts) {
        const output = await this.pipeline(text, { pooling: 'mean', normalize: true });
        embeddings.push(Array.from(output.data));
      }
      return embeddings;
      */
    } else {
      // TODO: Implement API-based inference
      /*
      const response = await fetch(
        `https://api-inference.huggingface.co/models/${this.getModel()}`,
        {
          method: 'POST',
          headers: {
            'Authorization': `Bearer ${PROVIDER_CONFIG.HUGGINGFACE_API_KEY}`,
            'Content-Type': 'application/json'
          },
          body: JSON.stringify({ inputs: texts, options: { wait_for_model: true } })
        }
      );

      if (!response.ok) {
        const error = await response.text();
        throw new Error(`HuggingFace error: ${response.status} - ${error}`);
      }

      const data = await response.json();
      return data;
      */
    }
    throw new Error('HuggingFace provider not implemented. Please implement generateEmbeddingsInternal method.');
  }

  estimateCost(tokenCount: number): number {
    if (PROVIDER_CONFIG.HUGGINGFACE_USE_LOCAL) {
      return 0; // Local inference is free
    }
    // HuggingFace Inference API pricing varies by model
    return tokenCount * 0.00006 / 1000; // Approximate
  }
}

console.log('✅ HuggingFace embedding provider template loaded');
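
For reference, here is a minimal standalone sketch of the local inference path the commented-out TODO blocks describe, assuming @xenova/transformers is installed. It exercises the same pipeline('feature-extraction', ...) call with mean pooling and normalization that the template suggests, just outside the provider class; the function name embedLocally and the hardcoded model are illustrative, not part of the template.

// Minimal sketch, assuming `npm install @xenova/transformers` has been run.
// Mirrors the commented-out TODO block in generateEmbeddingsInternal above.
import { pipeline } from '@xenova/transformers';

async function embedLocally(texts: string[]): Promise<number[][]> {
  // 'feature-extraction' yields per-token embeddings; mean pooling plus
  // normalization collapses each text into a single unit-length vector.
  const extractor = await pipeline(
    'feature-extraction',
    'sentence-transformers/all-MiniLM-L6-v2'
  );
  const embeddings: number[][] = [];
  for (const text of texts) {
    const output = await extractor(text, { pooling: 'mean', normalize: true });
    embeddings.push(Array.from(output.data as Float32Array));
  }
  return embeddings;
}

// Example usage: two short sentences -> two 384-dimensional vectors
embedLocally(['hello world', 'embeddings with MiniLM'])
  .then((vecs) => console.log(vecs.length, vecs[0].length));

In the template itself, the pipeline would instead be cached on this.pipeline during warmUp() so the model loads once rather than on every call, which is why the TODO block guards with `if (!this.pipeline) { await this.warmUp(); }`.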
