Agent MCP

embeddingProvider.ts (6.6 kB)
// Embedding Provider Interface and Types
// Foundation interface for all embedding providers (OpenAI, Ollama, Gemini, HuggingFace, etc.)
// This interface ensures provider-agnostic embedding generation across the system

/**
 * Standard interface that all embedding providers must implement
 * Provides abstraction layer for different embedding services and models
 */
export interface EmbeddingProvider {
  /**
   * Generate embeddings for an array of input texts
   * @param texts Array of strings to generate embeddings for
   * @returns Promise resolving to array of embedding vectors (number arrays)
   *          Returns empty array if no embeddings generated
   *          Each embedding vector corresponds to input text at same index
   */
  generateEmbeddings(texts: string[]): Promise<number[][]>;

  /**
   * Get the dimension count for embeddings generated by this provider
   * @returns Number of dimensions in generated embedding vectors
   *          Used for database schema validation and vector similarity calculations
   */
  getDimensions(): number;

  /**
   * Get the maximum batch size for processing multiple texts at once
   * @returns Maximum number of texts that can be processed in a single API call
   *          Used for batch processing optimization and rate limit management
   */
  getMaxBatchSize(): number;

  /**
   * Check if the provider is currently available and functional
   * @returns Promise resolving to true if provider can generate embeddings
   *          Performs actual connectivity/health check (not just config validation)
   */
  isAvailable(): Promise<boolean>;

  /**
   * Get the human-readable name of this provider
   * @returns String identifier for logging and user interfaces
   *          Examples: "OpenAI", "Ollama", "Gemini", "HuggingFace"
   */
  getName(): string;

  /**
   * Get the specific model being used by this provider
   * @returns Model identifier string for debugging and configuration tracking
   *          Examples: "text-embedding-3-large", "nomic-embed-text", "text-embedding-004"
   */
  getModel(): string;

  /**
   * Get provider-specific configuration and metadata
   * @returns Object containing provider configuration details
   *          Used for debugging, monitoring, and provider comparison
   */
  getConfig(): ProviderConfig;

  /**
   * Estimate cost for generating embeddings for given number of tokens
   * @param tokenCount Approximate number of tokens to be processed
   * @returns Estimated cost in USD, 0 for local providers
   */
  estimateCost?(tokenCount: number): number;

  /**
   * Warm up the provider (download models, establish connections, etc.)
   * @returns Promise that resolves when provider is ready for requests
   *          Optional method for providers that benefit from initialization
   */
  warmUp?(): Promise<void>;
}

/**
 * Configuration object returned by provider.getConfig()
 * Contains provider-specific settings and metadata
 */
export interface ProviderConfig {
  /** Provider type identifier */
  type: string;
  /** Model being used */
  model: string;
  /** Embedding dimensions */
  dimensions: number;
  /** Maximum batch size */
  maxBatchSize: number;
  /** Whether this is a local provider (no network calls) */
  isLocal: boolean;
  /** Base URL for API calls (if applicable) */
  baseUrl?: string;
  /** Additional provider-specific settings */
  settings?: Record<string, any>;
}

/**
 * Error types specific to embedding providers
 * Allows for provider-specific error handling and fallback logic
 */
export class EmbeddingProviderError extends Error {
  constructor(
    message: string,
    public provider: string,
    public errorType: 'AUTH' | 'RATE_LIMIT' | 'QUOTA' | 'NETWORK' | 'MODEL' | 'UNKNOWN' = 'UNKNOWN',
    public retryable: boolean = false
  ) {
    super(message);
    this.name = 'EmbeddingProviderError';
  }
}

/**
 * Performance metrics for embedding generation
 * Used for monitoring and provider comparison
 */
export interface EmbeddingMetrics {
  /** Provider name */
  provider: string;
  /** Model used */
  model: string;
  /** Number of texts processed */
  textCount: number;
  /** Total time taken in milliseconds */
  durationMs: number;
  /** Average time per text in milliseconds */
  avgTimePerText: number;
  /** Success rate (0-1) */
  successRate: number;
  /** Timestamp of measurement */
  timestamp: string;
}

/**
 * Provider capability information
 * Used for automatic provider selection and optimization
 */
export interface ProviderCapabilities {
  /** Provider supports batch processing */
  supportsBatching: boolean;
  /** Provider supports custom dimensions */
  supportsCustomDimensions: boolean;
  /** Provider is local (no network required) */
  isLocal: boolean;
  /** Provider has usage costs */
  hasCosts: boolean;
  /** Provider supports model selection */
  supportsModelSelection: boolean;
  /** Typical response time category */
  responseTimeCategory: 'fast' | 'medium' | 'slow';
  /** Quality/accuracy category */
  qualityCategory: 'basic' | 'good' | 'excellent';
}

/**
 * Provider status information
 * Real-time status for monitoring and health checks
 */
export interface ProviderStatus {
  /** Provider name */
  name: string;
  /** Current availability status */
  available: boolean;
  /** Last successful health check */
  lastHealthCheck: string;
  /** Current error if unavailable */
  error?: string;
  /** Response time from last request */
  lastResponseTime?: number;
  /** Success rate over recent requests */
  recentSuccessRate?: number;
}

/**
 * Batch processing options
 * Configuration for optimizing batch embedding generation
 */
export interface BatchOptions {
  /** Maximum batch size override */
  maxBatchSize?: number;
  /** Delay between batches in milliseconds */
  batchDelay?: number;
  /** Maximum concurrent batches */
  maxConcurrency?: number;
  /** Retry failed texts individually */
  retryFailures?: boolean;
}

/**
 * Provider selection criteria
 * Used for automatic provider selection based on requirements
 */
export interface ProviderSelectionCriteria {
  /** Preferred provider if available */
  preferredProvider?: string;
  /** Require local provider (no network) */
  requireLocal?: boolean;
  /** Maximum acceptable cost per token */
  maxCostPerToken?: number;
  /** Required quality level */
  minQuality?: 'basic' | 'good' | 'excellent';
  /** Maximum acceptable response time */
  maxResponseTime?: number;
  /** Required embedding dimensions */
  requiredDimensions?: number;
}

console.log('✅ Embedding provider interface loaded');
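
As a usage illustration (not part of the file above), here is a minimal sketch of a concrete provider implementing this interface, assuming a local Ollama instance whose /api/embeddings endpoint accepts { model, prompt } and returns { embedding: number[] }. The import path, default model, and dimension count are placeholders; adapt them to your project and Ollama version.

// Hypothetical sketch: a local Ollama-backed EmbeddingProvider.
// Assumptions: Node 18+ (global fetch), ESM import path './embeddingProvider.js',
// and an Ollama endpoint POST /api/embeddings taking { model, prompt }.
import {
  EmbeddingProvider,
  ProviderConfig,
  EmbeddingProviderError,
} from './embeddingProvider.js';

export class OllamaEmbeddingProvider implements EmbeddingProvider {
  constructor(
    private model: string = 'nomic-embed-text',
    private baseUrl: string = 'http://localhost:11434',
    private dimensions: number = 768
  ) {}

  async generateEmbeddings(texts: string[]): Promise<number[][]> {
    const embeddings: number[][] = [];
    // Ollama's embeddings endpoint is called once per text in this sketch
    for (const text of texts) {
      const res = await fetch(`${this.baseUrl}/api/embeddings`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model: this.model, prompt: text }),
      });
      if (!res.ok) {
        throw new EmbeddingProviderError(
          `Ollama returned HTTP ${res.status}`,
          this.getName(),
          'NETWORK',
          true // network errors are usually worth retrying
        );
      }
      const data = (await res.json()) as { embedding: number[] };
      embeddings.push(data.embedding);
    }
    return embeddings;
  }

  getDimensions(): number {
    return this.dimensions;
  }

  getMaxBatchSize(): number {
    return 32; // conservative default for sequential local requests
  }

  async isAvailable(): Promise<boolean> {
    // Actual health check: request one embedding rather than only reading config
    try {
      const probe = await this.generateEmbeddings(['ping']);
      return probe.length === 1 && probe[0].length > 0;
    } catch {
      return false;
    }
  }

  getName(): string {
    return 'Ollama';
  }

  getModel(): string {
    return this.model;
  }

  getConfig(): ProviderConfig {
    return {
      type: 'ollama',
      model: this.model,
      dimensions: this.dimensions,
      maxBatchSize: this.getMaxBatchSize(),
      isLocal: true,
      baseUrl: this.baseUrl,
    };
  }

  estimateCost(_tokenCount: number): number {
    return 0; // local provider, no per-token cost
  }
}

A caller would typically construct the provider, await isAvailable() before use, and fall back to another EmbeddingProvider implementation on failure, using EmbeddingProviderError.retryable to decide between retrying and switching providers.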
