import OpenAI from 'openai';
import { Message } from '../types/conversation.js';
/** A configured model exposed by this provider. */
interface Model {
    id: string;
    contextWindow: number;
    streaming: boolean;
    supportsFunctions?: boolean;
    temperature?: number;
    description?: string;
}
/** Provider settings loaded from the YAML configuration file. */
interface ProviderConfig {
    apiKey: string;
    // Each entry mirrors Model, minus supportsFunctions (which is set internally).
    models: {
        [key: string]: Omit<Model, 'supportsFunctions'>;
    };
    defaultModel: string;
    timeouts?: {
        completion?: number;
        stream?: number;
    };
}
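// A minimal sketch of the expected configuration shape. The model id, context
// window, and timeout values below are illustrative placeholders, not
// recommendations; substitute the models actually configured in your YAML.
const exampleConfig: ProviderConfig = {
    apiKey: process.env.OPENROUTER_API_KEY ?? '',
    models: {
        'openai/gpt-4o': {
            id: 'openai/gpt-4o',
            contextWindow: 128000,
            streaming: true,
            description: 'Example model entry'
        }
    },
    defaultModel: 'openai/gpt-4o',
    timeouts: { completion: 30000, stream: 60000 }
};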
/** Normalized response returned to callers of this provider. */
interface ProviderResponse {
    content: string;
    model: string;
    tokenCount?: number;
    metadata?: Record<string, unknown>;
}
/** Parameters accepted by createCompletion and streamCompletion. */
interface CompletionParams {
    messages: Message[];
    model: string;
    stream: boolean;
    timeout?: number;
    temperature?: number;
    maxTokens?: number;
}
/** Model metadata enriched with the provider name, default flag, and pricing. */
interface ModelInfo extends Model {
    isDefault: boolean;
    provider: string;
    cost?: {
        prompt: number;
        completion: number;
    };
}
export class OpenRouterProvider {
    private client: OpenAI;
    private _models: Model[];
    private defaultModel: string;
    private timeouts: Required<NonNullable<ProviderConfig['timeouts']>>;
    readonly name = 'openrouter';
    constructor(config: ProviderConfig) {
        if (!config.apiKey) {
            throw new Error('Missing openRouter.apiKey in YAML configuration');
        }
        if (!config.defaultModel) {
            throw new Error('Missing defaultModel in YAML configuration');
        }
        // Initialize the OpenAI client against the OpenRouter endpoint
        this.client = new OpenAI({
            apiKey: config.apiKey,
            baseURL: 'https://openrouter.ai/api/v1',
            defaultQuery: { use_cache: 'true' },
            defaultHeaders: {
                'HTTP-Referer': 'https://github.com/cursor-ai/mcp-conversation-server',
                'X-Title': 'MCP Conversation Server',
                'Content-Type': 'application/json',
                'OR-SITE-LOCATION': 'https://github.com/cursor-ai/mcp-conversation-server',
                'OR-ALLOW-FINE-TUNING': 'false'
            }
        });
        // Default request timeouts (ms); overridable per call via CompletionParams.timeout
        this.timeouts = {
            completion: config.timeouts?.completion ?? 30000,
            stream: config.timeouts?.stream ?? 60000
        };
        this.defaultModel = config.defaultModel;
        // Convert configured models to the internal format
        this._models = Object.entries(config.models).map(([id, modelConfig]) => ({
            id,
            contextWindow: modelConfig.contextWindow,
            streaming: modelConfig.streaming,
            temperature: modelConfig.temperature,
            description: modelConfig.description,
            supportsFunctions: false
        }));
    }
    /** Resolve a model id to its configuration, falling back to the default model. */
    private getModelConfig(modelId: string): Model {
        const model = this._models.find(m => m.id === modelId);
        if (!model) {
            console.warn(`Model ${modelId} not found in configuration, falling back to default model ${this.defaultModel}`);
            const defaultModel = this._models.find(m => m.id === this.defaultModel);
            if (!defaultModel) {
                throw new Error('Default model not found in configuration');
            }
            return defaultModel;
        }
        return model;
    }
    get models(): Model[] {
        return this._models;
    }
    async validateConfig(): Promise<void> {
        if (this._models.length === 0) {
            throw new Error('No models configured for OpenRouter provider');
        }
        try {
            // Simple validation - just verify the API connection works
            await this.client.chat.completions.create(
                {
                    model: this._models[0].id,
                    messages: [{ role: 'user', content: 'test' }],
                    max_tokens: 1 // Minimum response size for validation
                },
                { timeout: this.timeouts.completion }
            );
        } catch (error: unknown) {
            const message = error instanceof Error ? error.message : 'Unknown error';
            throw new Error(`Failed to validate OpenRouter configuration: ${message}`);
        }
    }
    async createCompletion(params: CompletionParams): Promise<ProviderResponse> {
        try {
            // Get the model configuration, falling back to the default model
            const modelConfig = this.getModelConfig(params.model);
            const response = await this.client.chat.completions.create(
                {
                    model: modelConfig.id,
                    messages: params.messages.map((msg: Message) => ({
                        role: msg.role,
                        content: msg.content,
                        name: msg.name
                    })),
                    temperature: params.temperature ?? modelConfig.temperature ?? 0.7,
                    max_tokens: params.maxTokens,
                    stream: false
                },
                // Honor the per-call timeout, falling back to the configured default
                { timeout: params.timeout ?? this.timeouts.completion }
            );
            // Validate the response structure
            if (!response?.choices || !Array.isArray(response.choices) || response.choices.length === 0) {
                throw new Error('Invalid or empty response from OpenRouter');
            }
            const choice = response.choices[0];
            if (!choice?.message || typeof choice.message.content !== 'string') {
                throw new Error('Invalid message structure in OpenRouter response');
            }
            return {
                content: choice.message.content,
                model: modelConfig.id,
                tokenCount: response.usage?.total_tokens,
                metadata: {
                    provider: 'openrouter',
                    modelName: modelConfig.id,
                    ...(response.usage && { usage: response.usage })
                }
            };
        } catch (error: unknown) {
            if (error instanceof Error) {
                if (error.message.includes('timeout')) {
                    throw new Error('OpenRouter request timed out. Please try again.');
                }
                if (error.message.includes('rate_limit')) {
                    throw new Error('OpenRouter rate limit exceeded. Please try again later.');
                }
                if (error.message.includes('insufficient_quota')) {
                    throw new Error('OpenRouter quota exceeded. Please check your credits.');
                }
                throw new Error(`OpenRouter completion failed: ${error.message}`);
            }
            throw new Error('Unknown error occurred during OpenRouter completion');
        }
    }
    async *streamCompletion(params: CompletionParams): AsyncIterableIterator<ProviderResponse> {
        try {
            // Get the model configuration, falling back to the default model
            const modelConfig = this.getModelConfig(params.model);
            // Enforce the per-model streaming flag from the configuration
            if (!modelConfig.streaming) {
                throw new Error(`Model ${modelConfig.id} is not configured for streaming`);
            }
            const stream = await this.client.chat.completions.create(
                {
                    model: modelConfig.id,
                    messages: params.messages.map((msg: Message) => ({
                        role: msg.role,
                        content: msg.content,
                        name: msg.name
                    })),
                    temperature: params.temperature ?? modelConfig.temperature ?? 0.7,
                    max_tokens: params.maxTokens,
                    stream: true
                },
                // Streams use the (longer) stream timeout unless overridden per call
                { timeout: params.timeout ?? this.timeouts.stream }
            );
            for await (const chunk of stream) {
                // Skip chunks without a usable choices array or content delta
                if (!chunk?.choices || !Array.isArray(chunk.choices) || chunk.choices.length === 0) {
                    continue;
                }
                const delta = chunk.choices[0]?.delta;
                if (!delta || typeof delta.content !== 'string') {
                    continue;
                }
                yield {
                    content: delta.content,
                    model: modelConfig.id,
                    metadata: {
                        provider: 'openrouter',
                        modelName: modelConfig.id,
                        isPartial: true
                    }
                };
            }
        } catch (error: unknown) {
            if (error instanceof Error) {
                if (error.message.includes('timeout')) {
                    throw new Error('OpenRouter streaming request timed out. Please try again.');
                }
                if (error.message.includes('rate_limit')) {
                    throw new Error('OpenRouter rate limit exceeded. Please try again later.');
                }
                if (error.message.includes('insufficient_quota')) {
                    throw new Error('OpenRouter quota exceeded. Please check your credits.');
                }
                throw new Error(`OpenRouter streaming completion failed: ${error.message}`);
            }
            throw new Error('Unknown error occurred during OpenRouter streaming');
        }
    }
    /**
     * Get detailed information about all available models
     * @returns Array of model information including default status and pricing
     */
    async listAvailableModels(): Promise<ModelInfo[]> {
        try {
            return this._models.map(model => {
                const [provider] = model.id.split('/');
                return {
                    ...model,
                    provider: provider || 'unknown',
                    isDefault: model.id === this.defaultModel,
                    cost: undefined // Could be fetched from the OpenRouter API if needed
                };
            }).sort((a, b) => {
                // Sort the default model first, then by provider/name
                if (a.isDefault) return -1;
                if (b.isDefault) return 1;
                return a.id.localeCompare(b.id);
            });
        } catch (error) {
            const message = error instanceof Error ? error.message : 'Unknown error';
            throw new Error(`Failed to list available models: ${message}`);
        }
    }
    /**
     * Get the current default model configuration
     * @returns The default model configuration
     */
    getDefaultModel(): ModelInfo {
        const defaultModel = this._models.find(m => m.id === this.defaultModel);
        if (!defaultModel) {
            throw new Error('Default model not found in configuration');
        }
        const [provider] = defaultModel.id.split('/');
        return {
            ...defaultModel,
            isDefault: true,
            provider: provider || 'unknown',
            cost: undefined
        };
    }
}
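// Usage sketch (illustrative, not part of the provider API): wires the
// exampleConfig above into the provider and exercises both completion paths.
// The Message shape assumed here ({ role, content }) follows the import from
// ../types/conversation.js; adjust if that type requires more fields.
export async function demoOpenRouterProvider(): Promise<void> {
    const provider = new OpenRouterProvider(exampleConfig);
    await provider.validateConfig();
    const messages: Message[] = [{ role: 'user', content: 'Hello!' }];
    // One-shot completion
    const reply = await provider.createCompletion({
        messages,
        model: exampleConfig.defaultModel,
        stream: false
    });
    console.log(reply.content, reply.tokenCount);
    // Streaming completion: chunks arrive as partial ProviderResponse objects
    for await (const chunk of provider.streamCompletion({
        messages,
        model: exampleConfig.defaultModel,
        stream: true
    })) {
        process.stdout.write(chunk.content);
    }
}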