/**
* @fileoverview Unit tests for OpenRouter LLM provider.
* @module tests/services/llm/providers/openrouter.provider
*/
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import type {
ChatCompletion,
ChatCompletionCreateParamsNonStreaming,
} from 'openai/resources/chat/completions';
import { OpenRouterProvider } from '@/services/llm/providers/openrouter.provider.js';
import { config } from '@/config/index.js';
import { logger } from '@/utils/index.js';
import { RateLimiter } from '@/utils/security/rateLimiter.js';
import { McpError } from '@/types-global/errors.js';
import { requestContextService } from '@/utils/index.js';
// Mock OpenAI client
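// (vi.mock is hoisted above the imports, so both the provider module's static
// import of 'openai' and the dynamic import('openai') calls in the tests
// below receive this mocked constructor)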
vi.mock('openai', () => {
return {
default: vi.fn().mockImplementation(() => ({
chat: {
completions: {
create: vi.fn(),
},
},
})),
};
});
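/**
 * Local test helper (not part of the provider's API): re-points the mocked
 * OpenAI constructor at a client whose `chat.completions.create` is the
 * given spy, so each test can control the response shape before it
 * recreates the provider.
 */
const setOpenAICreateMock = async (create: ReturnType<typeof vi.fn>) => {
  const OpenAI = (await import('openai')).default;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  (OpenAI as any).mockImplementation(() => ({
    chat: {
      completions: {
        create,
      },
    },
  }));
};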
describe('OpenRouterProvider', () => {
let provider: OpenRouterProvider;
let rateLimiter: RateLimiter;
let originalApiKey: string | undefined;
beforeEach(() => {
vi.clearAllMocks();
originalApiKey = config.openrouterApiKey;
// Set up a valid API key for tests
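    // (defineProperty is used in case the config object is frozen or its
    // properties are read-only)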
Object.defineProperty(config, 'openrouterApiKey', {
value: 'test-api-key',
writable: true,
configurable: true,
});
rateLimiter = new RateLimiter(config, logger);
provider = new OpenRouterProvider(rateLimiter, config, logger);
});
afterEach(() => {
// Restore original API key
Object.defineProperty(config, 'openrouterApiKey', {
value: originalApiKey,
writable: true,
configurable: true,
});
});
describe('constructor', () => {
it('should initialize successfully with valid API key', () => {
expect(provider).toBeInstanceOf(OpenRouterProvider);
});
it('should throw McpError when API key is missing', () => {
Object.defineProperty(config, 'openrouterApiKey', {
value: undefined,
writable: true,
configurable: true,
});
expect(() => new OpenRouterProvider(rateLimiter, config, logger)).toThrow(
McpError,
);
});
    it('should initialize when app URL and name are configured', () => {
Object.defineProperty(config, 'openrouterAppUrl', {
value: 'https://test-app.com',
writable: true,
configurable: true,
});
Object.defineProperty(config, 'openrouterAppName', {
value: 'TestApp',
writable: true,
configurable: true,
});
      // Construction should succeed with the optional app URL and name set;
      // the exact headers passed to the OpenAI client are an implementation
      // detail not asserted here.
      const testProvider = new OpenRouterProvider(rateLimiter, config, logger);
      expect(testProvider).toBeInstanceOf(OpenRouterProvider);
});
});
describe('chatCompletion', () => {
it('should make chat completion request with valid parameters', async () => {
const mockResponse: ChatCompletion = {
id: 'chatcmpl-123',
object: 'chat.completion',
        created: Math.floor(Date.now() / 1000), // the API reports Unix seconds
model: 'gpt-3.5-turbo',
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'Hello! How can I help you?',
refusal: null,
},
finish_reason: 'stop',
logprobs: null,
},
],
usage: {
prompt_tokens: 10,
completion_tokens: 20,
total_tokens: 30,
},
};
      // Mock the OpenAI client create method and recreate the provider
      const mockCreate = vi.fn().mockResolvedValue(mockResponse);
      await setOpenAICreateMock(mockCreate);
      provider = new OpenRouterProvider(rateLimiter, config, logger);
const context = requestContextService.createRequestContext({
operation: 'test-chat-completion',
});
const result = await provider.chatCompletion(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Hello' }],
},
context,
);
expect(result).toEqual(mockResponse);
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Hello' }],
}),
);
});
it('should apply default parameters when not provided', async () => {
const mockResponse: ChatCompletion = {
id: 'chatcmpl-456',
object: 'chat.completion',
        created: Math.floor(Date.now() / 1000),
model: config.llmDefaultModel,
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'Response',
refusal: null,
},
finish_reason: 'stop',
logprobs: null,
},
],
usage: {
prompt_tokens: 5,
completion_tokens: 10,
total_tokens: 15,
},
};
      const mockCreate = vi.fn().mockResolvedValue(mockResponse);
      await setOpenAICreateMock(mockCreate);
      provider = new OpenRouterProvider(rateLimiter, config, logger);
const context = requestContextService.createRequestContext({
operation: 'test-defaults',
});
await provider.chatCompletion(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Test' }],
} as ChatCompletionCreateParamsNonStreaming,
context,
);
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: expect.any(String),
temperature: config.llmDefaultTemperature,
top_p: config.llmDefaultTopP,
max_tokens: config.llmDefaultMaxTokens,
}),
);
});
it('should enforce rate limiting', async () => {
const context = requestContextService.createRequestContext({
operation: 'test-rate-limit',
requestId: 'rate-limit-test',
});
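      // The same context is reused for both calls below so that (assuming the
      // limiter keys on the request identity) they count against one window.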
// Configure rate limiter to allow only 1 request
rateLimiter.configure({
maxRequests: 1,
windowMs: 60000,
});
      const mockCreate = vi.fn().mockResolvedValue({
        id: 'test',
        object: 'chat.completion',
        created: Math.floor(Date.now() / 1000),
        model: 'test',
        choices: [],
      });
      await setOpenAICreateMock(mockCreate);
      provider = new OpenRouterProvider(rateLimiter, config, logger);
// First request should succeed
await provider.chatCompletion(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'First' }],
},
context,
);
// Second request should be rate limited
await expect(
provider.chatCompletion(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Second' }],
},
context,
),
).rejects.toThrow(McpError);
});
it('should handle null temperature parameter correctly', async () => {
      const mockCreate = vi.fn().mockResolvedValue({
        id: 'test',
        object: 'chat.completion',
        created: Math.floor(Date.now() / 1000),
        model: 'test',
        choices: [],
      });
      await setOpenAICreateMock(mockCreate);
      provider = new OpenRouterProvider(rateLimiter, config, logger);
const context = requestContextService.createRequestContext({
operation: 'test-null-temp',
});
await provider.chatCompletion(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Test' }],
temperature: null,
},
context,
);
// Temperature should be undefined (not passed to API)
const callArgs = mockCreate.mock.calls[0]?.[0];
expect(callArgs?.temperature).toBeUndefined();
});
});
describe('chatCompletionStream', () => {
it('should return an async iterable for streaming responses', async () => {
const mockChunk = {
id: 'chunk-1',
object: 'chat.completion.chunk',
        created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
choices: [
{
index: 0,
delta: { content: 'Hello' },
finish_reason: null,
logprobs: null,
},
],
};
      const mockAsyncIterator = (async function* () {
        yield mockChunk;
      })();
      // The real SDK's create() returns a promise when streaming, so resolve
      // to the async iterator rather than returning it synchronously.
      const mockCreate = vi.fn().mockResolvedValue(mockAsyncIterator);
      await setOpenAICreateMock(mockCreate);
      provider = new OpenRouterProvider(rateLimiter, config, logger);
const context = requestContextService.createRequestContext({
operation: 'test-stream',
});
const stream = await provider.chatCompletionStream(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Stream test' }],
},
context,
);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(0);
expect(chunks[0]).toEqual(mockChunk);
});
it('should set stream parameter to true automatically', async () => {
      const mockCreate = vi.fn().mockResolvedValue(
        (async function* () {
          yield {
            id: 'test',
            object: 'chat.completion.chunk',
            created: Math.floor(Date.now() / 1000),
            model: 'test',
            choices: [],
          };
        })(),
      );
      await setOpenAICreateMock(mockCreate);
      provider = new OpenRouterProvider(rateLimiter, config, logger);
const context = requestContextService.createRequestContext({
operation: 'test-stream-param',
});
await provider.chatCompletionStream(
{
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Test' }],
},
context,
);
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
stream: true,
}),
);
});
});
});