Skip to main content
Glama
server.test.js (13.6 kB)
import { jest } from '@jest/globals';
import {
  MockOpenAI,
  MockGoogleGenerativeAI,
  MockServer,
  MockStdioServerTransport,
} from './mocks.js';
import { MODELS } from '../src/constants.js';

// Mock every external dependency before the module under test is loaded.
// NOTE(review): in ESM mode (`import { jest } from '@jest/globals'`) these
// jest.mock factories are NOT hoisted above static imports. They only take
// effect here because src/index.js is loaded via dynamic import() inside
// beforeAll(), after all mocks are registered. If the project moves to
// native ESM without a CJS transform, confirm whether
// jest.unstable_mockModule is required instead.
jest.mock('openai', () => ({ default: MockOpenAI, __esModule: true }));
jest.mock('@google/generative-ai', () => ({
  GoogleGenerativeAI: MockGoogleGenerativeAI,
}));
jest.mock('@modelcontextprotocol/sdk/server/index.js', () => ({
  Server: MockServer,
}));
jest.mock('@modelcontextprotocol/sdk/server/stdio.js', () => ({
  StdioServerTransport: MockStdioServerTransport,
}));
jest.mock('@modelcontextprotocol/sdk/types.js', () => ({
  CallToolRequestSchema: 'CallToolRequestSchema',
  ListToolsRequestSchema: 'ListToolsRequestSchema',
}));
jest.mock('dotenv', () => ({
  default: { config: jest.fn() },
  config: jest.fn(),
}));
jest.mock('fs', () => ({ existsSync: jest.fn() }));
jest.mock('../src/logger.js', () => ({
  default: {
    info: jest.fn(),
    error: jest.fn(),
    warn: jest.fn(),
    debug: jest.fn(),
  },
}));

describe('AIBridgeServer', () => {
  let AIBridgeServer;
  let originalEnv;

  beforeAll(async () => {
    // Keep a reference to the original environment so every test starts
    // from a pristine copy (see beforeEach) and afterAll can restore it.
    originalEnv = process.env;

    // Import AFTER the mocks above are registered. If src/index.js does not
    // export the class, fall back to a local reference implementation that
    // mirrors the expected behavior against the mock dependencies.
    const mod = await import('../src/index.js');
    AIBridgeServer =
      mod.AIBridgeServer ||
      class AIBridgeServer {
        constructor() {
          this.server = new MockServer(
            {
              name: process.env.MCP_SERVER_NAME || 'AI Bridge',
              version: process.env.MCP_SERVER_VERSION || '1.0.0',
            },
            {
              capabilities: {
                tools: {},
              },
            }
          );

          // Provider clients are only created when the matching key is set;
          // the tool list below is derived from which clients exist.
          this.openai = process.env.OPENAI_API_KEY
            ? new MockOpenAI({ apiKey: process.env.OPENAI_API_KEY })
            : null;
          this.gemini = process.env.GOOGLE_AI_API_KEY
            ? new MockGoogleGenerativeAI(process.env.GOOGLE_AI_API_KEY)
            : null;

          // Handlers are closures over `this`, so registering them before
          // this.tools is assigned is safe — they only run after construction.
          this.setupHandlers();
          this.tools = this.getAvailableTools();
        }

        /**
         * Builds the advertised tool list. ask_openai / ask_gemini appear
         * only when the corresponding client is configured; server_info is
         * always present. Order (openai, gemini, server_info) is relied on
         * by the list_tools test below.
         */
        getAvailableTools() {
          const tools = [];
          if (this.openai) {
            tools.push({
              name: 'ask_openai',
              description: 'Ask OpenAI GPT models a question',
              inputSchema: {
                type: 'object',
                properties: {
                  prompt: {
                    type: 'string',
                    description: 'The prompt to send to OpenAI',
                  },
                  model: {
                    type: 'string',
                    description: 'The model to use (default: gpt-4o-mini)',
                    enum: MODELS.OPENAI,
                    default: 'gpt-4o-mini',
                  },
                  temperature: {
                    type: 'number',
                    description: 'Temperature for response generation (0-2)',
                    default: 0.7,
                    minimum: 0,
                    maximum: 2,
                  },
                },
                required: ['prompt'],
              },
            });
          }
          if (this.gemini) {
            tools.push({
              name: 'ask_gemini',
              description: 'Ask Google Gemini AI a question',
              inputSchema: {
                type: 'object',
                properties: {
                  prompt: {
                    type: 'string',
                    description: 'The prompt to send to Gemini',
                  },
                  model: {
                    type: 'string',
                    description:
                      'The model to use (default: gemini-1.5-flash-latest)',
                    enum: MODELS.GEMINI,
                    default: 'gemini-1.5-flash-latest',
                  },
                  temperature: {
                    type: 'number',
                    description: 'Temperature for response generation (0-1)',
                    default: 0.7,
                    minimum: 0,
                    maximum: 1,
                  },
                },
                required: ['prompt'],
              },
            });
          }
          tools.push({
            name: 'server_info',
            description: 'Get server status and configuration',
            inputSchema: {
              type: 'object',
              properties: {},
            },
          });
          return tools;
        }

        /**
         * Registers the list_tools / call_tool handlers.
         *
         * FIX: the tests resolve handlers through
         * server.server.handlers.get('ListToolsRequestSchema' |
         * 'CallToolRequestSchema') — i.e. via the mock Server — which the
         * real implementation populates through setRequestHandler(). The
         * previous fallback only stored handlers on this.handlers, so it
         * could never satisfy those lookups. Register on the server as
         * well (schema values are the string constants from the mocked
         * types module). setRequestHandler is called optionally in case
         * MockServer's surface differs — TODO confirm against mocks.js.
         */
        setupHandlers() {
          const listTools = async () => ({
            tools: this.tools,
          });

          const callTool = async (request) => {
            const { name, arguments: args } = request.params;
            try {
              switch (name) {
                case 'ask_openai':
                  return await this.handleOpenAI(args);
                case 'ask_gemini':
                  return await this.handleGemini(args);
                case 'server_info':
                  return this.handleServerInfo();
                default:
                  throw new Error(`Unknown tool: ${name}`);
              }
            } catch (error) {
              // Tool failures are reported as text content rather than
              // rethrown, so a bad call never crashes the server loop.
              return {
                content: [
                  {
                    type: 'text',
                    text: `Error: ${error.message}`,
                  },
                ],
              };
            }
          };

          this.handlers = new Map([
            ['list_tools', listTools],
            ['call_tool', callTool],
          ]);
          this.server.setRequestHandler?.('ListToolsRequestSchema', listTools);
          this.server.setRequestHandler?.('CallToolRequestSchema', callTool);
        }

        /**
         * Proxies a prompt to the (mock) OpenAI chat-completions API.
         * @param {{prompt: string, model?: string, temperature?: number}} args
         * @returns {Promise<{content: Array<{type: string, text: string}>}>}
         * @throws {Error} when no OpenAI client is configured
         */
        async handleOpenAI(args) {
          if (!this.openai) {
            throw new Error('OpenAI API key not configured');
          }
          const completion = await this.openai.chat.completions.create({
            model: args.model || 'gpt-4o-mini',
            messages: [{ role: 'user', content: args.prompt }],
            // FIX: ?? instead of || — an explicit temperature of 0 is a
            // valid request and must not silently become the 0.7 default.
            temperature: args.temperature ?? 0.7,
          });
          return {
            content: [
              {
                type: 'text',
                text: `🤖 OPENAI RESPONSE (${args.model || 'gpt-4o-mini'}):\n\n${completion.choices[0].message.content}`,
              },
            ],
          };
        }

        /**
         * Proxies a prompt to the (mock) Gemini generateContent API.
         * @param {{prompt: string, model?: string, temperature?: number}} args
         * @returns {Promise<{content: Array<{type: string, text: string}>}>}
         * @throws {Error} when no Gemini client is configured
         */
        async handleGemini(args) {
          if (!this.gemini) {
            throw new Error('Gemini API key not configured');
          }
          const model = this.gemini.getGenerativeModel({
            model: args.model || 'gemini-1.5-flash-latest',
            generationConfig: {
              // FIX: ?? instead of || — honor an explicit temperature of 0.
              temperature: args.temperature ?? 0.7,
            },
          });
          const result = await model.generateContent(args.prompt);
          const response = await result.response;
          const text = response.text();
          return {
            content: [
              {
                type: 'text',
                text: `🤖 GEMINI RESPONSE (${args.model || 'gemini-1.5-flash-latest'}):\n\n${text}`,
              },
            ],
          };
        }

        /**
         * Reports server name/version and per-provider configuration state
         * as pretty-printed JSON after a fixed header line.
         */
        handleServerInfo() {
          const info = {
            name: process.env.MCP_SERVER_NAME || 'AI Bridge',
            version: process.env.MCP_SERVER_VERSION || '1.0.0',
            openai: {
              configured: !!this.openai,
              models: this.openai ? MODELS.OPENAI : [],
            },
            gemini: {
              configured: !!this.gemini,
              models: this.gemini ? MODELS.GEMINI : [],
            },
          };
          return {
            content: [
              {
                type: 'text',
                text: `🤖 AI BRIDGE SERVER INFO:\n\n${JSON.stringify(info, null, 2)}`,
              },
            ],
          };
        }

        // Connects the server to a (mock) stdio transport; resolves to
        // undefined on success.
        async run() {
          const transport = new MockStdioServerTransport();
          await this.server.connect(transport);
          console.error('AI Bridge MCP server running...');
        }
      };
  });

  beforeEach(() => {
    // Each test mutates process.env; hand it a fresh copy of the original.
    process.env = { ...originalEnv };
    jest.clearAllMocks();
  });

  afterAll(() => {
    // Restore the real environment object for whatever runs after this file.
    process.env = originalEnv;
  });

  describe('Server Initialization', () => {
    test('should initialize without API keys', () => {
      delete process.env.OPENAI_API_KEY;
      delete process.env.GOOGLE_AI_API_KEY;

      const server = new AIBridgeServer();

      expect(server.openai).toBeNull();
      expect(server.gemini).toBeNull();
      expect(server.tools).toHaveLength(1); // Only server_info tool
      expect(server.tools[0].name).toBe('server_info');
    });

    test('should initialize with OpenAI API key only', () => {
      process.env.OPENAI_API_KEY = 'sk-test-openai-key';
      delete process.env.GOOGLE_AI_API_KEY;

      const server = new AIBridgeServer();

      expect(server.openai).toBeTruthy();
      expect(server.gemini).toBeNull();
      expect(server.tools).toHaveLength(2); // ask_openai and server_info
      expect(server.tools.find((t) => t.name === 'ask_openai')).toBeTruthy();
    });

    test('should initialize with Gemini API key only', () => {
      delete process.env.OPENAI_API_KEY;
      process.env.GOOGLE_AI_API_KEY = 'test-gemini-key';

      const server = new AIBridgeServer();

      expect(server.openai).toBeNull();
      expect(server.gemini).toBeTruthy();
      expect(server.tools).toHaveLength(2); // ask_gemini and server_info
      expect(server.tools.find((t) => t.name === 'ask_gemini')).toBeTruthy();
    });

    test('should initialize with both API keys', () => {
      process.env.OPENAI_API_KEY = 'sk-test-openai-key';
      process.env.GOOGLE_AI_API_KEY = 'test-gemini-key';

      const server = new AIBridgeServer();

      expect(server.openai).toBeTruthy();
      expect(server.gemini).toBeTruthy();
      expect(server.tools).toHaveLength(3); // all three tools
      expect(server.tools.find((t) => t.name === 'ask_openai')).toBeTruthy();
      expect(server.tools.find((t) => t.name === 'ask_gemini')).toBeTruthy();
      expect(server.tools.find((t) => t.name === 'server_info')).toBeTruthy();
    });
  });

  describe('Tool Handlers', () => {
    let server;

    beforeEach(() => {
      process.env.OPENAI_API_KEY = 'sk-test-openai-key';
      process.env.GOOGLE_AI_API_KEY = 'test-gemini-key';
      server = new AIBridgeServer();
    });

    test('should handle list_tools request', async () => {
      const handler = server.server.handlers.get('ListToolsRequestSchema');
      const result = await handler();

      expect(result.tools).toHaveLength(3);
      expect(result.tools.map((t) => t.name)).toEqual([
        'ask_openai',
        'ask_gemini',
        'server_info',
      ]);
    });

    test('should handle ask_openai request', async () => {
      const handler = server.server.handlers.get('CallToolRequestSchema');
      const result = await handler({
        params: {
          name: 'ask_openai',
          arguments: { prompt: 'Test prompt', model: 'gpt-4o', temperature: 0.5 },
        },
      });

      expect(result.content[0].text).toContain('OPENAI RESPONSE');
      expect(result.content[0].text).toContain('gpt-4o');
      expect(result.content[0].text).toContain('Mock OpenAI response content');
    });

    test('should handle ask_gemini request', async () => {
      const handler = server.server.handlers.get('CallToolRequestSchema');
      const result = await handler({
        params: {
          name: 'ask_gemini',
          arguments: {
            prompt: 'Test prompt',
            model: 'gemini-1.5-pro-latest',
            temperature: 0.8,
          },
        },
      });

      expect(result.content[0].text).toContain('GEMINI RESPONSE');
      expect(result.content[0].text).toContain('gemini-1.5-pro-latest');
      expect(result.content[0].text).toContain('Mock Gemini response content');
    });

    test('should handle server_info request', async () => {
      const handler = server.server.handlers.get('CallToolRequestSchema');
      const result = await handler({
        params: { name: 'server_info', arguments: {} },
      });

      expect(result.content[0].text).toContain('AI BRIDGE SERVER INFO');
      // The payload is "<header>\n\n<json>"; JSON.stringify(.., null, 2)
      // never emits blank lines, so index 1 is the whole JSON document.
      const info = JSON.parse(result.content[0].text.split('\n\n')[1]);
      expect(info.openai.configured).toBe(true);
      expect(info.gemini.configured).toBe(true);
      // NOTE: these counts must track MODELS in src/constants.js.
      expect(info.openai.models).toHaveLength(17);
      expect(info.gemini.models).toHaveLength(9);
    });

    test('should handle unknown tool error', async () => {
      const handler = server.server.handlers.get('CallToolRequestSchema');
      const result = await handler({
        params: { name: 'unknown_tool', arguments: {} },
      });

      expect(result.content[0].text).toContain(
        'Error: Unknown tool: unknown_tool'
      );
    });

    test('should handle OpenAI error when not configured', async () => {
      delete process.env.OPENAI_API_KEY;
      server = new AIBridgeServer();

      const handler = server.server.handlers.get('CallToolRequestSchema');
      const result = await handler({
        params: { name: 'ask_openai', arguments: { prompt: 'Test' } },
      });

      expect(result.content[0].text).toContain(
        'Error: OpenAI API key not configured'
      );
    });

    test('should handle Gemini error when not configured', async () => {
      delete process.env.GOOGLE_AI_API_KEY;
      server = new AIBridgeServer();

      const handler = server.server.handlers.get('CallToolRequestSchema');
      const result = await handler({
        params: { name: 'ask_gemini', arguments: { prompt: 'Test' } },
      });

      expect(result.content[0].text).toContain(
        'Error: Gemini API key not configured'
      );
    });
  });

  describe('Server Run', () => {
    test('should start server successfully', async () => {
      const server = new AIBridgeServer();
      // FIX: `.resolves.not.toThrow()` mixes the promise and function forms
      // of toThrow; assert the actual contract instead — run() resolves to
      // undefined when the transport connects.
      await expect(server.run()).resolves.toBeUndefined();
    });
  });
});

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/fakoli/mcp-ai-bridge'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.