OpenAI MCP Server

by bhjo0930
test_new_tools.js • 3.73 kB
#!/usr/bin/env node

const { spawn } = require('child_process');

async function testTools() {
  console.log('🧪 Testing new OpenAI MCP Server tools...\n');

  // Start the server in a child process
  const serverProcess = spawn('node', ['./dist/server.js'], {
    stdio: ['pipe', 'pipe', 'pipe'],
    env: { ...process.env, DEBUG: 'false' }
  });

  let serverOutput = '';
  serverProcess.stderr.on('data', (data) => {
    serverOutput += data.toString();
  });

  // Wait for server to start
  await new Promise(resolve => setTimeout(resolve, 1000));

  // Test list_tools request
  const listToolsRequest = {
    jsonrpc: '2.0',
    id: 1,
    method: 'tools/list'
  };

  console.log('📋 Testing tools/list...');
  serverProcess.stdin.write(JSON.stringify(listToolsRequest) + '\n');

  // Wait for response
  let responseReceived = false;
  const responseTimeout = setTimeout(() => {
    if (!responseReceived) {
      console.log('❌ Timeout waiting for response');
      serverProcess.kill();
    }
  }, 5000);

  serverProcess.stdout.on('data', (data) => {
    try {
      const response = JSON.parse(data.toString().trim());

      if (response.id === 1) {
        responseReceived = true;
        clearTimeout(responseTimeout);

        console.log('✅ Received tools list:');
        if (response.result && response.result.tools) {
          response.result.tools.forEach((tool, index) => {
            console.log(`  ${index + 1}. ${tool.name} - ${tool.description}`);
          });
          console.log(`\n📊 Total tools: ${response.result.tools.length}`);

          if (response.result.tools.length >= 5) {
            console.log('✅ Successfully added new tools!');
          } else {
            console.log('⚠️ Expected at least 5 tools');
          }
        } else {
          console.log('❌ No tools found in response');
        }

        // Test a specific tool
        console.log('\n🔧 Testing analyze_token_usage tool...');
        const tokenAnalysisRequest = {
          jsonrpc: '2.0',
          id: 2,
          method: 'tools/call',
          params: {
            name: 'analyze_token_usage',
            arguments: {
              text: 'This is a test text for token analysis. It contains several sentences and should give us some useful metrics.',
              includeOptimization: true
            }
          }
        };

        serverProcess.stdin.write(JSON.stringify(tokenAnalysisRequest) + '\n');
      } else if (response.id === 2) {
        console.log('✅ Token analysis result received:');
        if (response.result && response.result.content) {
          const content = response.result.content[0].text;
          const lines = content.split('\n').slice(0, 10); // Show first 10 lines
          lines.forEach(line => {
            if (line.trim()) console.log(`  ${line}`);
          });
          console.log('  ... (truncated)');
        }

        console.log('\n🎉 All tests completed successfully!');
        serverProcess.kill();
      }
    } catch (error) {
      console.log('Response parsing error:', error.message);
      console.log('Raw data:', data.toString());
    }
  });

  // Handle server exit
  serverProcess.on('exit', (code) => {
    console.log(`\n📊 Server exited with code: ${code}`);
    if (serverOutput) {
      console.log('\n📝 Server output:');
      console.log(serverOutput);
    }
  });

  // Cleanup after 10 seconds
  setTimeout(() => {
    if (!serverProcess.killed) {
      console.log('\n⏰ Test timeout, killing server...');
      serverProcess.kill();
    }
  }, 10000);
}

testTools().catch(console.error);
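To run the test, the compiled server must exist at ./dist/server.js (built with the project's TypeScript build step, whatever it is named), after which the script can be executed directly with node test_new_tools.js from the repository root.

The script drives the server over newline-delimited JSON-RPC on stdin/stdout and matches responses to requests by id. It assumes each stdout chunk carries exactly one complete JSON message. A small helper that factors out this request/response pairing, and tolerates multiple or blank lines per chunk, might look like the following sketch; the helper name and timeout default are illustrative and not part of the project.

// Sketch: newline-delimited JSON-RPC request/response helper for a child process.
// Assumes (as the test above does) that the server writes complete JSON messages
// terminated by newlines to stdout; a production client would also buffer partial chunks.
function sendRequest(proc, id, method, params, timeoutMs = 5000) {
  return new Promise((resolve, reject) => {
    const timer = setTimeout(() => {
      proc.stdout.off('data', onData);
      reject(new Error(`Timed out waiting for response ${id}`));
    }, timeoutMs);

    const onData = (data) => {
      for (const line of data.toString().split('\n')) {
        if (!line.trim()) continue;
        let message;
        try {
          message = JSON.parse(line);
        } catch {
          continue; // not a complete JSON line; ignored in this sketch
        }
        if (message.id === id) {
          clearTimeout(timer);
          proc.stdout.off('data', onData);
          resolve(message);
        }
      }
    };

    proc.stdout.on('data', onData);
    proc.stdin.write(JSON.stringify({ jsonrpc: '2.0', id, method, params }) + '\n');
  });
}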

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bhjo0930/openai_mcp'
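The same record can be fetched from Node. A minimal sketch, assuming Node 18+ (for the built-in fetch) and a JSON response body:

// Sketch: query the Glama MCP directory API for this server's entry.
// Assumes the endpoint returns JSON; check the API documentation for the actual schema.
const url = 'https://glama.ai/api/mcp/v1/servers/bhjo0930/openai_mcp';

fetch(url)
  .then((res) => {
    if (!res.ok) throw new Error(`Request failed with status ${res.status}`);
    return res.json();
  })
  .then((server) => console.log(server))
  .catch(console.error);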

If you have feedback or need assistance with the MCP directory API, please join our Discord server.