Skip to main content
Glama

Web-LLM MCP Server

by ragingwind
test.ts (3.77 kB)
import { spawn, ChildProcess } from 'child_process'; import { fileURLToPath } from 'url'; import { dirname, join } from 'path'; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); interface MCPRequest { jsonrpc: '2.0'; id: number; method: string; params: Record<string, unknown>; } interface MCPResponse { jsonrpc: '2.0'; id: number; result?: unknown; error?: { code: number; message: string; }; } async function testMCPServer(): Promise<void> { console.log('Testing Web-LLM Playwright MCP Server...'); const serverProcess: ChildProcess = spawn('tsx', [join(__dirname, 'index.ts')], { stdio: ['pipe', 'pipe', 'inherit'], }); const responses: (MCPResponse | string)[] = []; serverProcess.stdout?.on('data', (data: Buffer) => { const lines = data .toString() .split('\n') .filter((line) => line.trim()); lines.forEach((line) => { try { const response = JSON.parse(line) as MCPResponse; responses.push(response); console.log('Received:', JSON.stringify(response, null, 2)); } catch (e) { console.log('Raw output:', line); responses.push(line); } }); }); // Wait for server to initialize await new Promise((resolve) => setTimeout(resolve, 2000)); // Test initialization request const initRequest: MCPRequest = { jsonrpc: '2.0', id: 1, method: 'initialize', params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test-client', version: '1.0.0', }, }, }; console.log('Sending initialize request...'); serverProcess.stdin?.write(JSON.stringify(initRequest) + '\n'); // Wait for response await new Promise((resolve) => setTimeout(resolve, 1000)); // Test list tools request const listToolsRequest: MCPRequest = { jsonrpc: '2.0', id: 2, method: 'tools/list', params: {}, }; console.log('Sending list tools request...'); serverProcess.stdin?.write(JSON.stringify(listToolsRequest) + '\n'); // Wait for response await new Promise((resolve) => setTimeout(resolve, 1000)); // Test status request const statusRequest: MCPRequest = { jsonrpc: '2.0', 
id: 3, method: 'tools/call', params: { name: 'playwright_llm_status', arguments: {}, }, }; console.log('Sending status request...'); serverProcess.stdin?.write(JSON.stringify(statusRequest) + '\n'); // Wait for response await new Promise((resolve) => setTimeout(resolve, 10000)); // Test simple generation (this will take longer due to model loading) const generateRequest: MCPRequest = { jsonrpc: '2.0', id: 4, method: 'tools/call', params: { name: 'playwright_llm_generate', arguments: { prompt: 'Hello! Please respond with a short greeting.', maxTokens: 50, }, }, }; console.log('Sending generate request (this may take a while for model loading)...'); serverProcess.stdin?.write(JSON.stringify(generateRequest) + '\n'); // Wait longer for generation await new Promise((resolve) => setTimeout(resolve, 30000 * 100)); console.log('Test completed. Terminating server...'); // Send SIGINT to allow graceful shutdown serverProcess.kill('SIGINT'); // Wait a bit for graceful shutdown await new Promise((resolve) => setTimeout(resolve, 2000)); // Force kill if still running if (!serverProcess.killed) { console.log('Force killing server...'); serverProcess.kill('SIGKILL'); } } // Handle cleanup in test process too process.on('SIGINT', () => { console.log('Test interrupted, exiting...'); process.exit(0); }); // Run the test testMCPServer().catch(console.error);

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/ragingwind/web-llm-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server