Skip to main content
Glama
server.ts (2.35 kB)
import { FastMCP } from '@mcp/server'; import { PredictionServiceClient } from '@google-cloud/aiplatform'; import { readFileSync } from 'fs'; // Environment variables are expected to be set before launch const { VERTEX_PROJECT, VERTEX_REGION = 'us-central1', ENDPOINT_27B, ENDPOINT_4B } = process.env; if (!VERTEX_PROJECT || !ENDPOINT_27B || !ENDPOINT_4B) { console.error('Missing one of VERTEX_PROJECT, ENDPOINT_27B or ENDPOINT_4B'); process.exit(1); } const client = new PredictionServiceClient(); const mcp = new FastMCP('medgemma'); async function vertexPredict(endpointId: string, instances: unknown[], parameters?: unknown) { const endpoint = `projects/${VERTEX_PROJECT}/locations/${VERTEX_REGION}/endpoints/${endpointId}`; const [response] = await client.predict({ endpoint, instances, parameters }); return response?.predictions?.[0] ?? null; } /** * Answer a high-level medical question using the MedGemma-27B (text-only) endpoint. * @param question Free-text medical question * @param temperature Sampling temperature (default 0.2) */ // eslint-disable-next-line @typescript-eslint/no-unused-vars mcp.tool(async function answer_med_text(question: string, temperature = 0.2) { const result = await vertexPredict( ENDPOINT_27B as string, [{ prompt: question }], { temperature } ); // Vertex responses vary by model; try to normalise return (result as any)?.content || JSON.stringify(result); }); /** * Interpret a medical image with optional prompt using the MedGemma-4B multimodal endpoint. * @param imageUri HTTPS URL, data URI, or file:// path * @param prompt Optional text prompt * @param temperature Sampling temperature (default 0.1) */ // eslint-disable-next-line @typescript-eslint/no-unused-vars mcp.tool(async function answer_med_image(imageUri: string, prompt = '', temperature = 0.1) { const imageBytes = imageUri.startsWith('file:') ? 
readFileSync(new URL(imageUri)) : imageUri; const result = await vertexPredict( ENDPOINT_4B as string, [{ image: { bytesBase64Encoded: imageBytes }, prompt }], { temperature } ); return (result as any)?.content || JSON.stringify(result); }); // Start server over HTTP on port 4000 (Claude Desktop-friendly) mcp.start({ httpPort: 4000 }); console.log('MedGemma MCP server listening on http://localhost:4000/mcp');

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/199-mcp/mcp-medical-imaging'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.