Skip to main content
Glama
pushGeminiFlex.ts (6.37 kB)
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { messagingApi } from "@line/bot-sdk"; import { z } from "zod"; import { createErrorResponse, createSuccessResponse, } from "../common/response.js"; import { AbstractTool } from "./AbstractTool.js"; import { NO_USER_ID_ERROR } from "../common/schema/constants.js"; import { flexMessageSchema } from "../common/schema/flexMessage.js"; type GenerateContentResponse = { candidates?: Array<{ content?: { parts?: Array<{ text?: string }> } }>; error?: { message?: string }; }; export default class PushGeminiFlex extends AbstractTool { private client: messagingApi.MessagingApiClient; private destinationId: string; constructor(client: messagingApi.MessagingApiClient, destinationId: string) { super(); this.client = client; this.destinationId = destinationId; } register(server: McpServer) { const userIdSchema = z .string() .default(this.destinationId) .describe( "The user ID to receive a message. Defaults to DESTINATION_USER_ID.", ); const modelSchema = z .string() .default("gemini-2.0-flash") .describe("Gemini model name, e.g., gemini-2.0-flash"); const promptSchema = z .string() .min(1) .describe("Describe the Flex card you want."); const altTextSchema = z .string() .default("Generated card") .describe("Alternative text for Flex message."); server.tool( "push_gemini_flex", "Generate a LINE Flex message (bubble/carousel) from a natural language prompt using Gemini, then push it to a user.", { userId: userIdSchema, prompt: promptSchema, model: modelSchema, altText: altTextSchema, }, async ({ userId, prompt, model, altText }) => { if (!userId) return createErrorResponse(NO_USER_ID_ERROR); const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY; if (!apiKey) { return createErrorResponse( "Please set GEMINI_API_KEY (or GOOGLE_API_KEY) in environment variables.", ); } try { async function callGeminiOnce( modelName: string, apiVersion: "v1" | "v1beta", ) { const endpoint = 
`https://generativelanguage.googleapis.com/${apiVersion}/models/${encodeURIComponent( modelName, )}:generateContent`; const body = { contents: [ { role: "user", parts: [ { text: `You are an assistant that outputs only JSON for LINE Flex Message 'contents'.\n` + `Return a valid 'contents' object (type: 'bubble' or 'carousel'). Do not include markdown or explanations.\n` + `Keep text short. Avoid unsupported fields.\n\n` + `Requirement: ${prompt}`, }, ], }, ], }; const res = await fetch(endpoint, { method: "POST", headers: { "Content-Type": "application/json", "X-goog-api-key": apiKey, }, body: JSON.stringify(body), }); return res; } const tryModels: string[] = [model]; if (!model.endsWith("-latest")) tryModels.push(`${model}-latest`); for (const m of [ "gemini-2.0-flash", "gemini-2.0-flash-latest", "gemini-1.5-flash-latest", ]) { if (!tryModels.includes(m)) tryModels.push(m); } let res: Response | undefined; let lastErr = ""; for (const m of tryModels) { for (const ver of ["v1", "v1beta"] as const) { res = await callGeminiOnce(m, ver); if (res.ok) break; lastErr = await res.text(); if (res.status !== 404) break; } if (res?.ok) break; } if (!res || !res.ok) { return createErrorResponse( `Gemini API error: HTTP ${res?.status} ${res?.statusText} - ${lastErr}`, ); } const data = (await res.json()) as GenerateContentResponse; const raw = data?.candidates?.[0]?.content?.parts ?.map(p => p.text || "") .join("") || ""; if (!raw) return createErrorResponse( data?.error?.message || "Empty result from Gemini", ); // Extract plain JSON even if wrapped with ```json fences or extra text const fence = raw.match(/```(?:json)?\s*([\s\S]*?)```/i); let jsonText = fence ? 
fence[1].trim() : raw; if (!fence) { const s = raw.indexOf("{"); const e = raw.lastIndexOf("}"); if (s !== -1 && e !== -1 && e > s) jsonText = raw.slice(s, e + 1); } let contents: unknown; try { contents = JSON.parse(jsonText); } catch (e1: any) { // Attempt a light sanitization (remove trailing commas) then parse again try { const sanitized = jsonText.replace(/,\s*([}\]])/g, "$1"); contents = JSON.parse(sanitized); } catch (e2: any) { return createErrorResponse( `Failed to parse Flex contents JSON from Gemini: ${e1?.message || e1}`, ); } } const msg = { type: "flex", altText, contents }; const parsed = flexMessageSchema.safeParse(msg); if (!parsed.success) { return createErrorResponse( `Generated Flex invalid: ${parsed.error.issues.map(i => i.message).join(", ")}`, ); } const response = await this.client.pushMessage({ to: userId, messages: [parsed.data as unknown as messagingApi.Message], }); return createSuccessResponse(response); } catch (error: any) { return createErrorResponse( `Failed to push Gemini Flex: ${error.message}`, ); } }, ); } }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/tndfame/mcp_management'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.