Skip to main content
Glama

push_gemini_text

Generate text using Gemini AI and send it as a message to LINE users through the LINE Bot MCP Server.

Instructions

Generate text with Gemini and push it to a LINE user as a text message.

Input Schema

TableJSON Schema
| Name   | Required | Description | Default |
| ------ | -------- | ----------- | ------- |
| userId | No  | The user ID to receive a message. Defaults to DESTINATION_USER_ID. | U1a2b3c4d5e6f7g8h9i0j1k2l3m4n5o6p |
| prompt | Yes | Prompt to send to Gemini | — |
| model  | No  | Gemini model name, e.g., gemini-1.5-flash | gemini-1.5-flash |

Implementation Reference

  • The core handler function that invokes the Gemini generative AI API (trying multiple models and API versions), extracts the generated text, truncates it to LINE's 2000 char limit, and pushes it as a text message to the specified user ID using the LINE Messaging API client.
    }, async ({ userId, prompt, model }) => { if (!userId) { return createErrorResponse(NO_USER_ID_ERROR); } const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY; if (!apiKey) { return createErrorResponse( "Please set GEMINI_API_KEY (or GOOGLE_API_KEY) in environment variables.", ); } try { async function callGeminiOnce( modelName: string, apiVersion: "v1" | "v1beta", ) { const endpoint = `https://generativelanguage.googleapis.com/${apiVersion}/models/${encodeURIComponent( modelName, )}:generateContent`; const body = { contents: [ { role: "user", parts: [{ text: prompt }], }, ], }; const res = await fetch(endpoint, { method: "POST", headers: { "Content-Type": "application/json", "X-goog-api-key": apiKey, }, body: JSON.stringify(body), }); return res; } const tryModels: string[] = [model]; if (!model.endsWith("-latest")) tryModels.push(`${model}-latest`); for (const m of [ "gemini-2.0-flash", "gemini-2.0-flash-latest", "gemini-1.5-flash-latest", ]) { if (!tryModels.includes(m)) tryModels.push(m); } let res: Response | undefined; let lastErrorText = ""; for (const m of tryModels) { for (const ver of ["v1", "v1beta"] as const) { res = await callGeminiOnce(m, ver); if (res.ok) { break; } lastErrorText = await res.text(); if (res.status !== 404) break; } if (res?.ok) break; } if (!res || !res.ok) { return createErrorResponse( `Gemini API error: HTTP ${res?.status} ${res?.statusText} - ${lastErrorText}`, ); } const data = (await res.json()) as GenerateContentResponse; const generated = data?.candidates?.[0]?.content?.parts ?.map(p => p.text || "") .join("") || ""; if (!generated) { return createErrorResponse( data?.error?.message || "Gemini returned empty content.", ); } // LINE text message limit is 2000 chars const textMessage = generated.slice(0, 2000); const response = await this.client.pushMessage({ to: userId, messages: [ { type: "text", text: textMessage, } as unknown as messagingApi.Message, ], }); return createSuccessResponse(response); } 
catch (error: any) { return createErrorResponse( `Failed to push Gemini text: ${error.message}`, ); } }, );
  • Zod-based input schema definition for the tool parameters: userId (string with default), model (string with default 'gemini-1.5-flash'), and prompt (non-empty string). Used in server.tool call.
    const userIdSchema = z .string() .default(this.destinationId) .describe( "The user ID to receive a message. Defaults to DESTINATION_USER_ID.", ); const modelSchema = z .string() .default("gemini-1.5-flash") .describe("Gemini model name, e.g., gemini-1.5-flash"); const promptSchema = z.string().min(1).describe("Prompt to send to Gemini"); server.tool( "push_gemini_text", "Generate text with Gemini and push it to a LINE user as a text message.", { userId: userIdSchema, prompt: promptSchema, model: modelSchema, },
  • The register method in the PushGeminiText class that registers the 'push_gemini_text' tool on the MCP server using server.tool, including name, description, schema, and handler.
    /**
     * Registers the `push_gemini_text` tool on the MCP server.
     *
     * The tool generates text via the Gemini generateContent REST API
     * (trying the requested model plus fallbacks, on both v1 and v1beta),
     * truncates the result to LINE's 2000-character text limit, and pushes
     * it to `userId` through the LINE Messaging API client.
     */
    register(server: McpServer) {
      // --- Zod input schemas -------------------------------------------------
      const userIdSchema = z
        .string()
        .default(this.destinationId)
        .describe(
          "The user ID to receive a message. Defaults to DESTINATION_USER_ID.",
        );
      const modelSchema = z
        .string()
        .default("gemini-1.5-flash")
        .describe("Gemini model name, e.g., gemini-1.5-flash");
      const promptSchema = z.string().min(1).describe("Prompt to send to Gemini");
      server.tool(
        "push_gemini_text",
        "Generate text with Gemini and push it to a LINE user as a text message.",
        {
          userId: userIdSchema,
          prompt: promptSchema,
          model: modelSchema,
        },
        async ({ userId, prompt, model }) => {
          if (!userId) {
            return createErrorResponse(NO_USER_ID_ERROR);
          }
          // Either env var name is accepted for the Gemini API key.
          const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
          if (!apiKey) {
            return createErrorResponse(
              "Please set GEMINI_API_KEY (or GOOGLE_API_KEY) in environment variables.",
            );
          }
          try {
            // One generateContent request against a single model / API version.
            const callGeminiOnce = async (
              modelName: string,
              apiVersion: "v1" | "v1beta",
            ): Promise<Response> => {
              const endpoint = `https://generativelanguage.googleapis.com/${apiVersion}/models/${encodeURIComponent(
                modelName,
              )}:generateContent`;
              const body = {
                contents: [
                  {
                    role: "user",
                    parts: [{ text: prompt }],
                  },
                ],
              };
              return fetch(endpoint, {
                method: "POST",
                headers: {
                  "Content-Type": "application/json",
                  "X-goog-api-key": apiKey,
                },
                body: JSON.stringify(body),
              });
            };
            // Fallback list: the requested model first, then its "-latest"
            // variant, then known-good defaults — without duplicates.
            const tryModels: string[] = [model];
            if (!model.endsWith("-latest")) tryModels.push(`${model}-latest`);
            for (const m of [
              "gemini-2.0-flash",
              "gemini-2.0-flash-latest",
              "gemini-1.5-flash-latest",
            ]) {
              if (!tryModels.includes(m)) tryModels.push(m);
            }
            let res: Response | undefined;
            let lastErrorText = "";
            for (const m of tryModels) {
              for (const ver of ["v1", "v1beta"] as const) {
                res = await callGeminiOnce(m, ver);
                if (res.ok) break;
                lastErrorText = await res.text();
                // Only a 404 (model unknown on this API version) justifies
                // trying the other version; any other error moves on to the
                // next model in the fallback list.
                if (res.status !== 404) break;
              }
              if (res?.ok) break;
            }
            if (!res || !res.ok) {
              return createErrorResponse(
                `Gemini API error: HTTP ${res?.status} ${res?.statusText} - ${lastErrorText}`,
              );
            }
            const data = (await res.json()) as GenerateContentResponse;
            // Concatenate all text parts of the first candidate.
            const generated =
              data?.candidates?.[0]?.content?.parts
                ?.map(p => p.text || "")
                .join("") || "";
            if (!generated) {
              return createErrorResponse(
                data?.error?.message || "Gemini returned empty content.",
              );
            }
            // LINE text message limit is 2000 chars
            const textMessage = generated.slice(0, 2000);
            const response = await this.client.pushMessage({
              to: userId,
              messages: [
                {
                  type: "text",
                  text: textMessage,
                } as unknown as messagingApi.Message,
              ],
            });
            return createSuccessResponse(response);
          } catch (error: unknown) {
            // Strict-mode catch: narrow before reading `.message`
            // (was `error: any`, which bypasses the checker).
            const message =
              error instanceof Error ? error.message : String(error);
            return createErrorResponse(`Failed to push Gemini text: ${message}`);
          }
        },
      );
    }
  • src/index.ts:71-71 (registration)
    Global registration of the PushGeminiText tool instance on the MCP server in the main entry point, passing the LINE messaging API client and default destination user ID.
    new PushGeminiText(messagingApiClient, destinationId).register(server);
  • TypeScript type definition for the Gemini API generateContent response structure, used to parse the API output.
    type GenerateContentResponse = { candidates?: Array<{ content?: { parts?: Array<{ text?: string }>; }; }>; error?: { message?: string }; };

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/tndfame/mcp_management'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.