
mcp-google-sheets

index.ts (1.65 kB)
import { encoding_for_model, TiktokenModel } from 'tiktoken';

// Count the tokens in a string with tiktoken; fall back to a rough
// heuristic for models tiktoken does not recognize.
export const calculateTokensFromString = (text: string, model: string) => {
  try {
    const encoder = encoding_for_model(model as TiktokenModel);
    const tokens = encoder.encode(text);
    encoder.free();
    return tokens.length;
  } catch (e) {
    // Model not supported by tiktoken: approximate one token per 4 chars.
    return Math.round(text.length / 4);
  }
};

// Sum token counts across a list of messages. Counting is synchronous;
// the async signature is kept for callers that await it.
export const calculateMessagesTokenSize = async (
  messages: string[],
  model: string
) => {
  return messages.reduce(
    (total, message) => total + calculateTokensFromString(message, model),
    0
  );
};

// Drop the oldest ~10% of messages, recursing until the remainder fits
// comfortably under the token budget.
// TODO: Summarize context instead of cutting it off.
export const reduceContextSize = async (
  messages: string[],
  model: string,
  maxTokens: number
): Promise<string[]> => {
  const cutoffSize = Math.round(messages.length * 0.1);
  // splice removes everything after the first `cutoffSize` entries in place,
  // so `cutoffMessages` is the history with the oldest tenth dropped.
  const cutoffMessages = messages.splice(cutoffSize, messages.length - 1);
  if (
    (await calculateMessagesTokenSize(cutoffMessages, model)) >
    maxTokens / 1.5
  ) {
    return reduceContextSize(cutoffMessages, model, maxTokens);
  }
  return cutoffMessages;
};

// True when the history is within ~10% of either the global token limit
// or the model's context window minus the completion budget.
export const exceedsHistoryLimit = (
  tokenLength: number,
  model: string,
  maxTokens: number
) => {
  return (
    tokenLength >= tokenLimit / 1.1 ||
    tokenLength >= (modelTokenLimit(model) - maxTokens) / 1.1
  );
};

export const tokenLimit = 32000;

// Per-model context window sizes; currently a single conservative default.
export const modelTokenLimit = (model: string) => {
  switch (model) {
    default:
      return 2048;
  }
};
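A minimal usage sketch for these helpers. The model name, token budget, and sample messages below are illustrative rather than part of the module, and top-level await assumes an ES-module context. Note that reduceContextSize mutates its input via splice, so a copy of the history is passed in:

import {
  calculateMessagesTokenSize,
  exceedsHistoryLimit,
  reduceContextSize,
} from './index';

// Illustrative values; any tiktoken-supported model name works here.
const model = 'gpt-3.5-turbo';
const maxTokens = 1024; // completion budget reserved for the reply

const history = [
  'You are a helpful assistant.',
  'Summarize the attached document.',
];

const used = await calculateMessagesTokenSize(history, model);
if (exceedsHistoryLimit(used, model, maxTokens)) {
  // Trim the oldest messages until the rest fits the budget;
  // pass a copy because reduceContextSize splices its argument.
  const trimmed = await reduceContextSize([...history], model, maxTokens);
  console.log(`Reduced history to ${trimmed.length} messages`);
}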

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/activepieces/activepieces'
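The same endpoint can be called programmatically. A minimal TypeScript sketch using the built-in fetch; the response shape is not documented on this page, so the untyped json() result is an assumption to verify against the API docs:

const response = await fetch(
  'https://glama.ai/api/mcp/v1/servers/activepieces/activepieces'
);
if (!response.ok) {
  throw new Error(`MCP directory API request failed: ${response.status}`);
}
// Response shape assumed to be JSON server metadata; check the API docs.
const server = await response.json();
console.log(server);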

If you have feedback or need assistance with the MCP directory API, please join our Discord server.