llm.ts (1.4 kB)
import { pipeline } from '@huggingface/transformers';

// Singleton to hold the pipeline instance so the model is only loaded once
let generator: any = null;

export async function optimizeContent(content: string, prompt: string): Promise<string> {
  if (!prompt) {
    return content;
  }

  if (!generator) {
    // console.error writes to stderr, keeping stdout free for protocol messages
    console.error('Loading local model (LaMini-Flan-T5-248M)...');
    // Using an instruction-tuned model for better prompt adherence
    generator = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-248M') as any;
  }

  // If content is too short, just return it unchanged
  // (a prompt could ask to expand it, but we keep it simple)
  if (content.length < 50) {
    return content;
  }

  try {
    // Truncate content to stay within the model's context window
    // (LaMini-Flan-T5 handles roughly 512-1024 tokens); be conservative.
    const truncatedContent = content.slice(0, 2000);
    const input = `Instruction: ${prompt}\n\nInput: ${truncatedContent}`;

    const result = await generator(input, {
      max_new_tokens: 512,
      do_sample: true, // temperature only takes effect when sampling is enabled
      temperature: 0.3,
      repetition_penalty: 1.2,
    });

    return result[0]?.generated_text || content;
  } catch (error) {
    console.error('Error optimizing content:', error);
    return content; // Fall back to the original content on error
  }
}
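For context, here is a minimal usage sketch of the function above. The './llm' import path, the placeholder input text, and the prompt wording are illustrative assumptions, not part of the file itself:

import { optimizeContent } from './llm'; // hypothetical relative path to the file above

// Placeholder input; in practice this would be fetched page content.
const article = 'Long article text goes here...';

// Ask the local model to condense the text; returns the original on failure.
const summary = await optimizeContent(article, 'Summarize the key points of this text.');
console.log(summary);

On the first call this downloads and initializes the model, so expect a noticeable delay; subsequent calls reuse the cached singleton.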


MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/v3nom/toon-fetch'
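For programmatic use, a minimal TypeScript equivalent of the curl call might look like the sketch below. It hits the same endpoint shown above; the response schema is not documented here, so the result is simply logged as-is:

// Minimal sketch: fetch this server's metadata from the Glama MCP API.
const res = await fetch('https://glama.ai/api/mcp/v1/servers/v3nom/toon-fetch');
if (!res.ok) {
  throw new Error(`Request failed: ${res.status}`);
}
const server = await res.json(); // response shape depends on the API
console.log(server);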

If you have feedback or need assistance with the MCP directory API, please join our Discord server.