Skip to main content
Glama

OpenAI MCP Server

by bhjo0930
context-optimizer-tool.ts (7.43 kB)
import { z } from 'zod'; import { BaseTool, ToolDefinition, ToolResponse } from './base-tool.js'; const ContextOptimizerSchema = z.object({ context: z.string().describe('Context text to optimize'), maxTokens: z.number().describe('Maximum tokens for optimized context'), preservationStrategy: z.enum(['important_first', 'recent_first', 'balanced', 'semantic']) .optional() .describe('Strategy for preserving context'), preserveKeywords: z.array(z.string()).optional().describe('Keywords to preserve') }); export class ContextOptimizerTool extends BaseTool { readonly definition: ToolDefinition = { name: 'optimize_context_window', description: 'Optimize long context for efficient token usage while preserving important information', inputSchema: ContextOptimizerSchema }; async execute(args: z.infer<typeof ContextOptimizerSchema>): Promise<ToolResponse> { try { const originalTokens = this.estimateTokens(args.context); if (originalTokens <= args.maxTokens) { return this.createSuccessResponse( `Context is already within token limit:\n` + `- Original: ${originalTokens} tokens\n` + `- Limit: ${args.maxTokens} tokens\n\n` + `No optimization needed.` ); } const optimizedContext = this.optimizeContext( args.context, args.maxTokens, args.preservationStrategy || 'balanced', args.preserveKeywords || [] ); const optimizedTokens = this.estimateTokens(optimizedContext); const reductionPercent = ((originalTokens - optimizedTokens) / originalTokens * 100).toFixed(1); let result = `Context Optimization Results:\n\n`; result += `📊 **Token Reduction**:\n`; result += `- Original: ${originalTokens} tokens\n`; result += `- Optimized: ${optimizedTokens} tokens\n`; result += `- Reduction: ${reductionPercent}%\n\n`; result += `🔧 **Strategy Used**: ${args.preservationStrategy || 'balanced'}\n\n`; if (args.preserveKeywords && args.preserveKeywords.length > 0) { result += `🔑 **Keywords Preserved**: ${args.preserveKeywords.join(', ')}\n\n`; } result += `📝 **Optimized Context**:\n`; result += 
`${optimizedContext}`; return this.createSuccessResponse(result); } catch (error) { return this.createErrorResponse(`Context optimization failed: ${error instanceof Error ? error.message : 'Unknown error'}`); } } private optimizeContext( context: string, maxTokens: number, strategy: string, preserveKeywords: string[] ): string { const sentences = this.splitIntoSentences(context); const targetRatio = maxTokens / this.estimateTokens(context); switch (strategy) { case 'important_first': return this.preserveImportantSentences(sentences, targetRatio, preserveKeywords); case 'recent_first': return this.preserveRecentSentences(sentences, targetRatio, preserveKeywords); case 'semantic': return this.preserveSemanticallySimilar(sentences, targetRatio, preserveKeywords); case 'balanced': default: return this.balancedOptimization(sentences, targetRatio, preserveKeywords); } } private splitIntoSentences(text: string): string[] { return text .split(/[.!?]+/) .map(s => s.trim()) .filter(s => s.length > 0) .map(s => s + '.'); } private preserveImportantSentences(sentences: string[], targetRatio: number, keywords: string[]): string { // Score sentences based on importance indicators const scoredSentences = sentences.map(sentence => ({ sentence, score: this.calculateImportanceScore(sentence, keywords) })); // Sort by importance and take top sentences scoredSentences.sort((a, b) => b.score - a.score); const targetCount = Math.floor(sentences.length * targetRatio); return scoredSentences .slice(0, targetCount) .map(item => item.sentence) .join(' '); } private preserveRecentSentences(sentences: string[], targetRatio: number, keywords: string[]): string { const targetCount = Math.floor(sentences.length * targetRatio); // Preserve keyword sentences + recent sentences const keywordSentences = sentences.filter(s => keywords.some(keyword => s.toLowerCase().includes(keyword.toLowerCase())) ); const recentSentences = sentences.slice(-targetCount); const combined = [...keywordSentences, 
...recentSentences]; // Remove duplicates and return return [...new Set(combined)].join(' '); } private preserveSemanticallySimilar(sentences: string[], targetRatio: number, keywords: string[]): string { // Simple semantic similarity based on shared words const targetCount = Math.floor(sentences.length * targetRatio); if (keywords.length === 0) { return this.balancedOptimization(sentences, targetRatio, keywords); } const scoredSentences = sentences.map(sentence => ({ sentence, score: this.calculateSemanticScore(sentence, keywords) })); scoredSentences.sort((a, b) => b.score - a.score); return scoredSentences .slice(0, targetCount) .map(item => item.sentence) .join(' '); } private balancedOptimization(sentences: string[], targetRatio: number, keywords: string[]): string { const targetCount = Math.floor(sentences.length * targetRatio); // Combine importance and recency const scoredSentences = sentences.map((sentence, index) => ({ sentence, score: this.calculateImportanceScore(sentence, keywords) + (index / sentences.length) * 0.3 // Recency bonus })); scoredSentences.sort((a, b) => b.score - a.score); return scoredSentences .slice(0, targetCount) .map(item => item.sentence) .join(' '); } private calculateImportanceScore(sentence: string, keywords: string[]): number { let score = 0; const lowerSentence = sentence.toLowerCase(); // Keyword matches keywords.forEach(keyword => { if (lowerSentence.includes(keyword.toLowerCase())) { score += 2; } }); // Important words/phrases const importantIndicators = [ 'important', 'critical', 'key', 'main', 'primary', 'significant', 'major', 'essential', 'crucial', 'note that', 'remember', 'warning', 'caution' ]; importantIndicators.forEach(indicator => { if (lowerSentence.includes(indicator)) { score += 1; } }); // Numbers and data often important if (/\d+/.test(sentence)) { score += 0.5; } // Questions might be important if (sentence.includes('?')) { score += 0.5; } return score; } private calculateSemanticScore(sentence: string, 
keywords: string[]): number { let score = 0; const words = sentence.toLowerCase().split(/\s+/); keywords.forEach(keyword => { const keywordWords = keyword.toLowerCase().split(/\s+/); keywordWords.forEach(word => { if (words.includes(word)) { score += 1; } }); }); return score; } private estimateTokens(text: string): number { // Simple token estimation: 1 token ≈ 4 characters return Math.ceil(text.length / 4); } }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bhjo0930/openai_mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.