Skip to main content
Glama

OpenAI MCP Server

by bhjo0930
token-analysis-tool.ts (4.37 kB)
import { z } from 'zod'; import { BaseTool, ToolDefinition, ToolResponse } from './base-tool.js'; const TokenAnalysisSchema = z.object({ text: z.string().describe('Text to analyze for token usage'), model: z.string().optional().describe('Model to use for token counting (default: gpt-4)'), includeOptimization: z.boolean().optional().describe('Include optimization suggestions') }); export class TokenAnalysisTool extends BaseTool { readonly definition: ToolDefinition = { name: 'analyze_token_usage', description: 'Analyze text for token usage and provide optimization suggestions', inputSchema: TokenAnalysisSchema }; async execute(args: z.infer<typeof TokenAnalysisSchema>): Promise<ToolResponse> { try { const tokenCount = this.estimateTokenCount(args.text, args.model || 'gpt-4'); const analysis = this.analyzeText(args.text); let result = `Token Analysis Results:\n\n`; result += `📊 **Token Count**: ${tokenCount} tokens\n`; result += `📝 **Text Length**: ${args.text.length} characters\n`; result += `⚡ **Efficiency**: ${(args.text.length / tokenCount).toFixed(2)} chars/token\n\n`; result += `📈 **Text Composition**:\n`; result += `- Words: ${analysis.wordCount}\n`; result += `- Sentences: ${analysis.sentenceCount}\n`; result += `- Paragraphs: ${analysis.paragraphCount}\n\n`; if (args.includeOptimization) { const optimizations = this.generateOptimizationSuggestions(args.text, tokenCount); result += `💡 **Optimization Suggestions**:\n${optimizations}\n`; } result += `💰 **Estimated Cost** (GPT-4):\n`; result += `- Input: $${(tokenCount * 0.03 / 1000).toFixed(4)}\n`; result += `- Output (500 tokens): $${(500 * 0.06 / 1000).toFixed(4)}\n`; return this.createSuccessResponse(result); } catch (error) { return this.createErrorResponse(`Token analysis failed: ${error instanceof Error ? 
error.message : 'Unknown error'}`); } } private estimateTokenCount(text: string, model: string): number { // Simple token estimation (more accurate counting would require tiktoken) // This is a rough approximation: 1 token ≈ 4 characters for English const baseTokens = Math.ceil(text.length / 4); // Adjust based on model type if (model.includes('gpt-4') || model.includes('gpt-5')) { return baseTokens; // More efficient tokenization } else { return Math.ceil(baseTokens * 1.1); // Slightly less efficient } } private analyzeText(text: string) { const wordCount = text.split(/\s+/).filter(word => word.length > 0).length; const sentenceCount = text.split(/[.!?]+/).filter(s => s.trim().length > 0).length; const paragraphCount = text.split(/\n\s*\n/).filter(p => p.trim().length > 0).length; return { wordCount, sentenceCount, paragraphCount }; } private generateOptimizationSuggestions(text: string, tokenCount: number): string { const suggestions: string[] = []; // Check for redundancy if (text.includes('very') || text.includes('really') || text.includes('quite')) { suggestions.push('• Remove filler words like "very", "really", "quite"'); } // Check for repetitive phrases const words = text.split(/\s+/); const wordFreq = new Map<string, number>(); words.forEach(word => { const cleaned = word.toLowerCase().replace(/[^\w]/g, ''); if (cleaned.length > 3) { wordFreq.set(cleaned, (wordFreq.get(cleaned) || 0) + 1); } }); const repeatedWords = Array.from(wordFreq.entries()) .filter(([_, count]) => count > 3) .map(([word, _]) => word); if (repeatedWords.length > 0) { suggestions.push(`• Consider varying repeated words: ${repeatedWords.slice(0, 3).join(', ')}`); } // Check for length if (tokenCount > 1000) { suggestions.push('• Consider breaking into smaller chunks for better processing'); } // Check for structure if (text.split('\n').length === 1 && tokenCount > 100) { suggestions.push('• Add structure with paragraphs or bullet points'); } if (suggestions.length === 0) { 
suggestions.push('• Text appears well-optimized'); } return suggestions.join('\n'); } }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bhjo0930/openai_mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.