#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { TokenMeasurementEngine } from "./TokenMeasurementEngine.js";
import { IncrementalImpactAnalyzer } from "./IncrementalImpactAnalyzer.js";
import { MCPConfigurationAnalyzer } from "./MCPConfigurationAnalyzer.js";
// Server identity reported to MCP clients during the initialize handshake.
const SERVER_INFO = {
  name: "token-analyzer-mcp",
  version: "1.0.0",
};

// Only the "tools" capability is advertised; no resources or prompts.
const SERVER_OPTIONS = {
  capabilities: {
    tools: {},
  },
};

const server = new Server(SERVER_INFO, SERVER_OPTIONS);
// Initialize analysis engines
// tokenEngine: character-based token counting (used by count_text_tokens and analyze_file_tokens).
const tokenEngine = new TokenMeasurementEngine();
// impactAnalyzer: full per-server measurement plus incremental/cumulative impact steps.
const impactAnalyzer = new IncrementalImpactAnalyzer();
// configAnalyzer: locates and parses the MCP configuration and lists server entries.
const configAnalyzer = new MCPConfigurationAnalyzer();
// Tool definitions for token analysis and optimization
// Tool definitions advertised to MCP clients. Each inputSchema is a JSON Schema
// object; keep tool names and argument shapes in sync with the
// CallToolRequestSchema dispatch handler below.
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      {
        name: "analyze_mcp_servers",
        description: "Analyze token consumption of all configured MCP servers with detailed breakdown",
        inputSchema: {
          type: "object",
          properties: {
            include_incremental: {
              type: "boolean",
              description: "Include incremental impact analysis showing cumulative effect",
              default: true
            },
            include_recommendations: {
              type: "boolean",
              description: "Include optimization recommendations",
              default: true
            }
          },
          additionalProperties: false
        }
      },
      {
        name: "count_text_tokens",
        description: "Count tokens in arbitrary text using character-based approximation",
        inputSchema: {
          type: "object",
          properties: {
            text: {
              type: "string",
              description: "Text to count tokens for"
            }
          },
          required: ["text"],
          additionalProperties: false
        }
      },
      {
        name: "analyze_context_efficiency",
        description: "Evaluate total context window usage and overhead percentage",
        inputSchema: {
          type: "object",
          properties: {
            window_size: {
              type: "number",
              description: "Context window size to analyze against (default: 200000)",
              default: 200000
            }
          },
          additionalProperties: false
        }
      },
      {
        name: "get_mcp_configuration",
        description: "Show current MCP server configuration and status",
        inputSchema: {
          type: "object",
          properties: {},
          additionalProperties: false
        }
      },
      {
        name: "quick_overhead_estimate",
        description: "Fast token overhead estimation without connecting to servers",
        inputSchema: {
          type: "object",
          properties: {},
          additionalProperties: false
        }
      },
      {
        name: "analyze_file_tokens",
        description: "Count tokens in a file or multiple files",
        inputSchema: {
          type: "object",
          properties: {
            // The handler reads the path directly with fs.readFile, so globs are
            // NOT expanded; the description must not promise glob support.
            file_path: {
              type: "string",
              description: "Path to a single file to analyze (glob patterns are not expanded)"
            },
            files: {
              type: "array",
              items: { type: "string" },
              description: "Array of file paths to analyze"
            }
          },
          additionalProperties: false
        }
      }
    ]
  };
});
// ---------------------------------------------------------------------------
// Tool dispatch
// ---------------------------------------------------------------------------

/** Best-effort extraction of a human-readable message from a thrown value. */
function describeError(error: unknown): string {
  return error instanceof Error ? error.message : String(error);
}

/** Wrap a JSON-serializable payload as a successful text tool response. */
function textResult(payload: unknown) {
  return {
    content: [{
      type: "text" as const,
      text: JSON.stringify(payload, null, 2)
    }]
  };
}

/** Same as textResult, but flagged with isError so clients can surface it. */
function errorResult(payload: unknown) {
  return { ...textResult(payload), isError: true };
}

// Dispatch tool calls by name. Every case returns one text content block whose
// body is pretty-printed JSON; failures carry isError: true.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;
  try {
    switch (name) {
      case "analyze_mcp_servers": {
        // Full measurement: connects to each configured server and measures the
        // token overhead its tool schemas add to the context window.
        const results: any = await impactAnalyzer.performCompleteAnalysis();
        if (results.error) {
          return errorResult({
            error: results.error,
            checked_paths: results.configPaths
          });
        }
        // Both options default to true; only an explicit `false` disables them.
        const includeIncremental = args?.include_incremental !== false;
        const includeRecommendations = args?.include_recommendations !== false;
        const response: any = {
          summary: {
            total_servers: results.summary.totalServers,
            active_servers: results.summary.activeServers,
            total_tools: results.summary.totalTools,
            total_overhead_tokens: results.overhead.totalOverhead,
            context_usage_percentage: results.overhead.overheadPercentage,
            remaining_context: results.overhead.remainingContext
          },
          servers: results.serverResults.map((server: any) => ({
            name: server.serverName,
            success: server.success,
            tools: server.toolCount || 0,
            tokens: server.totalTokens || 0,
            average_per_tool: server.averageTokensPerTool || 0,
            error: server.error
          }))
        };
        if (includeIncremental && results.incrementalAnalysis) {
          response.incremental_impact = results.incrementalAnalysis.steps.map((step: any) => ({
            server: step.serverName,
            tokens_added: step.tokensAdded,
            cumulative_tokens: step.cumulativeTokens,
            cumulative_percentage: step.cumulativePercentage
          }));
        }
        if (includeRecommendations && results.recommendations) {
          response.recommendations = results.recommendations;
        }
        return textResult(response);
      }
      case "count_text_tokens": {
        const text = args?.text as string;
        if (!text) {
          return errorResult({ error: "No text provided" });
        }
        const tokens = tokenEngine.countTokens(text);
        const charCount = text.length;
        const wordCount = text.split(/\s+/).filter((w: string) => w.length > 0).length;
        return textResult({
          tokens,
          characters: charCount,
          words: wordCount,
          // Guard against a zero token count so we never emit "NaN".
          avg_chars_per_token: tokens > 0 ? (charCount / tokens).toFixed(2) : "n/a",
          method: "character-based approximation (~3.5-4 chars/token)"
        });
      }
      case "analyze_context_efficiency": {
        // `||` (not `??`) is deliberate: a window_size of 0 must fall back to
        // the default rather than produce a divide-by-zero downstream.
        const windowSize = (args?.window_size as number) || 200000;
        const results: any = await impactAnalyzer.performCompleteAnalysis();
        if (results.error) {
          return errorResult({ error: results.error });
        }
        const efficiency = {
          context_window: windowSize,
          overhead: {
            builtin_tools: results.overhead.builtInTokens,
            mcp_servers: results.overhead.mcpTokens,
            total: results.overhead.totalOverhead,
            percentage: results.overhead.overheadPercentage
          },
          available: {
            tokens: results.overhead.remainingContext,
            percentage: (100 - results.overhead.overheadPercentage).toFixed(2)
          },
          // Rough health bands for overhead as a share of the context window.
          status: results.overhead.overheadPercentage < 5 ? "optimal" :
            results.overhead.overheadPercentage < 10 ? "acceptable" :
              results.overhead.overheadPercentage < 15 ? "moderate" : "high"
        };
        return textResult(efficiency);
      }
      case "get_mcp_configuration": {
        const config: any = await configAnalyzer.analyzeConfiguration();
        return textResult({
          has_configuration: config.hasConfiguration,
          config_path: config.configPath,
          total_servers: config.totalServers,
          servers: config.servers.map((s: any) => ({
            name: s.name,
            type: s.configType,
            disabled: s.disabled || false,
            command: s.command
          }))
        });
      }
      case "quick_overhead_estimate": {
        const config: any = await configAnalyzer.analyzeConfiguration();
        if (!config.hasConfiguration) {
          return errorResult({
            error: "No MCP configuration found",
            checked_paths: configAnalyzer.configPaths
          });
        }
        const activeServers = config.servers.filter((s: any) => !s.disabled);
        const baseline: any = tokenEngine.measureBaselineTokens();
        // Conservative flat estimate per server; avoids connecting to anything.
        const estimatedTokensPerServer = 1500;
        const estimatedMcpTokens = activeServers.length * estimatedTokensPerServer;
        const totalEstimated = baseline.totalBuiltInTokens + estimatedMcpTokens;
        const percentageEstimated = (totalEstimated / 200000) * 100;
        return textResult({
          estimate: {
            builtin_tools: baseline.totalBuiltInTokens,
            active_servers: activeServers.length,
            estimated_mcp_tokens: estimatedMcpTokens,
            total_estimated: totalEstimated,
            percentage: percentageEstimated.toFixed(1)
          },
          note: "This is a fast estimate. Use 'analyze_mcp_servers' for accurate measurement.",
          servers: activeServers.map((s: any) => s.name)
        });
      }
      case "analyze_file_tokens": {
        const { readFile } = await import('fs/promises');
        // Accept either a single path or a genuine array of paths; anything
        // else (including a bare string in `files`) is rejected up front.
        let filePaths: string[] = [];
        const files = args?.files;
        if (args?.file_path) {
          filePaths = [args.file_path as string];
        } else if (Array.isArray(files)) {
          filePaths = files as string[];
        } else {
          return errorResult({ error: "No file path or files provided" });
        }
        const results = [];
        let totalTokens = 0;
        // Per-file failures are recorded inline so one bad path does not abort
        // the whole batch.
        for (const filePath of filePaths) {
          try {
            const content = await readFile(filePath, 'utf-8');
            const tokens = tokenEngine.countTokens(content);
            totalTokens += tokens;
            results.push({
              file: filePath,
              tokens,
              characters: content.length,
              lines: content.split('\n').length
            });
          } catch (error: unknown) {
            results.push({
              file: filePath,
              error: describeError(error)
            });
          }
        }
        return textResult({
          files: results,
          summary: {
            total_files: filePaths.length,
            successful: results.filter((r: any) => !r.error).length,
            total_tokens: totalTokens
          }
        });
      }
      default:
        return errorResult({
          error: `Unknown tool: ${name}`,
          available_tools: [
            "analyze_mcp_servers",
            "count_text_tokens",
            "analyze_context_efficiency",
            "get_mcp_configuration",
            "quick_overhead_estimate",
            "analyze_file_tokens"
          ]
        });
    }
  } catch (error: unknown) {
    // Narrow before touching .message/.stack — thrown values are not
    // guaranteed to be Error instances.
    return errorResult({
      error: `Error executing ${name}: ${describeError(error)}`,
      stack: error instanceof Error ? error.stack : undefined
    });
  }
});
/**
 * Entry point: bind the server to a stdio transport so an MCP client can
 * spawn this process and drive it over stdin/stdout.
 */
async function main(): Promise<void> {
  await server.connect(new StdioServerTransport());
}

// Fatal startup errors go to stderr (stdout is reserved for protocol traffic)
// and the process exits non-zero so supervisors notice the failure.
main().catch((error) => {
  console.error("Token analyzer server error:", error);
  process.exit(1);
});