Skip to main content
Glama
clusterInfo.ts5.85 kB
import { exec } from "child_process"; import util from "util"; const execAsync = util.promisify(exec); async function runCmd(cmd: string): Promise<string> { try { const { stdout } = await execAsync(cmd); return stdout.trim(); } catch (err: any) { return `Error: ${err.message}`; } } export async function clusterInfo() { try { // Cluster basic info const clusterName = await runCmd( "kubectl config view --minify -o jsonpath='{.clusters[0].name}'" ); const apiServer = await runCmd( "kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'" ); // Cluster version const versionJson = await runCmd("kubectl version -o json"); let clusterVersion: any = {}; try { const v = JSON.parse(versionJson); clusterVersion = { clientVersion: v.clientVersion?.gitVersion || "unknown", serverVersion: v.serverVersion?.gitVersion || "unknown", }; } catch { clusterVersion = { error: "Failed to parse version info" }; } // Node info (size + capacity) const nodesJson = await runCmd("kubectl get nodes -o json"); let nodeSummary: any = {}; let totalCPU = 0; let totalMemory = 0; try { const nodes = JSON.parse(nodesJson).items || []; nodeSummary.totalNodes = nodes.length; nodeSummary.nodes = nodes.map((n: any) => { const cpu = parseInt(n.status.capacity.cpu); const memory = n.status.capacity.memory; // memory is in Ki, Mi, Gi → normalize to Mi let memMi = 0; if (memory.endsWith("Ki")) { memMi = Math.floor(parseInt(memory.replace("Ki", "")) / 1024); } else if (memory.endsWith("Mi")) { memMi = parseInt(memory.replace("Mi", "")); } else if (memory.endsWith("Gi")) { memMi = parseInt(memory.replace("Gi", "")) * 1024; } totalCPU += cpu; totalMemory += memMi; return { name: n.metadata.name, cpu: `${cpu} cores`, memory: `${memMi} Mi`, }; }); nodeSummary.totalCPU = `${totalCPU} cores`; nodeSummary.totalMemory = `${totalMemory} Mi`; } catch { nodeSummary = { error: "Failed to parse node info" }; } // Resource usage (CPU & memory from metrics-server) let resourceUsage: any = {}; const 
metricsJson = await runCmd("kubectl top nodes -o json"); try { const metrics = JSON.parse(metricsJson).items || []; let usedCPU = 0; let usedMemory = 0; metrics.forEach((m: any) => { // CPU in millicores const cpu = parseInt(m.usage.cpu.replace("n", "")) / 1000000; // convert nano to millicores const mem = m.usage.memory; let memMi = 0; if (mem.endsWith("Ki")) { memMi = Math.floor(parseInt(mem.replace("Ki", "")) / 1024); } else if (mem.endsWith("Mi")) { memMi = parseInt(mem.replace("Mi", "")); } else if (mem.endsWith("Gi")) { memMi = parseInt(mem.replace("Gi", "")) * 1024; } usedCPU += Math.round(cpu / 1000); // cores usedMemory += memMi; }); resourceUsage = { usedCPU: `${usedCPU} cores`, totalCPU: `${totalCPU} cores`, availableCPU: `${totalCPU - usedCPU} cores`, usedMemory: `${usedMemory} Mi`, totalMemory: `${totalMemory} Mi`, availableMemory: `${totalMemory - usedMemory} Mi`, }; } catch { resourceUsage = { error: "Metrics API not available. Install metrics-server." }; } // Pods per namespace const podsJson = await runCmd("kubectl get pods --all-namespaces -o json"); let podSummary: any = {}; try { const pods = JSON.parse(podsJson).items || []; podSummary.totalPods = pods.length; podSummary.byNamespace = {}; pods.forEach((p: any) => { const ns = p.metadata.namespace; podSummary.byNamespace[ns] = (podSummary.byNamespace[ns] || 0) + 1; }); } catch { podSummary = { error: "Failed to parse pod info" }; } // Storage (PVCs + capacity) const pvcJson = await runCmd("kubectl get pvc --all-namespaces -o json"); let pvcSummary: any = {}; try { const pvcs = JSON.parse(pvcJson).items || []; let totalRequested = 0; pvcSummary.totalPVCs = pvcs.length; pvcSummary.claims = pvcs.map((p: any) => { const size = p.spec.resources.requests?.storage || "0Gi"; let sizeGi = 0; if (size.endsWith("Mi")) { sizeGi = Math.ceil(parseInt(size.replace("Mi", "")) / 1024); } else if (size.endsWith("Gi")) { sizeGi = parseInt(size.replace("Gi", "")); } totalRequested += sizeGi; return { namespace: 
p.metadata.namespace, name: p.metadata.name, storage: size, status: p.status.phase, }; }); pvcSummary.totalRequested = `${totalRequested} Gi`; } catch { pvcSummary = { error: "Failed to parse PVC info" }; } // Final aggregated report const report = { clusterName: clusterName.replace(/'/g, ""), apiServer: apiServer.replace(/'/g, ""), clusterVersion, nodeSummary, resourceUsage, podSummary, pvcSummary, timestamp: new Date().toISOString(), }; return { content: [ { type: "text", text: JSON.stringify(report, null, 2), }, ], }; } catch (err: any) { return { content: [{ type: "text", text: `Error fetching cluster info: ${err.message}` }], }; } } export const clusterInfoSchema = { name: "cluster_info", description: "Get full Kubernetes cluster details: name, API server, version, nodes, CPU/memory usage, pods, PVCs, and usage summary", parameters: {}, };

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Abinesh0206/kube-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.