package tools
import (
"context"
"fmt"
"io"
"os/exec"
"sort"
"strings"
"sync"
"time"
"github.com/modelcontextprotocol/go-sdk/mcp"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
k8scontext "mcp-k8swizard/internal/context"
"mcp-k8swizard/internal/k8s"
)
// CacheEntry represents a cached data entry
type CacheEntry struct {
Data interface{}
Timestamp time.Time
TTL time.Duration
}
// K8sTools handles Kubernetes-specific tool operations
type K8sTools struct {
clientManager k8s.ClientManagerInterface
clientCache map[string]*kubernetes.Clientset
cache map[string]CacheEntry
cacheMutex sync.RWMutex
clientMutex sync.RWMutex
cacheExpiry time.Duration
contextManager *k8scontext.Manager
}
// NewK8sTools creates a new instance of K8sTools
func NewK8sTools(cm *k8scontext.Manager) *K8sTools {
return &K8sTools{
clientManager: k8s.NewClientManager(),
clientCache: make(map[string]*kubernetes.Clientset),
cache: make(map[string]CacheEntry),
cacheExpiry: 30 * time.Second, // Default cache TTL
contextManager: cm,
}
}
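// Example (illustrative sketch only): constructing the tool set and calling a handler
// directly. The k8scontext.NewManager constructor and the "kind-dev" context name are
// assumptions for illustration; the handlers here never read the *mcp.CallToolRequest
// argument, so nil is passed for it.
//
//	kt := NewK8sTools(k8scontext.NewManager())
//	res, _, err := kt.HandleClusterInfo(context.Background(), nil, ClusterInfoArgs{Context: "kind-dev"})
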
// getCachedClient returns a cached client or creates a new one
func (kt *K8sTools) getCachedClient(contextName string) (*kubernetes.Clientset, error) {
// Nil-receiver safety check
if kt == nil {
return nil, fmt.Errorf("K8sTools instance is nil")
}
// Fast path: return a previously created client for this context (reads from a nil map are safe)
kt.clientMutex.RLock()
client, exists := kt.clientCache[contextName]
kt.clientMutex.RUnlock()
if exists && client != nil {
return client, nil
}
// Create a new client for the requested context
var err error
if contextName != "" {
client, err = kt.clientManager.GetClientWithContext(contextName)
} else {
client, err = kt.clientManager.GetClient()
}
if err != nil {
return nil, err
}
// Cache the client, lazily initialising the map if it was never set up
kt.clientMutex.Lock()
if kt.clientCache == nil {
kt.clientCache = make(map[string]*kubernetes.Clientset)
}
kt.clientCache[contextName] = client
kt.clientMutex.Unlock()
return client, nil
}
// getCachedData returns cached data or fetches fresh data
func (kt *K8sTools) getCachedData(key string, ttl time.Duration, fetchFunc func() (interface{}, error)) (interface{}, error) {
// Add nil safety checks
if kt == nil {
return nil, fmt.Errorf("K8sTools instance is nil")
}
// Fast path: return a cached entry that has not expired (reads from a nil map are safe)
kt.cacheMutex.RLock()
entry, exists := kt.cache[key]
kt.cacheMutex.RUnlock()
if exists && time.Since(entry.Timestamp) < entry.TTL {
return entry.Data, nil
}
// Fetch fresh data
data, err := fetchFunc()
if err != nil {
return nil, err
}
// Cache the result, lazily initialising the map if it was never set up
kt.cacheMutex.Lock()
if kt.cache == nil {
kt.cache = make(map[string]CacheEntry)
}
kt.cache[key] = CacheEntry{
Data: data,
Timestamp: time.Now(),
TTL: ttl,
}
kt.cacheMutex.Unlock()
return data, nil
}
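// Example (illustrative sketch only): any expensive lookup can be wrapped in
// getCachedData. The cache key, TTL, and the namespace listing below are assumptions
// chosen for illustration, not part of this package's API.
//
//	names, err := kt.getCachedData("namespaces:"+ctxName, time.Minute, func() (interface{}, error) {
//		list, err := client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
//		if err != nil {
//			return nil, err
//		}
//		out := make([]string, 0, len(list.Items))
//		for _, ns := range list.Items {
//			out = append(out, ns.Name)
//		}
//		return out, nil
//	})
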
// getAllContexts returns all available Kubernetes contexts, caching the result briefly
func (kt *K8sTools) getAllContexts() ([]string, error) {
contexts, err := kt.getCachedData("contexts", kt.cacheExpiry, func() (interface{}, error) {
return kt.fetchContextsFromKubectl()
})
if err != nil {
return nil, fmt.Errorf("failed to get contexts: %w", err)
}
return contexts.([]string), nil
}
// fetchContextsFromKubectl fetches contexts using kubectl command
func (kt *K8sTools) fetchContextsFromKubectl() ([]string, error) {
cmd := exec.Command("kubectl", "config", "get-contexts", "-o", "name")
output, err := cmd.Output()
if err != nil {
return nil, err
}
contexts := strings.Split(strings.TrimSpace(string(output)), "\n")
var result []string
for _, ctx := range contexts {
if ctx != "" {
result = append(result, ctx)
}
}
return result, nil
}
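// Example (illustrative only) of the `kubectl config get-contexts -o name` output
// parsed above, one context name per line; the names are hypothetical:
//
//	kind-dev
//	staging
//	prod-cluster
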
// getEffectiveContext returns the effective context to use
func (kt *K8sTools) getEffectiveContext(providedContext string) string {
if providedContext != "" {
return providedContext
}
if kt.contextManager != nil {
return kt.contextManager.GetCurrentContext()
}
return ""
}
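// isNodeReady reports whether a node's NodeReady condition is True. It is a small
// helper sketch used below: node.Status.Phase is deprecated and typically unset on
// current clusters, so readiness is derived from the node's conditions instead.
func isNodeReady(node v1.Node) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == v1.NodeReady {
return cond.Status == v1.ConditionTrue
}
}
return false
}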
// HandleClusterInfo handles cluster information requests
func (kt *K8sTools) HandleClusterInfo(ctx context.Context, req *mcp.CallToolRequest, args ClusterInfoArgs) (result *mcp.CallToolResult, data any, err error) {
// Add panic recovery
defer func() {
if r := recover(); r != nil {
logrus.Errorf("Panic in HandleClusterInfo: %v", r)
result = &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Internal error: %v", r)},
},
}
data = nil
err = nil
}
}()
logrus.Info("handleClusterInfo called")
logrus.Debugf("handleClusterInfo called with args: %+v", args)
effectiveContext := kt.getEffectiveContext(args.Context)
client, err := kt.getCachedClient(effectiveContext)
if err != nil {
logrus.Errorf("Failed to create Kubernetes client: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error connecting to Kubernetes: %v", err)},
},
}, nil, nil
}
// Get cluster version
version, err := client.Discovery().ServerVersion()
if err != nil {
logrus.Errorf("Failed to get server version: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error getting server version: %v", err)},
},
}, nil, nil
}
// Get nodes
nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list nodes: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error listing nodes: %v", err)},
},
}, nil, nil
}
// Get namespaces
namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list namespaces: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error listing namespaces: %v", err)},
},
}, nil, nil
}
info := fmt.Sprintf(`Kubernetes Cluster Information:
Version: %s
Nodes: %d
Namespaces: %d
Node Details:`,
version.GitVersion,
len(nodes.Items),
len(namespaces.Items))
for _, node := range nodes.Items {
// Report readiness from the NodeReady condition; Status.Phase is deprecated and usually empty
readiness := "NotReady"
if isNodeReady(node) {
readiness = "Ready"
}
info += fmt.Sprintf(`
- %s: %s (%s)`, node.Name, readiness, node.Status.NodeInfo.KubeletVersion)
}
info += "\n\nNamespace Details:"
for _, ns := range namespaces.Items {
info += fmt.Sprintf(`
- %s: %s`, ns.Name, ns.Status.Phase)
}
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: info},
},
}, nil, nil
}
// HandleCheckAllClusters handles parallel checking of all clusters
func (kt *K8sTools) HandleCheckAllClusters(ctx context.Context, req *mcp.CallToolRequest, args CheckAllClustersArgs) (*mcp.CallToolResult, any, error) {
logrus.Info("handleCheckAllClusters called")
logrus.Debugf("handleCheckAllClusters called with args: %+v", args)
// Get all contexts
contexts, err := kt.getAllContexts()
if err != nil {
logrus.Errorf("Failed to get contexts: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error getting contexts: %v", err)},
},
}, nil, nil
}
// Use goroutines for parallel checking
var wg sync.WaitGroup
results := make(map[string]ClusterInfo, len(contexts))
mutex := &sync.Mutex{}
for _, contextName := range contexts {
wg.Add(1)
go func(ctxName string) {
defer wg.Done()
info, err := kt.getClusterInfoForContext(ctxName)
mutex.Lock()
if err != nil {
results[ctxName] = ClusterInfo{Name: ctxName, Error: err.Error(), Status: "Error"}
} else {
results[ctxName] = info
}
mutex.Unlock()
}(contextName)
}
wg.Wait()
// Format results
formatted := kt.formatClusterResults(results)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: formatted},
},
}, nil, nil
}
// getClusterInfoForContext gets cluster info for a specific context
func (kt *K8sTools) getClusterInfoForContext(contextName string) (ClusterInfo, error) {
client, err := kt.getCachedClient(contextName)
if err != nil {
return ClusterInfo{Name: contextName}, err
}
// Use parallel API calls for better performance
var wg sync.WaitGroup
var serverVersion *version.Info
var nodes *v1.NodeList
var namespaces *v1.NamespaceList
var versionErr, nodesErr, namespacesErr error
wg.Add(3)
go func() {
defer wg.Done()
serverVersion, versionErr = client.Discovery().ServerVersion()
}()
go func() {
defer wg.Done()
nodes, nodesErr = client.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
}()
go func() {
defer wg.Done()
namespaces, namespacesErr = client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
}()
wg.Wait()
// Check for errors
if versionErr != nil {
return ClusterInfo{Name: contextName}, versionErr
}
if nodesErr != nil {
return ClusterInfo{Name: contextName}, nodesErr
}
if namespacesErr != nil {
return ClusterInfo{Name: contextName}, namespacesErr
}
// Count ready nodes via the NodeReady condition; Status.Phase is deprecated and
// comparing it against v1.NodeRunning would report zero ready nodes on modern clusters
readyNodes := 0
for _, node := range nodes.Items {
if isNodeReady(node) {
readyNodes++
}
}
// Determine status
status := "Healthy"
if readyNodes == 0 {
status = "No Ready Nodes"
} else if readyNodes < len(nodes.Items) {
status = "Partially Ready"
}
return ClusterInfo{
Name: contextName,
Version: serverVersion.GitVersion,
Nodes: len(nodes.Items),
Namespaces: len(namespaces.Items),
Status: status,
ReadyNodes: readyNodes,
TotalNodes: len(nodes.Items),
}, nil
}
// formatClusterResults formats the cluster results for display
func (kt *K8sTools) formatClusterResults(results map[string]ClusterInfo) string {
formatted := "## All Clusters Status\n\n"
// Sort context names so the report order is deterministic (map iteration order is random)
contextNames := make([]string, 0, len(results))
for name := range results {
contextNames = append(contextNames, name)
}
sort.Strings(contextNames)
for _, contextName := range contextNames {
info := results[contextName]
formatted += fmt.Sprintf("### %s\n", contextName)
if info.Error != "" {
formatted += fmt.Sprintf("❌ **Error**: %s\n\n", info.Error)
continue
}
statusIcon := "✅"
if info.Status == "Error" {
statusIcon = "❌"
} else if info.Status == "Partially Ready" {
statusIcon = "⚠️"
}
formatted += fmt.Sprintf("%s **Status**: %s\n", statusIcon, info.Status)
formatted += fmt.Sprintf("📊 **Version**: %s\n", info.Version)
formatted += fmt.Sprintf("🖥️ **Nodes**: %d/%d ready\n", info.ReadyNodes, info.TotalNodes)
formatted += fmt.Sprintf("📁 **Namespaces**: %d\n\n", info.Namespaces)
}
return formatted
}
// HandleListResources handles resource listing requests
func (kt *K8sTools) HandleListResources(ctx context.Context, req *mcp.CallToolRequest, args ListResourcesArgs) (result *mcp.CallToolResult, data any, err error) {
// Add panic recovery
defer func() {
if r := recover(); r != nil {
logrus.Errorf("Panic in HandleListResources: %v", r)
result = &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Internal error: %v", r)},
},
}
data = nil
err = nil
}
}()
logrus.Info("handleListResources called")
logrus.Debugf("handleListResources called with args: %+v", args)
client, err := kt.getCachedClient(kt.getEffectiveContext(args.Context))
if err != nil {
logrus.Errorf("Failed to create Kubernetes client: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error connecting to Kubernetes: %v", err)},
},
}, nil, nil
}
// Build list options
listOptions := metav1.ListOptions{}
if args.LabelSelector != "" {
listOptions.LabelSelector = args.LabelSelector
}
if args.Limit > 0 {
listOptions.Limit = int64(args.Limit)
}
// List resources based on kind
var resources interface{}
switch args.Kind {
case "pods":
resources, err = client.CoreV1().Pods(args.Namespace).List(ctx, listOptions)
case "services":
resources, err = client.CoreV1().Services(args.Namespace).List(ctx, listOptions)
case "deployments":
resources, err = client.AppsV1().Deployments(args.Namespace).List(ctx, listOptions)
case "nodes":
resources, err = client.CoreV1().Nodes().List(ctx, listOptions)
case "namespaces":
resources, err = client.CoreV1().Namespaces().List(ctx, listOptions)
case "configmaps":
resources, err = client.CoreV1().ConfigMaps(args.Namespace).List(ctx, listOptions)
case "secrets":
resources, err = client.CoreV1().Secrets(args.Namespace).List(ctx, listOptions)
default:
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Unsupported resource kind: %s", args.Kind)},
},
}, nil, nil
}
if err != nil {
logrus.Errorf("Failed to list %s: %v", args.Kind, err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error listing %s: %v", args.Kind, err)},
},
}, nil, nil
}
formatted := fmt.Sprintf("%s List:\n%+v", args.Kind, resources)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: formatted},
},
}, nil, nil
}
// HandleGetResource handles getting specific resource requests
func (kt *K8sTools) HandleGetResource(ctx context.Context, req *mcp.CallToolRequest, args GetResourceArgs) (*mcp.CallToolResult, any, error) {
logrus.Info("handleGetResource called")
logrus.Debugf("handleGetResource called with args: %+v", args)
client, err := kt.getCachedClient(kt.getEffectiveContext(args.Context))
if err != nil {
logrus.Errorf("Failed to create Kubernetes client: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error connecting to Kubernetes: %v", err)},
},
}, nil, nil
}
// Get resource based on kind
var resource interface{}
switch args.Kind {
case "pod":
resource, err = client.CoreV1().Pods(args.Namespace).Get(ctx, args.Name, metav1.GetOptions{})
case "service":
resource, err = client.CoreV1().Services(args.Namespace).Get(ctx, args.Name, metav1.GetOptions{})
case "deployment":
resource, err = client.AppsV1().Deployments(args.Namespace).Get(ctx, args.Name, metav1.GetOptions{})
case "node":
resource, err = client.CoreV1().Nodes().Get(ctx, args.Name, metav1.GetOptions{})
case "namespace":
resource, err = client.CoreV1().Namespaces().Get(ctx, args.Name, metav1.GetOptions{})
case "configmap":
resource, err = client.CoreV1().ConfigMaps(args.Namespace).Get(ctx, args.Name, metav1.GetOptions{})
case "secret":
resource, err = client.CoreV1().Secrets(args.Namespace).Get(ctx, args.Name, metav1.GetOptions{})
default:
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Unsupported resource kind: %s", args.Kind)},
},
}, nil, nil
}
if err != nil {
logrus.Errorf("Failed to get %s %s: %v", args.Kind, args.Name, err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error getting %s %s: %v", args.Kind, args.Name, err)},
},
}, nil, nil
}
formatted := fmt.Sprintf("%s Details:\n%+v", args.Kind, resource)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: formatted},
},
}, nil, nil
}
// HandleGetLogs handles getting pod logs requests
func (kt *K8sTools) HandleGetLogs(ctx context.Context, req *mcp.CallToolRequest, args GetLogsArgs) (*mcp.CallToolResult, any, error) {
logrus.Info("handleGetLogs called")
logrus.Debugf("handleGetLogs called with args: %+v", args)
client, err := kt.getCachedClient(kt.getEffectiveContext(args.Context))
if err != nil {
logrus.Errorf("Failed to create Kubernetes client: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error connecting to Kubernetes: %v", err)},
},
}, nil, nil
}
// Build log options
logOptions := &v1.PodLogOptions{}
if args.Container != "" {
logOptions.Container = args.Container
}
if args.Tail > 0 {
tailLines := int64(args.Tail)
logOptions.TailLines = &tailLines
}
if args.Since != "" {
if duration, err := time.ParseDuration(args.Since); err == nil {
sinceTime := metav1.NewTime(time.Now().Add(-duration))
logOptions.SinceTime = &sinceTime
} else {
logrus.Warnf("Failed to parse since duration '%s': %v", args.Since, err)
}
}
logOptions.Follow = args.Follow
// Get logs
logReq := client.CoreV1().Pods(args.Namespace).GetLogs(args.Pod, logOptions)
// Use Do() instead of Stream() for non-streaming logs
if !args.Follow {
podLogs := logReq.Do(ctx)
err = podLogs.Error()
if err != nil {
logrus.Errorf("Failed to get logs: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error getting logs: %v", err)},
},
}, nil, nil
}
data, err := podLogs.Raw()
if err != nil {
logrus.Errorf("Failed to read logs: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error reading logs: %v", err)},
},
}, nil, nil
}
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: string(data)},
},
}, nil, nil
}
// For streaming logs (follow=true), use Stream(); note this blocks until the log stream is closed
logs, err := logReq.Stream(ctx)
if err != nil {
logrus.Errorf("Failed to stream logs: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error getting logs: %v", err)},
},
}, nil, nil
}
defer logs.Close()
// Read the streamed logs; io.ReadAll keeps whatever was read even if the stream
// ends with an error, which is only logged as a warning
logBytes, readErr := io.ReadAll(logs)
if readErr != nil {
logrus.Warnf("Error reading log stream: %v", readErr)
}
logContent := string(logBytes)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Logs for pod %s in namespace %s:\n\n%s", args.Pod, args.Namespace, logContent)},
},
}, nil, nil
}
// HandleNaturalLanguage handles natural language query requests
func (kt *K8sTools) HandleNaturalLanguage(ctx context.Context, req *mcp.CallToolRequest, args NaturalLanguageArgs) (*mcp.CallToolResult, any, error) {
query := args.Query
response := ""
// Checking the singular form also covers the plural ("pod" matches "pods", etc.)
if contains(query, "pod") {
response = "I can help you with pod operations. Try asking me to list pods, get pod details, or view pod logs."
} else if contains(query, "service") {
response = "I can help you with service operations. Try asking me to list services or get service details."
} else if contains(query, "deployment") {
response = "I can help you with deployment operations. Try asking me to list deployments or get deployment details."
} else if contains(query, "cluster") || contains(query, "info") {
response = "I can help you get cluster information. Try asking me for cluster details."
} else if contains(query, "log") {
response = "I can help you get logs from pods. Try asking me to get logs from a specific pod."
} else {
response = "I can help you with Kubernetes operations. Try asking me about pods, services, deployments, cluster info, or logs."
}
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: response},
},
}, nil, nil
}
// HandlePing handles ping requests
func (kt *K8sTools) HandlePing(ctx context.Context, req *mcp.CallToolRequest, args PingArgs) (*mcp.CallToolResult, any, error) {
logrus.Info("handlePing called")
logrus.Debugf("handlePing called with args: %+v", args)
// Test connection
err := kt.clientManager.TestConnection(ctx)
if err != nil {
logrus.Errorf("Failed to test connection: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("❌ Connection failed: %v", err)},
},
}, nil, nil
}
// Get cluster version for additional verification
client, err := kt.getCachedClient(kt.getEffectiveContext(args.Context))
if err != nil {
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("❌ Failed to create client: %v", err)},
},
}, nil, nil
}
version, err := client.Discovery().ServerVersion()
if err != nil {
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("❌ Failed to get cluster version: %v", err)},
},
}, nil, nil
}
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("✅ Connection successful!\nKubernetes version: %s", version.String())},
},
}, nil, nil
}
// HandleGetEvents handles getting Kubernetes events
func (kt *K8sTools) HandleGetEvents(ctx context.Context, req *mcp.CallToolRequest, args GetEventsArgs) (*mcp.CallToolResult, any, error) {
logrus.Info("handleGetEvents called")
logrus.Debugf("handleGetEvents called with args: %+v", args)
client, err := kt.getCachedClient(kt.getEffectiveContext(args.Context))
if err != nil {
logrus.Errorf("Failed to create Kubernetes client: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error connecting to Kubernetes: %v", err)},
},
}, nil, nil
}
// Build list options
listOptions := metav1.ListOptions{}
if args.LabelSelector != "" {
listOptions.LabelSelector = args.LabelSelector
}
if args.FieldSelector != "" {
listOptions.FieldSelector = args.FieldSelector
}
if args.Limit > 0 {
listOptions.Limit = int64(args.Limit)
}
// Get events
logrus.Debugf("Getting events in namespace: %s", args.Namespace)
events, err := client.CoreV1().Events(args.Namespace).List(ctx, listOptions)
if err != nil {
logrus.Errorf("Failed to list events: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error listing events: %v", err)},
},
}, nil, nil
}
// Format events as readable text
formatted := fmt.Sprintf("Events in namespace '%s':\n", args.Namespace)
if len(events.Items) == 0 {
formatted += "No events found.\n"
} else {
for _, event := range events.Items {
formatted += fmt.Sprintf("- %s: %s/%s %s (%s) - %s\n",
event.CreationTimestamp.Format("2006-01-02 15:04:05"),
event.InvolvedObject.Kind,
event.InvolvedObject.Name,
event.Reason,
event.Type,
event.Message)
}
}
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: formatted},
},
}, nil, nil
}
// HandleExecInPod handles executing commands in pods
func (kt *K8sTools) HandleExecInPod(ctx context.Context, req *mcp.CallToolRequest, args ExecInPodArgs) (*mcp.CallToolResult, any, error) {
logrus.Info("handleExecInPod called")
logrus.Debugf("handleExecInPod called with args: %+v", args)
// Build kubectl exec command
cmd := []string{"kubectl", "exec", args.Pod}
if args.Namespace != "" {
cmd = append(cmd, "-n", args.Namespace)
}
if args.Container != "" {
cmd = append(cmd, "-c", args.Container)
}
if args.Stdin {
cmd = append(cmd, "-i")
}
if args.TTY {
cmd = append(cmd, "-t")
}
// kubectl flags (including --context) must come before "--"; everything after it is passed to the container
if args.Context != "" {
cmd = append(cmd, "--context", args.Context)
}
// Add the command and its arguments
cmd = append(cmd, "--", args.Command)
if args.Args != "" {
// Split additional args on whitespace and append
additionalArgs := strings.Fields(args.Args)
cmd = append(cmd, additionalArgs...)
}
// Execute command using kubectl
logrus.Debugf("Executing command in pod: %s", strings.Join(cmd, " "))
// Use kubectl tools to execute the command
kubectlTools := NewKubectlTools()
output, err := kubectlTools.ExecuteKubectlCommand(cmd)
if err != nil {
logrus.Errorf("Failed to execute command in pod: %v", err)
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Error executing command in pod: %v", err)},
},
}, nil, nil
}
return &mcp.CallToolResult{
Content: []mcp.Content{
&mcp.TextContent{Text: fmt.Sprintf("Command output from pod %s:\n\n%s", args.Pod, output)},
},
}, nil, nil
}
// contains reports whether s contains substr, ignoring case so that natural-language
// queries match regardless of capitalisation
func contains(s, substr string) bool {
return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}