import { extractContent } from "../extractContent.js";
import fs from 'fs';
import path from 'path';
import { McpClient } from "./mcpClient.js";
export function createRouteHandlers(planExecutor, llmClient, broadcastFunction = null) {
/* ────────── /api/memory — debug memory endpoint ────────── */
// Dumps the MCP memory state to two JSON files in the working directory and
// returns a summary analysis. Gated behind a `debug: true` flag in the body.
const handleMemoryRoute = async (req, res) => {
  try {
    const { sessionId, debug = false } = req.body;
    // Simple auth check (for now just a debug flag)
    if (!debug) {
      return res.status(403).json({
        error: "Access denied. Set debug: true to access memory endpoint."
      });
    }
    if (!sessionId) {
      return res.status(400).json({
        error: "sessionId is required for memory debugging"
      });
    }
    console.log(`\n🧠 === MEMORY DEBUG === Session: ${sessionId} ===`);
    // Read memory using the MCP client.
    // NOTE(review): sessionId is validated above but never passed to
    // readMemory — confirm whether memory is scoped per session upstream.
    const memory = await planExecutor.mcpClient.readMemory("MemoryDebugEndpoint");
    // Format the memory data for better readability
    const debugData = {
      memory: memory,
      analysis: {
        conversationLength: memory.fullContext?.conversationThread?.length || 0,
        bufferSize: memory.fullContext?.conversationContext?.buffer?.length || 0,
        workflowCount: memory.fullContext?.insights?.workflowHistory?.length || 0,
        memoryUpdates: memory.fullContext?.insights?.memoryUpdateCount || 0,
        patternCount: memory.fullContext?.insights?.patterns?.length || 0,
        topicCount: memory.fullContext?.insights?.keyTopics?.length || 0,
        chronologicalToolFlow: memory.fullContext?.currentSession?.chronologicalUseOfTools?.slice(-5) || [], // Show last 5 tools
        sessionDuration: Math.round((Date.now() - (memory.fullContext?.currentSession?.startTime || Date.now())) / 1000 / 60) + " minutes",
        lastActivity: new Date(memory.fullContext?.metadata?.lastWrite || Date.now()).toISOString()
      }
    };
    // Save to file for persistent debugging
    const debugFilePath = path.join(process.cwd(), 'memory_check.json');
    const rawFilePath = path.join(process.cwd(), 'memory_check_raw.json');
    // FIX: write asynchronously instead of fs.writeFileSync so the event loop
    // is not blocked while serializing/writing a potentially large memory dump.
    await Promise.all([
      // Save formatted debug data
      fs.promises.writeFile(debugFilePath, JSON.stringify(debugData, null, 2), 'utf8'),
      // Save raw memory data without any processing
      fs.promises.writeFile(rawFilePath, JSON.stringify(memory, null, 2), 'utf8')
    ]);
    console.log(`✅ Memory debug data saved to: ${debugFilePath}`);
    console.log(`📄 Raw memory data saved to: ${rawFilePath}`);
    console.log(`📊 Analysis:`);
    console.log(` - Conversation Messages: ${debugData.analysis.conversationLength}`);
    console.log(` - Buffer Size: ${debugData.analysis.bufferSize}`);
    console.log(` - Total Workflows: ${debugData.analysis.workflowCount}`);
    console.log(` - Memory Updates: ${debugData.analysis.memoryUpdates}`);
    console.log(` - Tool Flow (last 5): [${debugData.analysis.chronologicalToolFlow.join(' → ')}]`);
    console.log(` - Session Age: ${debugData.analysis.sessionDuration}`);
    console.log(` - Last Activity: ${debugData.analysis.lastActivity}`);
    // Return clean response
    res.json({
      success: true,
      message: "Memory debug data saved to memory_check.json and memory_check_raw.json",
      sessionId: sessionId,
      analysis: debugData.analysis,
      dataLocation: {
        formatted: debugFilePath,
        raw: rawFilePath
      },
      preview: {
        conversationMessages: debugData.memory.fullContext?.conversationThread?.length || 0,
        memoryUpdates: debugData.memory.fullContext?.insights?.memoryUpdateCount || 0,
        recentTopics: debugData.memory.fullContext?.insights?.keyTopics?.slice(0, 3)?.map(t => t.topic) || [],
        lastWorkflow: debugData.memory.fullContext?.insights?.workflowHistory?.slice(-1)[0]?.workflow || null
      }
    });
  } catch (error) {
    console.error("❌ Memory debug error:", error);
    res.status(500).json({
      error: "Memory debug failed",
      details: error.message
    });
  }
};
/* ────────── /api/plan — plan iterator route ────────── */
// Drives the plan lifecycle based on `action` in the body:
// "create" builds a new plan, "execute" runs the next step of the current
// plan, "status" returns the current plan state.
const handlePlanRoute = async (req, res) => {
  try {
    const userText = req.body?.text ?? "";
    const action = req.body?.action ?? "create"; // create, execute, status
    console.log(`\n=== /api/plan (${action}) user:`, userText);
    if (action === "create") {
      // Create a new execution plan
      const planResult = await planExecutor.createExecutionPlan(userText);
      res.json({
        success: true,
        action: "plan_created",
        message: "Execution plan created successfully",
        planResult: extractContent(planResult.result)
      });
    } else if (action === "execute") {
      // Execute the next step in the current plan
      const currentPlan = await planExecutor.readCurrentPlan();
      if (!currentPlan) {
        return res.json({
          success: false,
          message: "No execution plan found. Create a plan first."
        });
      }
      const stepResult = await planExecutor.executeNextPlanStep(currentPlan);
      res.json({
        success: true,
        action: "step_executed",
        completed: stepResult.completed,
        result: stepResult.result,
        currentPlan: await planExecutor.readCurrentPlan() // Get updated plan state
      });
    } else if (action === "status") {
      // Get current plan status
      const currentPlan = await planExecutor.readCurrentPlan();
      res.json({
        success: true,
        action: "status",
        plan: currentPlan
      });
    } else {
      res.json({
        success: false,
        message: "Invalid action. Use 'create', 'execute', or 'status'"
      });
    }
  } catch (error) {
    console.error("Plan iterator error:", error);
    // FIX: signal failure via HTTP status, consistent with the other handlers
    // in this file (memory/chat return 500 on unexpected errors). The JSON
    // body fields are unchanged for existing clients.
    res.status(500).json({
      success: false,
      message: "Plan execution failed",
      error: error.message
    });
  }
};
/* ────────── Chat Route Handler ────────── */
// Answers a chat message either by streaming a plain LLM response over
// WebSocket or by running the full tool-using agent workflow, depending on
// the llmClient's tool-need classification.
async function handleChatRoute(req, res) {
  const { message } = req.body;
  if (!message) {
    return res.status(400).json({ error: "Message is required" });
  }
  // Push an event to connected WebSocket clients, if broadcasting is configured.
  const broadcast = (type, data) => {
    if (broadcastFunction) {
      broadcastFunction({ type, data });
    }
  };
  try {
    // Ask the LLM whether this query requires tool use.
    const toolDecision = await llmClient.needsTools(message);
    const needsTools = toolDecision.needsTools || false;
    console.log(`🎯 Query analysis: needsTools = ${needsTools} (${toolDecision.reasoning})`);
    broadcast('chat_decision', { needsTools, reasoning: toolDecision.reasoning });
    if (needsTools) {
      // Full agent workflow with tools
      console.log("🔧 Using full agent workflow...");
      const result = await planExecutor.executeCompleteWorkflow(message);
      const response = result?.text || "No response generated";
      return res.json({ content: response });
    }
    // Simple streaming response via WebSocket
    console.log("📡 Using streaming response via WebSocket...");
    broadcast('chat_stream_start', { message });
    const onChunk = (chunk) => {
      console.log("📦 Received chunk:", JSON.stringify(chunk));
      broadcast('chat_chunk', { content: chunk });
    };
    const onComplete = (fullResponse) => {
      console.log("✅ Streaming complete. Full response:", JSON.stringify(fullResponse));
      broadcast('chat_complete', { content: fullResponse });
    };
    console.log("🚀 Starting streamChat...");
    await llmClient.streamChat(message, onChunk, onComplete);
    console.log("🏁 streamChat completed");
    // The streamed content went over the WebSocket; HTTP reply is an ack only.
    res.json({ success: true, message: "Streaming response sent via WebSocket" });
  } catch (error) {
    console.error("Chat route error:", error);
    res.status(500).json({
      error: "An error occurred while processing your request",
      details: error.message
    });
  }
}
/* ────────── /api/chat-tools — force tool usage (original behavior) ────────── */
// Forces the full tool-using agent workflow regardless of the query.
const handleChatToolsRoute = async (req, res) => {
  const userText = req.body?.text ?? "";
  console.log("\n=== /api/chat-tools (Forced Tool Execution) user:", userText);
  try {
    const result = await planExecutor.executeCompleteWorkflow(userText);
    res.json(result);
  } catch (error) {
    // FIX: a rejected workflow previously escaped as an unhandled rejection
    // and left the HTTP request hanging (Express 4 does not catch errors
    // thrown in async handlers). Respond with 500 like the other routes.
    console.error("Chat-tools route error:", error);
    res.status(500).json({ error: "Tool execution failed", details: error.message });
  }
};
// Lists MCP resources, preferring the client's cached catalog over a fresh
// "resources/list" round-trip.
const handleResourceRoute = async (req, res) => {
  try {
    // Use the MCP client that planExecutor already initialized.
    const { mcpClient } = planExecutor;
    // Serve cached resources when present; otherwise query the MCP server.
    const cached = mcpClient.catalog?.resources;
    const resources = cached?.length ? cached : await mcpClient.listAll("resources/list");
    res.json({ resources });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
};
// Expose the handlers for the HTTP server to wire onto its routes.
return {
handlePlanRoute,
handleChatRoute,
handleChatToolsRoute,
handleResourceRoute,
handleMemoryRoute // Add the new memory debug route
};
}