#!/usr/bin/env node
/**
* Debug Script: Challenge-Hypothesis Tool
*
* Simulates challenge-hypothesis MCP tool behavior for UI development.
* Starts a mock server, sends a realistic hypothesis for evaluation,
* waits for response, then outputs the formatted result that would be sent to the LLM.
*/
import { pathToFileURL } from 'node:url';

import { MockServer } from './shared/mock-server.js';
import { ResponseSimulator } from './shared/response-simulator.js';
import { mockChallengeHypothesisData, mockResponses } from './shared/mock-data.js';
/**
 * Drive one end-to-end challenge-hypothesis debug session.
 *
 * Starts the mock server, publishes a realistic hypothesis request (which is
 * pushed to any connected UI via SSE), waits for either a real UI response or
 * a simulated fallback response, and prints the LLM-formatted result to
 * stdout. Diagnostics go to stderr so stdout carries only the payload.
 * Always stops the server before terminating the process.
 */
async function runDebugChallengeHypothesis(): Promise<void> {
  const server = new MockServer(3000);
  const simulator = new ResponseSimulator(server, {
    responseDelay: 5000, // 5 seconds for thoughtful hypothesis evaluation
    simulateTyping: false
  });
  // Exit code is recorded here instead of calling process.exit() inside
  // `catch`: process.exit() terminates immediately and would skip `finally`,
  // leaving the mock server running on the error path.
  let exitCode = 0;
  // Handle for the auto-response fallback timer so it can be cancelled once
  // a real UI response wins the race.
  let autoTimer: ReturnType<typeof setTimeout> | undefined;
  try {
    // Start the mock server
    await server.start();
    console.error('[Debug] Challenge-Hypothesis debug server started');
    console.error('[Debug] Open http://localhost:3000 in your browser to see the UI');
    console.error('[Debug] Waiting for UI connection...');
    // Wait a moment for potential UI connection
    await new Promise(resolve => setTimeout(resolve, 2000));
    // Create the mock request
    const requestId = `challenge-hypothesis-${Date.now()}`;
    const mockRequest = {
      id: requestId,
      type: 'challenge-hypothesis',
      data: {
        ...mockChallengeHypothesisData,
        timestamp: new Date().toISOString(),
        context: {
          ...mockChallengeHypothesisData.context,
          debugMode: true,
          evaluationCriteria: ['feasibility', 'cost', 'risk', 'timeline', 'impact']
        }
      },
      timestamp: new Date().toISOString()
    };
    // Add the request (this triggers SSE to UI)
    server.addRequest(mockRequest);
    console.error(`[Debug] Sent challenge-hypothesis request: ${requestId}`);
    console.error(`[Debug] Hypothesis: "${mockChallengeHypothesisData.hypothesis.substring(0, 100)}..."`);
    // Resolves when a connected UI posts a real response.
    const responsePromise = new Promise<any>((resolve) => {
      server.once('response', (data) => {
        resolve(data.response);
      });
    });
    // Fallback: after 12 seconds, synthesize a mock response if no UI has
    // answered yet. A plain executor + setTimeout is used (not an async
    // executor) so a simulator failure rejects the promise instead of being
    // silently swallowed.
    const autoResponsePromise = new Promise<any>((resolve, reject) => {
      autoTimer = setTimeout(() => {
        if (!server.hasPendingRequests()) {
          return; // UI already answered; let responsePromise win the race.
        }
        console.error('[Debug] No UI response received, using mock response');
        simulator.simulateResponse(mockRequest).then(resolve, reject);
      }, 12000); // Wait 12 seconds for hypothesis evaluation
    });
    // Wait for either UI response or auto-response
    const humanResponse = await Promise.race([responsePromise, autoResponsePromise]);
    if (autoTimer !== undefined) {
      clearTimeout(autoTimer); // Don't leave the fallback timer pending.
    }
    // Format the final result as it would be sent to LLM
    const llmResult = formatChallengeHypothesisResult(mockRequest, humanResponse);
    // Output the result to stdout (this is what the LLM would receive)
    console.log('='.repeat(80));
    console.log('FORMATTED RESULT FOR LLM:');
    console.log('='.repeat(80));
    console.log(llmResult);
    console.log('='.repeat(80));
  } catch (error) {
    console.error('[Debug] Error:', error);
    exitCode = 1;
  } finally {
    // Cleanup runs on both success and failure; only then do we exit.
    await server.stop();
    console.error('[Debug] Challenge-Hypothesis debug session completed');
    process.exit(exitCode);
  }
}
/**
 * Format the challenge-hypothesis result as it would be returned to the LLM.
 *
 * @param request - The tool request; `request.data` must carry `hypothesis`
 *   (string) and may carry `context` (object) and `evidence` (string[]).
 * @param response - The human (or simulated) evaluation; missing fields fall
 *   back to neutral defaults so a partial response still renders.
 * @returns A markdown document summarizing the human assessment.
 */
function formatChallengeHypothesisResult(request: any, response: any): string {
  const hypothesis = request.data.hypothesis;
  const context = request.data.context || {};
  const evidence = request.data.evidence || [];
  // Defaults are the midpoints of the 1-7 agreement and 1-10 confidence scales.
  const agreement = response.agreement || 4; // 1-7 scale
  const reasoning = response.reasoning || 'No reasoning provided';
  const confidence = response.confidence || 5; // 1-10 scale
  const alternativeApproach = response.alternativeApproach || '';
  const additionalContext = response.additionalContext || '';
  let result = `# Human Evaluation of Hypothesis\n\n`;
  // Original hypothesis
  result += `## Hypothesis Under Evaluation\n\n`;
  result += `"${hypothesis}"\n\n`;
  // Context information (internal debugMode flag and falsy values are omitted)
  if (Object.keys(context).length > 0) {
    result += `## Context\n\n`;
    Object.entries(context).forEach(([key, value]) => {
      if (key !== 'debugMode' && value) {
        result += `- **${key.charAt(0).toUpperCase() + key.slice(1)}:** ${value}\n`;
      }
    });
    result += `\n`;
  }
  // Evidence (if provided)
  if (evidence.length > 0) {
    result += `## Supporting Evidence\n\n`;
    evidence.forEach((item: string, index: number) => {
      result += `${index + 1}. ${item}\n`;
    });
    result += `\n`;
  }
  // Agreement level with emoji scale
  result += `## Human Assessment\n\n`;
  result += `### Agreement Level: ${getAgreementEmoji(agreement)} (${agreement}/7)\n\n`;
  result += getAgreementDescription(agreement) + `\n\n`;
  // Detailed reasoning
  result += `### Reasoning\n\n`;
  result += `${reasoning}\n\n`;
  // Confidence rendered as a 10-segment bar. The bar level is clamped to
  // [0, 10] so an out-of-range confidence cannot make '░'.repeat() throw a
  // RangeError (repeat rejects negative counts); the raw value is still shown.
  result += `### Confidence in Assessment\n\n`;
  const barLevel = Math.max(0, Math.min(10, confidence));
  const confidenceBar = '█'.repeat(barLevel) + '░'.repeat(10 - barLevel);
  result += `**${confidence}/10** ${confidenceBar}\n\n`;
  // Alternative approach if provided
  if (alternativeApproach) {
    result += `### Alternative Approach Suggested\n\n`;
    result += `${alternativeApproach}\n\n`;
  }
  // Additional context if provided
  if (additionalContext) {
    result += `### Additional Considerations\n\n`;
    result += `${additionalContext}\n\n`;
  }
  // Decision guidance derived from the agreement score
  result += `## Decision Guidance\n\n`;
  if (agreement >= 6) {
    result += `✅ **Strong Support** - The human strongly agrees with this hypothesis. Proceed with confidence, but monitor the mentioned risks.\n\n`;
  } else if (agreement >= 5) {
    result += `👍 **General Agreement** - The human agrees with the hypothesis but has some reservations. Address the concerns raised.\n\n`;
  } else if (agreement === 4) {
    result += `🤔 **Neutral/Mixed** - The human sees both pros and cons. Carefully weigh the trade-offs before deciding.\n\n`;
  } else if (agreement >= 2) {
    result += `👎 **Skeptical** - The human has significant concerns about this hypothesis. Consider alternative approaches.\n\n`;
  } else {
    result += `❌ **Strong Disagreement** - The human strongly disagrees with this hypothesis. Do not proceed without major changes.\n\n`;
  }
  // Summary metadata
  result += `## Response Summary\n\n`;
  result += `- **Evaluation Type:** Hypothesis challenge and assessment\n`;
  result += `- **Agreement Score:** ${agreement}/7 (${getAgreementLabel(agreement)})\n`;
  result += `- **Confidence Level:** ${confidence}/10\n`;
  result += `- **Alternative Suggested:** ${alternativeApproach ? 'Yes' : 'No'}\n`;
  result += `- **Session:** demo\n`;
  result += `- **Timestamp:** ${new Date().toISOString()}\n\n`;
  result += `---\n\n`;
  result += `**Note:** Use this evaluation to inform your decision-making process. Pay special attention to the reasoning and any alternative approaches suggested.`;
  return result;
}
/**
 * Map a 1-7 agreement level to a face emoji (1 = angriest, 7 = happiest).
 * Out-of-range levels are clamped to the nearest end of the scale.
 */
function getAgreementEmoji(level: number): string {
  const faces = ['😤', '😠', '😞', '😐', '🙂', '😊', '😍'];
  const index = Math.min(Math.max(level - 1, 0), faces.length - 1);
  return faces[index] || '😐'; // fallback also covers a NaN level
}
/**
 * Map a 1-7 agreement level to its textual label. Out-of-range levels are
 * clamped to the nearest end of the scale; a NaN level yields 'Neutral'.
 */
function getAgreementDescription(level: number): string {
  const scale = [
    'Strongly Disagree',
    'Disagree',
    'Somewhat Disagree',
    'Neutral',
    'Somewhat Agree',
    'Agree',
    'Strongly Agree'
  ];
  const index = Math.min(Math.max(level - 1, 0), scale.length - 1);
  return scale[index] || 'Neutral';
}
/**
 * Summarize a 1-7 agreement level as a short support/opposition label.
 * Thresholds are checked from strongest support downward; anything below 2
 * (including NaN, which matches no case) reads as 'Strong Opposition'.
 */
function getAgreementLabel(level: number): string {
  switch (true) {
    case level >= 6:
      return 'Strong Support';
    case level >= 5:
      return 'General Agreement';
    case level === 4:
      return 'Neutral/Mixed';
    case level >= 2:
      return 'Skeptical';
    default:
      return 'Strong Opposition';
  }
}
// CLI execution
if (require.main === module) {
console.error('[Debug] Starting Challenge-Hypothesis debug session...');
runDebugChallengeHypothesis().catch(error => {
console.error('[Debug] Fatal error:', error);
process.exit(1);
});
}
export { runDebugChallengeHypothesis, formatChallengeHypothesisResult };