import { Client } from '@modelcontextprotocol/sdk/client/index.js';
/**
 * Example: How to call Debug MCP tools from your AI assistant (an MCP client)
 */
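// Helper to unwrap a tool result. This is a sketch that assumes the Debug MCP
// server returns its structured output as a JSON string in the first text
// content item; adjust if your server uses a different convention.
function parseToolResult<T>(result: unknown): T {
  const content = (result as { content?: Array<{ type: string; text?: string }> }).content;
  const text = content?.find((item) => item.type === 'text')?.text;
  if (!text) {
    throw new Error('Tool result contained no text content');
  }
  return JSON.parse(text) as T;
}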
// Example 1: Simple bug debugging workflow
async function debugBug(client: Client, bugDescription: string, files: string[]) {
console.log('=== Debugging Workflow Example ===\n');
// Step 1: Analyze the bug
console.log('Step 1: Analyzing bug...');
  const analysis = await client.callTool({
    name: 'analyze_bug',
    arguments: {
      bugDescription,
      files
    }
  });
console.log('Possible causes:', analysis);
// Step 2: Detect environment for first file
console.log('\nStep 2: Detecting environment...');
  const envDetection = await client.callTool({
    name: 'detect_environment',
    arguments: {
      filePath: files[0]
    }
  });
console.log('Environment:', envDetection);
// Step 3: Add debug logs
console.log('\nStep 3: Adding debug logs...');
  const injection = await client.callTool({
    name: 'add_debug_logs',
    arguments: {
      filePath: files[0],
      insertLine: 25, // Illustrative; in practice, pick a line based on the analysis
      logMessage: 'Function execution started',
      variables: ['param1', 'param2'],
      level: 'info'
    }
  });
console.log('Debug code injected:', injection);
// Step 4: Generate test steps
console.log('\nStep 4: Generating test steps...');
  const testSteps = await client.callTool({
    name: 'create_test_steps',
    arguments: {
      bugDescription,
      attemptNumber: 1,
      modifiedFiles: files,
      possibleCauses: ['Possible cause 1', 'Possible cause 2']
    }
  });
console.log('Test steps:', testSteps);
return testSteps;
}
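// Example usage (bug description and file paths are illustrative):
// await debugBug(client, 'Login button does nothing on click', ['src/auth/login.ts']);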
// Example 2: Iterative debugging loop
async function iterativeDebugging(
  client: Client,
  bugDescription: string,
  files: string[],
  maxAttempts: number = 5
) {
let attempt = 1;
let fixed = false;
while (attempt <= maxAttempts && !fixed) {
console.log(`\n=== Attempt ${attempt} ===`);
// Add debug logs based on previous analysis
for (const file of files) {
      await client.callTool({
        name: 'add_debug_logs',
        arguments: {
          filePath: file,
          insertLine: attempt * 10, // Placeholder; in practice, target lines come from log analysis
          logMessage: `Debug attempt ${attempt}`,
          variables: ['data', 'error'],
          level: 'info'
        }
      });
}
// Generate test steps
    const testSteps = await client.callTool({
      name: 'create_test_steps',
      arguments: {
        bugDescription,
        attemptNumber: attempt,
        modifiedFiles: files
      }
    });
// Present to user and get feedback
console.log('\nTest Steps for User:');
console.log(testSteps);
// Simulate user feedback (in real scenario, this comes from user)
const userFeedback = await getUserFeedback();
if (userFeedback === 'fixed') {
fixed = true;
console.log('\n✓ Bug is fixed! Cleaning up...');
// Remove all debug logs
      const cleanup = await client.callTool({
        name: 'remove_debug_logs',
        arguments: {
          filePaths: files
        }
      });
console.log('Cleanup complete:', cleanup);
} else {
// Read and analyze debug logs
      const logs = await client.callTool({
        name: 'read_debug_logs',
        arguments: {
          lastLines: 100
        }
      });
console.log('\nDebug logs analysis:');
console.log(logs);
// Use this analysis to add more targeted debug logs in next iteration
attempt++;
}
}
return fixed;
}
// Example 3: Manual environment specification
async function manualEnvironmentExample(client: Client) {
// When auto-detection might fail
  const result = await client.callTool({
    name: 'add_debug_logs',
    arguments: {
      filePath: 'src/custom-handler.js',
      insertLine: 15,
      logMessage: 'Custom handler called',
      variables: ['event', 'context'],
      environment: 'node', // Manually specify the environment
      level: 'debug'
    }
  });
console.log('Manual environment injection:', result);
}
// Example 4: Listing and previewing debug blocks
async function inspectDebugBlocks(client: Client, filePath: string) {
// List all debug blocks in a file
  const blocks = await client.callTool({
    name: 'list_debug_blocks',
    arguments: {
      filePath
    }
  });
console.log(`\nDebug blocks in ${filePath}:`);
console.log(JSON.stringify(blocks, null, 2));
// This can be useful before cleanup to see what will be removed
return blocks;
}
// Example 5: Environment detection for different file types
async function environmentDetectionExample(client: Client) {
const testFiles = [
'src/components/Button.jsx', // Browser/React
'server/api/auth.js', // Node.js
'app/controllers/UserController.php', // PHP
'main.py', // Python
'electron/main.js', // Electron
'miniprogram/pages/index.js' // WeChat Mini Program
];
console.log('=== Environment Detection Examples ===\n');
for (const file of testFiles) {
try {
      const result = await client.callTool({
        name: 'detect_environment',
        arguments: {
          filePath: file
        }
      });
      // Field names assume the shape of this tool's JSON payload
      const detection = parseToolResult<{
        environment: string;
        confidence: string | number;
        reasoning: string;
      }>(result);
      console.log(`${file}:`);
      console.log(`  Environment: ${detection.environment}`);
      console.log(`  Confidence: ${detection.confidence}`);
      console.log(`  Reasoning: ${detection.reasoning}\n`);
} catch (error) {
console.log(`${file}: Detection failed - ${error}\n`);
}
}
}
// Simulated feedback helper; see promptUserFeedback below for an interactive version
async function getUserFeedback(): Promise<'fixed' | 'not-fixed'> {
  return 'not-fixed';
}
// Export examples
export {
debugBug,
iterativeDebugging,
manualEnvironmentExample,
inspectDebugBlocks,
environmentDetectionExample
};
/**
* REAL-WORLD USAGE EXAMPLE
*
* When a user reports a bug, your AI would:
*
* 1. Analyze the bug description
* 2. Detect environment of relevant files
* 3. Inject appropriate debug logs
* 4. Provide clear test steps
* 5. Wait for user feedback
* 6. If not fixed, analyze logs and add more targeted logs
* 7. Repeat until fixed
* 8. Clean up all debug code
* 9. Summarize the fix
*
* This creates an interactive debugging loop where the AI
* systematically narrows down the issue based on real data.
*/
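/**
 * Minimal wiring sketch: connecting to the Debug MCP server over stdio.
 * The launch command, bug description, and file path below are assumptions;
 * point them at however your Debug MCP server actually runs.
 */
async function connectExample() {
  const { StdioClientTransport } = await import('@modelcontextprotocol/sdk/client/stdio.js');
  const client = new Client({ name: 'debug-mcp-example', version: '1.0.0' });
  // Assumed launch command for the Debug MCP server
  const transport = new StdioClientTransport({ command: 'node', args: ['dist/index.js'] });
  await client.connect(transport);
  await debugBug(client, 'Checkout total shows NaN', ['src/cart/total.ts']);
  await client.close();
}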