# chain_of_draft_solve
Solve reasoning problems using a Chain of Draft approach that generates minimal intermediate steps to reduce token usage while maintaining accuracy across domains like math, logic, and code.
## Instructions
Solve a reasoning problem using Chain of Draft approach
## Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| problem | Yes | The problem to solve | |
| domain | No | Domain for context (math, logic, code, common-sense, etc.) | `"general"` |
| max_words_per_step | No | Maximum words per reasoning step | Adaptive (derived from estimated complexity) |
| approach | No | Force `'CoD'` or `'CoT'` approach | Auto-selected |
| enforce_format | No | Whether to enforce the word limit | `true` |
| adaptive_word_limit | No | Adjust word limits based on complexity | `true` |
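
As a usage sketch, a client might pass arguments like these (the problem text and values below are illustrative, not taken from the repository):

```python
# Illustrative chain_of_draft_solve arguments; the problem text and values are hypothetical.
arguments = {
    "problem": "A train travels 60 km in 45 minutes. What is its average speed in km/h?",
    "domain": "math",              # route to math-specific examples and word limits
    "max_words_per_step": 5,       # cap each reasoning step at five words
    "approach": "CoD",             # force Chain of Draft instead of auto-selection
    "enforce_format": True,        # trim steps that exceed the word limit
    "adaptive_word_limit": False,  # use the fixed limit above rather than the complexity estimate
}
```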
## Implementation Reference
- **server.py:36-80** (handler) — Primary handler for the `chain_of_draft_solve` MCP tool. Registers the tool and handles execution by delegating to the `ChainOfDraftClient`.

```python
@app.tool()
async def chain_of_draft_solve(
    problem: str,
    domain: str = "general",
    max_words_per_step: int = None,
    approach: str = None,
    enforce_format: bool = True,
    adaptive_word_limit: bool = True
) -> str:
    """Solve a reasoning problem using Chain of Draft approach.

    Args:
        problem: The problem to solve
        domain: Domain for context (math, logic, code, common-sense, etc.)
        max_words_per_step: Maximum words per reasoning step (default: adaptive)
        approach: Force "CoD" or "CoT" approach (default: auto-select)
        enforce_format: Whether to enforce the word limit (default: True)
        adaptive_word_limit: Adjust word limits based on complexity (default: True)
    """
    # Track execution time
    start_time = time.time()

    # Process the request with the client
    result = await cod_client.solve_with_reasoning(
        problem=problem,
        domain=domain,
        max_words_per_step=max_words_per_step,
        approach=approach,
        enforce_format=enforce_format,
        adaptive_word_limit=adaptive_word_limit
    )

    # Calculate execution time
    execution_time = (time.time() - start_time) * 1000  # ms

    # Format the response
    formatted_response = (
        f"Chain of {result['approach']} reasoning ({result['word_limit']} word limit):\n\n"
        f"{result['reasoning_steps']}\n\n"
        f"Final answer: {result['final_answer']}\n\n"
        f"Stats: {result['token_count']} tokens, {execution_time:.0f}ms, "
        f"complexity score: {result['complexity']}"
    )

    return formatted_response
```
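
Given the format string above, the tool returns a single plain-text string with this shape (the angle-bracket fields stand for the computed values):

```text
Chain of <approach> reasoning (<word_limit> word limit):

<reasoning steps, one short draft per line>

Final answer: <extracted answer>

Stats: <token count> tokens, <execution time>ms, complexity score: <complexity>
```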
- **client.py:214-307** (helper) — Core helper implementing the Chain of Draft logic: complexity estimation, approach selection (CoD vs. CoT), prompt generation, LLM invocation, response extraction, format enforcement, and analytics.

```python
async def solve_with_reasoning(self, problem, domain="general", **kwargs):
    """
    Solve a problem using the appropriate reasoning approach.

    Args:
        problem: The problem text
        domain: Problem domain (math, code, logic, etc.)
        **kwargs: Additional parameters and settings

    Returns:
        Dictionary with reasoning steps and answer
    """
    start_time = time.time()

    # Override settings with kwargs
    local_settings = {**self.settings, **kwargs}

    # Determine complexity and select approach
    complexity = await self.complexity_estimator.estimate_complexity(problem, domain)

    if local_settings.get("approach"):
        # Manually specified approach
        approach = local_settings["approach"]
        approach_reason = "Manually specified"
    else:
        # Auto-select based on problem
        approach, approach_reason = await self.reasoning_selector.select_approach(
            problem, domain, complexity
        )

    # Determine word limit
    if local_settings["adaptive_word_limit"] and approach == "CoD":
        word_limit = complexity  # Use estimated complexity as word limit
    else:
        word_limit = local_settings["max_words_per_step"]

    # Get examples
    examples = await self.example_db.get_examples(domain, approach)

    # Create prompt based on approach
    if approach == "CoD":
        prompt = create_cod_prompt(problem, domain, word_limit, examples)
    else:
        prompt = create_cot_prompt(problem, domain, examples)

    # Generate response from LLM
    response = await self.client.messages.create(
        model=local_settings.get("model", "claude-3-5-sonnet-20240620"),
        max_tokens=local_settings.get("max_tokens", 500),
        system=prompt["system"],
        messages=[{"role": "user", "content": prompt["user"]}]
    )

    # Extract reasoning and answer
    full_response = response.content[0].text
    parts = full_response.split("####")
    reasoning = parts[0].strip()
    answer = parts[1].strip() if len(parts) > 1 else "No clear answer found"

    # Apply format enforcement if needed
    if local_settings["enforce_format"] and approach == "CoD":
        reasoning = self.format_enforcer.enforce_word_limit(reasoning, word_limit)
        adherence = self.format_enforcer.analyze_adherence(reasoning, word_limit)
    else:
        adherence = None

    # Record analytics
    if local_settings["track_analytics"]:
        execution_time = (time.time() - start_time) * 1000  # ms
        await self.analytics.record_inference(
            problem=problem,
            domain=domain,
            approach=approach,
            word_limit=word_limit,
            tokens_used=len(full_response.split()),
            execution_time=execution_time,
            reasoning=reasoning,
            answer=answer,
            metadata={
                "complexity": complexity,
                "approach_reason": approach_reason,
                "adherence": adherence
            }
        )

    return {
        "reasoning_steps": reasoning,
        "final_answer": answer,
        "token_count": len(full_response.split()),
        "approach": approach,
        "complexity": complexity,
        "word_limit": word_limit
    }
```
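
The prompt builders (`create_cod_prompt`, `create_cot_prompt`) are not shown in this excerpt. Based on how the result is used above (a dict with `system` and `user` keys, a per-step word limit, and a reply that is split on `####` to locate the answer), a plausible sketch of the CoD prompt builder is given below; the exact wording is an assumption, not taken from the repository.

```python
# Plausible sketch of create_cod_prompt, which is not shown in the excerpt above.
# It is inferred from how the caller uses it: it must return a dict with "system"
# and "user" keys, and the model's reply is split on "####" to find the answer.
def create_cod_prompt(problem, domain, word_limit, examples):
    example_text = "\n\n".join(examples) if examples else ""
    system = (
        f"You are solving a {domain} problem with Chain of Draft reasoning. "
        f"Write each reasoning step as a separate line of at most {word_limit} words. "
        "When you are done, write '####' followed by the final answer."
    )
    user = f"{example_text}\n\nProblem: {problem}" if example_text else f"Problem: {problem}"
    return {"system": system, "user": user}
```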
- **index.js:424-456** (schema) — Input schema definition for the `chain_of_draft_solve` tool in the JavaScript implementation.

```javascript
const CHAIN_OF_DRAFT_TOOL = {
  name: "chain_of_draft_solve",
  description: "Solve a reasoning problem using Chain of Draft approach",
  inputSchema: {
    type: "object",
    properties: {
      problem: {
        type: "string",
        description: "The problem to solve"
      },
      domain: {
        type: "string",
        description: "Domain for context (math, logic, code, common-sense, etc.)"
      },
      max_words_per_step: {
        type: "number",
        description: "Maximum words per reasoning step"
      },
      approach: {
        type: "string",
        description: "Force 'CoD' or 'CoT' approach"
      },
      enforce_format: {
        type: "boolean",
        description: "Whether to enforce the word limit"
      },
      adaptive_word_limit: {
        type: "boolean",
        description: "Adjust word limits based on complexity"
      }
    },
    required: ["problem"]
  }
};
```
- **index.js:599-614** (handler) — Handler block for the `chain_of_draft_solve` tool call in the JavaScript MCP server. Delegates to `chainOfDraftClient` and formats the response.

```javascript
if (name === "chain_of_draft_solve") {
  const result = await chainOfDraftClient.solveWithReasoning(args);

  const formattedResponse =
    `Chain of ${result.approach} reasoning (${result.word_limit} word limit):\n\n` +
    `${result.reasoning_steps}\n\n` +
    `Final answer: ${result.final_answer}\n\n` +
    `Stats: ${result.token_count} tokens, ${result.execution_time_ms.toFixed(0)}ms, ` +
    `complexity score: ${result.complexity}`;

  return {
    content: [{ type: "text", text: formattedResponse }]
  };
}
```
- **index.js:308-420** (helper) — JavaScript helper implementing the Chain of Draft solving logic, mirroring the Python version in client.py.

```javascript
const chainOfDraftClient = {
  async solveWithReasoning(params) {
    const {
      problem,
      domain = 'general',
      max_words_per_step = null,
      approach = null,
      enforce_format = true,
      adaptive_word_limit = true
    } = params;

    const startTime = Date.now();

    // Analyze problem complexity
    const analysis = complexityEstimator.analyzeProblem(problem, domain);
    const complexity = analysis.estimated_complexity;

    // Determine word limit
    let wordLimit = max_words_per_step;
    if (!wordLimit && adaptive_word_limit) {
      wordLimit = complexity;
    } else if (!wordLimit) {
      // Default based on domain
      wordLimit = complexityEstimator.domainBaseLimits[domain] || 5;
    }

    // Determine approach (CoD or CoT)
    const performanceStats = analyticsDb.getPerformanceByDomain(domain);
    const selectedApproach = approach ||
      reasoningSelector.selectApproach(domain, complexity, performanceStats);

    // Create prompt based on approach
    const prompt = selectedApproach === 'CoD'
      ? createCodPrompt(problem, domain, [], wordLimit)
      : createCotPrompt(problem, domain, []);

    // Call Claude
    const response = await anthropic.messages.create({
      model: 'claude-3-sonnet-20240229',
      max_tokens: 1000,
      messages: [
        { role: 'user', content: prompt }
      ]
    });

    // Extract reasoning and answer
    const fullText = response.content[0].text;

    // Extract final answer (assuming it comes after the reasoning, often starts with "Answer:" or similar)
    let reasoningSteps = fullText;
    let finalAnswer = '';

    // Common patterns for final answer sections
    const answerPatterns = [
      /(?:Final Answer|Answer|Therefore):?\s*(.*?)$/is,
      /(?:In conclusion|To conclude|Thus|Hence|So),\s*(.*?)$/is,
      /(?:The answer is|The result is|The solution is)\s*(.*?)$/is
    ];

    // Try to extract the final answer with each pattern
    for (const pattern of answerPatterns) {
      const match = fullText.match(pattern);
      if (match && match[1]) {
        finalAnswer = match[1].trim();
        reasoningSteps = fullText.substring(0, fullText.indexOf(match[0])).trim();
        break;
      }
    }

    // If no pattern matched, just use the last sentence
    if (!finalAnswer) {
      const sentences = fullText.split(/[.!?]+\s+/);
      if (sentences.length > 1) {
        finalAnswer = sentences.pop().trim();
        reasoningSteps = sentences.join('. ') + '.';
      }
    }

    // Apply format enforcement if needed
    if (enforce_format && selectedApproach === 'CoD') {
      reasoningSteps = formatEnforcer.enforceWordLimit(reasoningSteps, wordLimit);
    }

    // Calculate execution time
    const executionTime = Date.now() - startTime;

    // Estimate token count (rough approximation)
    const tokenCount = Math.ceil(fullText.length / 4);

    // Record analytics
    analyticsDb.addRecord({
      problem_id: problem.substring(0, 20),
      problem_text: problem,
      domain,
      approach: selectedApproach,
      word_limit: wordLimit,
      tokens_used: tokenCount,
      execution_time_ms: executionTime,
      reasoning_steps: reasoningSteps,
      answer: finalAnswer
    });

    // Return result
    return {
      approach: selectedApproach,
      reasoning_steps: reasoningSteps,
      final_answer: finalAnswer,
      token_count: tokenCount,
      word_limit: wordLimit,
      complexity: complexity,
      execution_time_ms: executionTime
    };
  }
};
```
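
Neither excerpt includes the format enforcer itself. A minimal Python sketch of the per-step word-limit enforcement that `format_enforcer.enforce_word_limit` (Python) and `formatEnforcer.enforceWordLimit` (JavaScript) appear to perform is shown below; it assumes one reasoning step per line, and the real implementation (a method on a FormatEnforcer object) may differ.

```python
# Minimal sketch of per-step word-limit enforcement (the real FormatEnforcer is not
# shown in the excerpts above); it assumes one reasoning step per line and simply
# truncates any step that exceeds the limit.
def enforce_word_limit(reasoning: str, word_limit: int) -> str:
    enforced_steps = []
    for step in reasoning.splitlines():
        words = step.split()
        if len(words) > word_limit:
            step = " ".join(words[:word_limit])  # drop words beyond the limit
        enforced_steps.append(step)
    return "\n".join(enforced_steps)
```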