"""
LLM-powered task breakdown for large tasks
Uses Claude to decompose overwhelming tasks into manageable subtasks.
"""
import json
from datetime import datetime
from typing import Any
import aiosqlite
async def break_down_task_with_claude(
    todo: dict[str, Any],
    subtask_count: int = 5,
) -> dict[str, Any]:
    """
    Build an LLM prompt that decomposes a large task into smaller subtasks.

    No LLM call happens here: the returned prompt is meant to be forwarded
    to Claude by the caller (the MCP tool), which handles the actual request.

    Args:
        todo: The todo dict to break down (must contain at least "title").
        subtask_count: Target number of subtasks to request.

    Returns:
        Dict with the rendered "prompt" and the original "todo".
    """
    # Pull the template's inputs up front; .get() supplies readable
    # fallbacks for the optional fields instead of raising KeyError.
    title = todo["title"]
    priority = todo.get("priority", "medium")
    notes = todo.get("notes", "None")
    timeframe = todo.get("timeframe", "Not set")
    prompt = f"""You are helping someone with ADHD and executive dysfunction break down a large task.
**Original Task:**
Title: {title}
Priority: {priority}
Notes: {notes}
Current timeframe: {timeframe}
**Your job:**
Break this into {subtask_count} smaller, concrete subtasks that:
1. Each takes under 2 hours to complete
2. Have crystal-clear completion criteria (no ambiguity)
3. Build logically on each other
4. **Start with the EASIEST step first** (lowest activation energy)
5. Avoid perfectionism - aim for "good enough" on each step
**For each subtask, provide:**
- **title**: Specific, actionable (start with a verb)
- **time_estimate_minutes**: 15, 30, 60, or 120
- **energy_required**: "low" (routine/mechanical), "medium" (moderate focus), or "high" (deep work/creative)
- **notes**: What "done" looks like in 1-2 sentences
- **order**: 0-indexed position (0 = first/easiest)
**Important guidelines:**
- The first subtask (order: 0) should be the SMALLEST possible step
- Avoid vague tasks like "Research X" - make them concrete like "Find 3 examples of X and save links"
- If the task involves creation (deck, document), start with outline/structure, not content
- Break "thinking" work into visible artifacts (notes, outlines, drafts)
**Return ONLY valid JSON** (no markdown, no explanation):
[
{{
"title": "Draft 1-page outline of key sections",
"time_estimate_minutes": 30,
"energy_required": "medium",
"notes": "List 5-7 section headings with 1-sentence description each. Don't write content yet.",
"order": 0
}},
...
]
"""
    return {"prompt": prompt, "todo": todo}
async def create_subtasks_in_db(
    subtasks_data: list[dict[str, Any]],
    parent_todo: dict[str, Any],
    db: aiosqlite.Connection,
) -> list[int]:
    """
    Create subtask todos in the database from an LLM breakdown result.

    Inserts one row per subtask (ordered by "order", easiest first),
    appends a breadcrumb note to the parent's task_context, and commits
    everything in a single transaction.

    Args:
        subtasks_data: List of subtask dicts from the LLM. Each needs
            "title", "time_estimate_minutes", and "energy_required";
            "notes" and "order" are optional.
        parent_todo: The parent todo dict (needs "id"; priority, timeframe,
            and theme_tag are inherited when present).
        db: Open aiosqlite connection.

    Returns:
        List of created todo row IDs, in insertion order.
    """
    # Nothing to do for an empty breakdown; also avoids the IndexError
    # the context note below would otherwise hit on subtasks_data[0].
    if not subtasks_data:
        return []
    # Sort a copy so the caller's list is not mutated as a side effect.
    ordered = sorted(subtasks_data, key=lambda s: s.get("order", 999))
    # One timestamp for the whole batch keeps created_at consistent.
    now = datetime.now().isoformat()
    created_ids: list[int] = []
    for position, subtask in enumerate(ordered):
        cursor = await db.execute(
            """
            INSERT INTO todos (
                title, priority, notes, time_estimate, energy_required,
                timeframe, theme_tag, parent_todo_id, task_order, quick, status, created_at
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'active', ?)
            """,
            (
                subtask["title"],
                parent_todo.get("priority", "medium"),  # inherit parent priority
                subtask.get("notes", ""),
                subtask["time_estimate_minutes"],
                subtask["energy_required"],
                parent_todo.get("timeframe"),
                parent_todo.get("theme_tag"),
                parent_todo["id"],
                subtask.get("order", position),  # fall back to sorted position
                subtask["time_estimate_minutes"] <= 30,  # auto-mark quick if <=30min
                now,
            ),
        )
        created_ids.append(cursor.lastrowid)
    # Leave a breadcrumb on the parent so the breakdown is visible there.
    context_note = (
        f"[{now}] Broken down into {len(created_ids)} subtasks. "
        f"Start with: {ordered[0]['title']}"
    )
    await db.execute(
        """
        UPDATE todos
        SET task_context = COALESCE(task_context || '\n' || ?, ?)
        WHERE id = ?
        """,
        (context_note, context_note, parent_todo["id"]),
    )
    # Single commit makes inserts + parent update atomic; the original
    # committed twice, so a crash in between could leave subtasks
    # without the parent breadcrumb.
    await db.commit()
    return created_ids
def parse_llm_response(response_text: str) -> list[dict[str, Any]]:
    """
    Parse an LLM response into the subtask list it should contain.

    Strips a surrounding markdown code fence (```json ... ``` or a plain
    ``` ... ```), then decodes the remainder as JSON.

    Args:
        response_text: Raw text returned by the LLM.

    Returns:
        The decoded list of subtask dicts.

    Raises:
        ValueError: If the text is not valid JSON, or decodes to something
            other than a list.
    """
    # Pull the payload out of a fenced block when one is present; the
    # ```json fence is checked first so the language tag is dropped.
    if "```json" in response_text:
        json_str = response_text.split("```json")[1].split("```")[0].strip()
    elif "```" in response_text:
        json_str = response_text.split("```")[1].split("```")[0].strip()
    else:
        json_str = response_text.strip()
    try:
        subtasks_data = json.loads(json_str)
    except json.JSONDecodeError as e:
        # Chain the decode error so the original position info survives.
        raise ValueError(
            f"LLM returned invalid JSON: {e}\n\nResponse: {response_text}"
        ) from e
    # Callers iterate this as a list of subtasks; surface a wrong top-level
    # shape here rather than failing later with an opaque TypeError.
    if not isinstance(subtasks_data, list):
        raise ValueError(
            f"LLM returned JSON of type {type(subtasks_data).__name__}, expected a list"
        )
    return subtasks_data