# create_workplan

Generate detailed implementation plans by analyzing your codebase and creating GitHub issues with step-by-step instructions, file modifications, and testing strategies.

## Instructions
Creates a GitHub issue with a detailed implementation plan.
This tool will:

1. Create a GitHub issue immediately with the provided title and description
2. Launch a background AI process to generate a comprehensive workplan
3. Update the issue with the generated workplan once complete
The AI will analyze your entire codebase (respecting `.gitignore`) to create a detailed plan with:

- Specific files to modify/create
- Code snippets and examples
- Step-by-step implementation instructions
- Testing strategies
Codebase reasoning modes:

- `"full"`: Complete file contents (most comprehensive)
- `"lsp"`: Function signatures and docstrings only (lighter weight)
- `"file_structure"`: Directory tree only (fastest)
- `"none"`: No codebase context
Returns the created issue URL and number immediately.
## Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| title | Yes | Title for the GitHub issue and workplan | |
| detailed_description | Yes | Detailed description of what needs to be implemented | |
| codebase_reasoning | No | Reasoning mode for codebase analysis (`"full"`, `"lsp"`, `"file_structure"`, `"none"`) | full |
| debug | No | If true, adds a comment to the issue with the full prompt used for generation | false |
| disable_search_grounding | No | If true, disables Google Search Grounding for this request | false |
## Implementation Reference
- `yellhorn_mcp/server.py:240-262` (registration) — MCP tool registration decorator defining the `create_workplan` tool with name, description, and input schema via parameters.

```python
@mcp.tool(
    name="create_workplan",
    description="""Creates a GitHub issue with a detailed implementation plan.

This tool will:
1. Create a GitHub issue immediately with the provided title and description
2. Launch a background AI process to generate a comprehensive workplan
3. Update the issue with the generated workplan once complete

The AI will analyze your entire codebase (respecting .gitignore) to create a detailed plan with:
- Specific files to modify/create
- Code snippets and examples
- Step-by-step implementation instructions
- Testing strategies

Codebase reasoning modes:
- "full": Complete file contents (most comprehensive)
- "lsp": Function signatures and docstrings only (lighter weight)
- "file_structure": Directory tree only (fastest)
- "none": No codebase context

Returns the created issue URL and number immediately.""",
)
```
- `yellhorn_mcp/server.py:263-389` (handler) — The main handler function for the `create_workplan` tool. It creates the GitHub issue immediately, adds a submission metadata comment, launches an asynchronous background task using `process_workplan_async` for AI generation, and returns the issue URL and number.

```python
async def create_workplan(
    ctx: Context,
    title: str,
    detailed_description: str,
    codebase_reasoning: str = "full",
    debug: bool = False,
    disable_search_grounding: bool = False,
) -> str:
    """Creates a GitHub issue with a detailed implementation plan based on codebase analysis.

    Args:
        ctx: Server context.
        title: Title for the GitHub issue and workplan.
        detailed_description: Detailed description of what needs to be implemented.
        codebase_reasoning: Reasoning mode for codebase analysis:
            - "full": Include complete file contents (most comprehensive)
            - "lsp": Include only function signatures and docstrings (lighter weight)
            - "file_structure": Include only directory/file structure (fastest)
            - "none": No codebase context (relies only on description)
        debug: If True, adds a comment to the issue with the full prompt used for generation.
        disable_search_grounding: If True, disables Google Search Grounding for this request.

    Returns:
        JSON string containing the issue URL and number.

    Raises:
        YellhornMCPError: If issue creation fails.
    """
    try:
        repo_path: Path = ctx.request_context.lifespan_context["repo_path"]

        # Handle search grounding override if specified
        original_search_grounding = ctx.request_context.lifespan_context.get(
            "use_search_grounding", True
        )
        if disable_search_grounding:
            ctx.request_context.lifespan_context["use_search_grounding"] = False
            await ctx.log(
                level="info",
                message="Search grounding temporarily disabled for this request",
            )

        # Create the GitHub issue first
        issue_data = await create_github_issue(repo_path, title, detailed_description)
        issue_number = issue_data["number"]
        issue_url = issue_data["url"]

        await ctx.log(
            level="info",
            message=f"Created GitHub issue #{issue_number}",
        )

        # Extract URLs from the description
        submitted_urls = extract_urls(detailed_description)

        # Add submission comment
        submission_metadata = SubmissionMetadata(
            status="Generating workplan...",
            model_name=ctx.request_context.lifespan_context["model"],
            search_grounding_enabled=ctx.request_context.lifespan_context.get(
                "use_search_grounding", False
            ),
            yellhorn_version=__version__,
            submitted_urls=submitted_urls if submitted_urls else None,
            codebase_reasoning_mode=codebase_reasoning,
            timestamp=datetime.now(timezone.utc),
        )
        submission_comment = format_submission_comment(submission_metadata)
        await add_issue_comment(repo_path, issue_number, submission_comment)

        # Skip AI workplan generation if codebase_reasoning is "none"
        if codebase_reasoning != "none":
            llm_manager = ctx.request_context.lifespan_context.get("llm_manager")
            model = ctx.request_context.lifespan_context["model"]
            reasoning_effort = ctx.request_context.lifespan_context.get("reasoning_effort")

            # Store codebase_reasoning in context for process_workplan_async
            ctx.request_context.lifespan_context["codebase_reasoning"] = codebase_reasoning

            # Launch background task to process the workplan with AI
            await ctx.log(
                level="info",
                message=f"Launching background task to generate workplan with AI model {model}",
            )
            start_time = datetime.now(timezone.utc)
            asyncio.create_task(
                process_workplan_async(
                    repo_path,
                    llm_manager,
                    model,
                    title,
                    issue_number,
                    codebase_reasoning,
                    detailed_description,
                    debug=debug,
                    disable_search_grounding=disable_search_grounding,
                    reasoning_effort=reasoning_effort,
                    _meta={
                        "original_search_grounding": original_search_grounding,
                        "start_time": start_time,
                        "submitted_urls": submitted_urls,
                    },
                    ctx=ctx,
                    github_command_func=ctx.request_context.lifespan_context.get(
                        "github_command_func"
                    ),
                    git_command_func=ctx.request_context.lifespan_context.get("git_command_func"),
                )
            )
        else:
            await ctx.log(
                level="info",
                message="Skipping AI workplan generation (codebase_reasoning='none')",
            )

        # Restore original search grounding setting if modified
        if disable_search_grounding:
            ctx.request_context.lifespan_context["use_search_grounding"] = original_search_grounding

        # Return the issue URL and number as JSON
        return json.dumps({"issue_url": issue_url, "issue_number": issue_number})

    except Exception as e:
        raise YellhornMCPError(f"Failed to create workplan: {str(e)}")
```
- `process_workplan_async` (async worker) — Supporting helper function called asynchronously by the handler. It gathers codebase context based on reasoning mode, constructs a detailed prompt for workplan generation, and invokes the LLM via `_generate_and_update_issue` to generate the content and update the GitHub issue with metrics and citations. (Line breaks inside the prompt template below are reconstructed from the flattened excerpt.)

````python
async def process_workplan_async(
    repo_path: Path,
    llm_manager: LLMManager,
    model: str,
    title: str,
    issue_number: str,
    codebase_reasoning: str,
    detailed_description: str,
    debug: bool = False,
    disable_search_grounding: bool = False,
    _meta: dict[str, object] | None = None,
    ctx: Context | None = None,
    github_command_func: Callable | None = None,
    git_command_func: Callable | None = None,
    reasoning_effort: ReasoningEffort | None = None,
) -> None:
    """Generate a workplan asynchronously and update the GitHub issue.

    Args:
        repo_path: Path to the repository.
        llm_manager: LLM Manager instance.
        model: Model name to use (Gemini or OpenAI).
        title: Title for the workplan.
        issue_number: GitHub issue number to update.
        codebase_reasoning: Reasoning mode to use for codebase analysis.
        detailed_description: Detailed description for the workplan.
        debug: If True, add a comment with the full prompt used for generation.
        disable_search_grounding: If True, disables search grounding for this request.
        _meta: Optional metadata from the caller.
        ctx: Optional context for logging.
        github_command_func: Optional GitHub command function (for mocking).
        git_command_func: Optional Git command function (for mocking).
        reasoning_effort: Optional reasoning effort to apply for supported models.
    """
    try:
        # Create a simple logging function that uses ctx if available
        def context_log(msg: str):
            if ctx:
                asyncio.create_task(ctx.log(level="info", message=msg))

        # Get codebase info based on reasoning mode
        # Calculate token limit for codebase context (70% of model's context window)
        token_counter = TokenCounter()
        model_limit = token_counter.get_model_limit(model)
        # Reserve tokens for prompt template, task details, and response
        # Estimate: prompt template ~1000, task details ~500, safety margin for response ~4000
        codebase_token_limit = int((model_limit - 5500) * 0.7)

        codebase_info, _ = await get_codebase_context(
            repo_path,
            codebase_reasoning,
            context_log,
            token_limit=codebase_token_limit,
            model=model,
            git_command_func=git_command_func,
        )

        # Construct prompt
        prompt = f"""You are a senior software architect and technical writer.
Your task is to output a GitHub-issue–ready **work-plan** that fully complies with the
“Strong Work-Plan Rules” and the “Gap-Fix Guidelines” below.
The answer you return will be copied verbatim into a GitHub issue, so structure, order and precision matter.

CONTEXT
───────────────────
Multi-file snippet of the current repo (trimmed for length)
{codebase_info}

One-line task title
{title}

Product / feature description from the PM
{detailed_description}

GLOBAL TONE & STYLE
───────────────────
• Write as one senior engineer explaining to another.
• Zero “TODO”, “placeholder”, or speculative wording—everything must be concrete and actionable.
• Be self-sufficient: an unfamiliar engineer can execute the plan end-to-end without additional guidance.
• All headings and check-box bullets must render correctly in GitHub Markdown.
• Keep line length ≤ 120 characters where feasible.

TOP-LEVEL SECTIONS (DO NOT ADD, REMOVE, OR RE-ORDER)
──────────────────────────────────────────────────────
## Summary
## Technical Details
## Architecture
## Completion Criteria & Metrics
## References
## Implementation Steps
## Global Test Strategy
## Files to Modify
## New Files to Create

MANDATORY CONTENT PER SECTION
─────────────────────────────
## Summary (≤ 5 sentences)
1 . Problem – one sentence that states the issue or feature.
2 . Proposed solution – what will be built.
3 . Why it matters – business or technical impact.
4 . Success criteria – concise, measurable, single sentence.
5 . Main subsystems touched.

## Technical Details
• Languages, frameworks, min versions.
• “External Dependencies” sub-section:
  – List every new third-party package AND specify how it will be introduced (e.g., `pyproject.toml` stanza, Dockerfile line).
• Dependency management & pinning strategy (uv, npm, go-mods, etc.).
• Build, lint, formatting, type-checking commands.
• Logging & observability – logger names, redaction strategy, trace IDs, dashboards.
• Analytics/KPIs – event names, schema, when they fire.
• Testing frameworks & helpers (mention async helpers or fixtures unique to repo).

## Architecture
• “Existing Components Leveraged” bullet list (files / classes).
• “New Components Introduced” bullet list (fully enumerated).
• Control-flow & data-flow diagram (ASCII or Mermaid).
• State-management, retry/fallback, and error-handling patterns (e.g., three-strike fallback).

## Completion Criteria & Metrics
• Engineering metrics – latency, SLA, test coverage ≥ X %, lint/type-check clean, etc.
• Business metrics – conversion, NPS, error-rate < Y %, etc.
• Code-state definition of done – all CI jobs green, new DAG registered, talk-suite passes, docs updated.

## References
• Exact repo paths examined – include line numbers or symbols when helpful.
• External URLs (one per line).
• Commit hashes or tags if specific revisions were read.

## Implementation Steps
Break work into atomic tasks suitable for individual PRs. Use the sub-template **verbatim** for every task:

### - [ ] Step <N>: <Concise Title>
**Description**: 1–2 sentences.
**Files**: list of files created/modified in this step.
**Reference(s)**: pointer(s) to rows in “## References”.
**Test(s)**: concrete test file names, fixtures/mocks, and the CI command that must pass.

Granularity rules:
• One node/class/function per step unless trivial.
• No mixed concerns (e.g., “Implement X and refactor Y” must be two steps).
• Each step’s **Test(s)** must name at least one assertion or expected behaviour.

## Global Test Strategy
• Unit, integration, e2e, load – what’s covered where.
• How to run locally (`make test`, `python -m pytest`, etc.).
• Env-vars / secrets layout (`.env.test`).
• Async helpers, fixtures, sandbox accounts.
• Coverage enforcement rule (PR fails if coverage < threshold).

## Files to Modify / ## New Files to Create
• Use Markdown tables or bullet lists.
• For **new files** provide:
  – One-line purpose.
  – Stub code block with signature(s).
  – Required exports (`__all__`) or module wiring.
  – Note if protobuf, OpenAPI, or YAML specs also added.

GAP-FIX GUIDELINES (Always Apply)
────────────────────────────────
1. ALWAYS describe how dependencies are added/pinned (e.g., `pyproject.toml`, `poetry.lock`).
2. If repo has custom test helpers (e.g., async graph helpers), reference & use them.
3. Call out existing services or models to be injected instead of rebuilt.
4. Explicitly enumerate **every** new component – no omissions.
5. Include retry/fallback/strike logic if part of the design pattern.
6. “Completion Criteria” must state both code-state and operational success metrics.
7. Each Implementation Step must have: references, granular scope, concrete tests.
8. Provide GitHub check-box list ready for copy-paste.
9. If conversational or persona suites are required, add a task for them.

PRE-FLIGHT QUALITY GATE (Auto-check before you answer)
───────────────────────────────────────────────────────
✔ All top-level sections present and in correct order.
✔ “Summary” ≤ 5 sentences and includes Problem + Success criteria.
✔ “Technical Details” contains “External Dependencies” + dependency pinning method.
✔ Architecture lists both Existing & New components.
✔ Completion Criteria includes code-state AND operational metrics.
✔ Implementation Steps use the exact sub-template and include tests.
✔ Global Test Strategy explains commands and coverage enforcement.
✔ New Files section provides stubs and export notes.
✔ No placeholders, “TODO”, or speculative language.
✔ All repo paths / URLs referenced are enumerated in “## References”.
IF ANY ITEM IS MISSING, STOP, FIX, AND RE-EMIT THE ENTIRE PLAN.

BEGIN OUTPUT
────────────
Return only the GitHub-Markdown for the issue body, starting with “## Summary”.

The workplan should be comprehensive enough that a developer or AI assistant could implement it without additional context, and structured in a way that makes it easy for an LLM to quickly understand and work with the contained information.

IMPORTANT: Respond *only* with the Markdown content for the GitHub issue body. Do *not* wrap your entire response in a single Markdown code block (```). Start directly with the '## Summary' heading.
"""

        # Add the title as header prefix
        content_prefix = f"# {title}\n\n"

        # If not disable_search_grounding, use search grounding
        if not disable_search_grounding:
            prompt += (
                "Search the internet for latest package versions and describe how to use them."
            )

        # Generate and update issue using the helper
        await _generate_and_update_issue(
            repo_path,
            llm_manager,
            model,
            prompt,
            issue_number,
            title,
            content_prefix,
            disable_search_grounding,
            debug,
            codebase_reasoning,
            _meta,
            ctx,
            github_command_func=github_command_func,
            git_command_func=git_command_func,
            reasoning_effort=reasoning_effort,
        )
    except Exception as e:
        detail = _format_exception_message(e)
        error_msg = (
            f"Error processing workplan: {detail}" if detail else "Error processing workplan"
        )
        if ctx:
            await ctx.log(level="error", message=error_msg)
        # Try to add error comment to issue
        try:
            error_comment = f"❌ **Error generating workplan**\n\n{detail if detail else str(e)}"
            await add_issue_comment(
                repo_path, issue_number, error_comment, github_command_func=github_command_func
            )
        except Exception:
            # If we can't even add a comment, just log
            if ctx:
                await ctx.log(
                    level="error", message=f"Failed to add error comment to issue: {str(e)}"
                )
````