parallel_agents
Execute multiple TDD agents in parallel to decompose high-level tasks into atomic subtasks. Agents work in RED then GREEN groups, iterating based on quality gate reviews for reliable code generation.
Instructions
Execute multiple TDD agents in parallel with quality gate iteration. Decomposes high-level tasks into atomic subtasks, executes them in parallel groups (RED before GREEN), and iterates based on quality review.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| task | Yes | High-level task to decompose and execute via TDD workflow | |
| max_parallel | No | Maximum parallel agents (matches GPU slots, default: 2) | |
| iterate_until_quality | No | Whether to iterate on failed quality checks | |
| max_iterations | No | Maximum quality gate iterations (prevents infinite loops) | |
| write_files | No | Write generated code to files in work_directory (default: true). Files are organized by phase (red/green/refactor subdirectories). | |
| work_directory | No | Optional directory for generated files (default: /tmp/parallel-agents-{timestamp}) | |
Implementation Reference
- Main entry point for the parallel_agents tool. Orchestrates the full TDD workflow: decomposes high-level tasks, executes subtasks in parallel groups (RED→GREEN→REFACTOR), runs quality gate with retry iterations, and synthesizes results.
async execute(args) { const { task, max_parallel, // Now optional - will auto-detect from router iterate_until_quality = true, max_iterations = 3, work_directory, write_files = true // NEW: Write generated code to files (default: true) } = args; // Store write_files setting for use in executeGroup/executeTask this.writeFilesEnabled = write_files; const startTime = Date.now(); const workDir = work_directory || `/tmp/parallel-agents-${Date.now()}`; try { // Ensure work directory exists if (!fs.existsSync(workDir)) { fs.mkdirSync(workDir, { recursive: true }); } // Dynamic slot detection from llama.cpp router let effectiveMaxParallel = max_parallel; let routerInfo = null; if (!max_parallel) { routerInfo = await getRouterSlotCount(8081); effectiveMaxParallel = routerInfo.slots; console.error(`[ParallelAgents] Auto-detected ${effectiveMaxParallel} slots from router (model: ${routerInfo.model}, ctx: ${routerInfo.context})`); } else { effectiveMaxParallel = max_parallel; } // Safety cap at 10 (reasonable maximum for most setups) effectiveMaxParallel = Math.min(effectiveMaxParallel, 10); // Update ConcurrentRequestManager with actual slot count this.concurrentManager = new ConcurrentRequestManager(effectiveMaxParallel); console.error(`[ParallelAgents] Starting workflow for: ${task.substring(0, 50)}...`); console.error(`[ParallelAgents] Work directory: ${workDir}`); console.error(`[ParallelAgents] Max parallel: ${effectiveMaxParallel}${routerInfo ? 
' (auto-detected)' : ''}`); // Stage 1: Decompose task using Worker (Phase 1 learning: not Orchestrator) // Pass slot count so decomposer can batch independent tasks appropriately const decomposed = await this.decompose(task, effectiveMaxParallel); if (decomposed.error) { return this.buildErrorResponse(new Error(decomposed.error), { stage: 'decomposition', raw_output: decomposed.raw }); } // Save decomposition result fs.writeFileSync( path.join(workDir, 'decomposed.json'), JSON.stringify(decomposed, null, 2) ); console.error(`[ParallelAgents] Decomposed into ${this.countTasks(decomposed)} tasks`); // Stage 2: Execute parallel groups sequentially const results = { groups: [], all_outputs: [], task_results: {} }; for (const group of decomposed.parallel_groups || []) { console.error(`[ParallelAgents] Executing group ${group.group}: ${group.name || 'unnamed'}`); // Pass accumulated results so REFACTOR phase can access GREEN outputs const groupResults = await this.executeGroup(group, effectiveMaxParallel, workDir, results.task_results); results.groups.push(groupResults); // Collect outputs for (const output of groupResults.outputs) { results.all_outputs.push(output); if (output.task_id) { results.task_results[output.task_id] = output; } } } // Save execution results fs.writeFileSync( path.join(workDir, 'results.json'), JSON.stringify(results, null, 2) ); // Stage 3: Quality gate with iteration let qualityResult = { verdict: 'skip', score: 0 }; let iteration = 0; if (iterate_until_quality) { while (iteration < max_iterations) { console.error(`[ParallelAgents] Quality review iteration ${iteration + 1}/${max_iterations}`); qualityResult = await this.qualityReview(results); // Save quality result fs.writeFileSync( path.join(workDir, `quality-${iteration}.json`), JSON.stringify(qualityResult, null, 2) ); if (qualityResult.verdict === 'pass') { console.error(`[ParallelAgents] Quality gate PASSED (score: ${qualityResult.score})`); break; } console.error(`[ParallelAgents] 
Quality gate ITERATE (score: ${qualityResult.score})`); // Retry failed tasks with specific feedback if (qualityResult.retry_tasks && qualityResult.retry_tasks.length > 0) { for (const taskId of qualityResult.retry_tasks) { // Get per-task feedback if available, otherwise use general issues const taskFeedback = qualityResult.task_issues?.[taskId] || qualityResult.issues || []; console.error(`[ParallelAgents] Retrying task: ${taskId} with ${taskFeedback.length} feedback items`); const retryResult = await this.retryTask(taskId, decomposed, results, taskFeedback); if (retryResult) { results.task_results[taskId] = retryResult; results.all_outputs.push(retryResult); } } } iteration++; } if (iteration >= max_iterations && qualityResult.verdict !== 'pass') { console.error(`[ParallelAgents] Max iterations reached, quality gate incomplete`); } } // Stage 4: Synthesize final result const synthesis = await this.synthesize(task, results, qualityResult); // Save synthesis fs.writeFileSync( path.join(workDir, 'synthesis.json'), JSON.stringify(synthesis, null, 2) ); const processingTime = Date.now() - startTime; // Collect all written files from task outputs const allWrittenFiles = results.all_outputs .filter(o => o.files_written && o.files_written.length > 0) .flatMap(o => o.files_written); return this.buildSuccessResponse({ task, decomposition: decomposed, execution: { groups_executed: results.groups.length, tasks_completed: results.all_outputs.filter(o => o.success).length, tasks_failed: results.all_outputs.filter(o => !o.success).length, max_parallel_used: effectiveMaxParallel, slots_auto_detected: !max_parallel, files_written: allWrittenFiles.length, // NEW: Count of files written write_files_enabled: write_files }, router_info: routerInfo || { slots: effectiveMaxParallel, model: 'manual', status: 'specified' }, quality: { verdict: qualityResult.verdict, score: qualityResult.score, iterations: iteration + (iterate_until_quality ? 
1 : 0) }, synthesis, files: allWrittenFiles, // NEW: List of all written files work_directory: workDir, processing_time_ms: processingTime, metrics: this.concurrentManager.getMetrics() }); } catch (error) { console.error(`[ParallelAgents] Workflow failed:`, error); return this.buildErrorResponse(error, { work_directory: workDir, processing_time_ms: Date.now() - startTime }); } } - Tool definition and JSON schema for parallel_agents. Defines inputs: task (required), max_parallel, iterate_until_quality, max_iterations, write_files, work_directory.
{ name: 'parallel_agents', description: 'Execute multiple TDD agents in parallel with quality gate iteration. Decomposes high-level tasks into atomic subtasks, executes them in parallel groups (RED before GREEN), and iterates based on quality review.', handler: 'handleParallelAgents', schema: { type: 'object', properties: { task: { type: 'string', description: 'High-level task to decompose and execute via TDD workflow' }, max_parallel: { type: 'integer', default: 2, minimum: 1, maximum: 6, description: 'Maximum parallel agents (matches GPU slots, default: 2)' }, iterate_until_quality: { type: 'boolean', default: true, description: 'Whether to iterate on failed quality checks' }, max_iterations: { type: 'integer', default: 3, minimum: 1, maximum: 5, description: 'Maximum quality gate iterations (prevents infinite loops)' }, write_files: { type: 'boolean', default: true, description: 'Write generated code to files in work_directory (default: true). Files are organized by phase (red/green/refactor subdirectories).' }, work_directory: { type: 'string', description: 'Optional directory for generated files (default: /tmp/parallel-agents-{timestamp})' } }, required: ['task'] } - src/handlers/index.js:51-51 (registration)Registration of ParallelAgentsHandler in the HANDLER_REGISTRY map under the key 'handleParallelAgents', connecting the tool definition to the handler class.
'handleParallelAgents': ParallelAgentsHandler, - Decomposes a high-level task into parallel groups using the tdd-decomposer role, then reorganizes tasks by TDD phase (RED, GREEN, REFACTOR) to maximize parallelism.
async decompose(task, slots) { console.error(`[ParallelAgents] Decomposing task with ${slots} slots available`); try { // Use tdd-decomposer role (routes to Worker per Phase 1 learning) // The role template contains {{SLOTS}} placeholder that SubagentHandler will replace const result = await this.subagentHandler.execute({ role: 'tdd-decomposer', task: `Task: ${task}`, context: { available_slots: slots, slot_replacement: { '{{SLOTS}}': String(slots) } // For template replacement }, verdict_mode: 'full' }); if (!result.success) { return { error: 'Decomposition failed', raw: result.error }; } // Apply JSON repair (Phase 1 learning: LLMs produce malformed JSON) const parsed = this.repairAndParseJSON(result.response); if (parsed.error) { return { error: 'JSON parsing failed', raw: result.response }; } // Reorganize by phase to maximize parallelism // LLMs tend to group by feature; we force phase-based grouping const reorganized = this.reorganizeByPhase(parsed, slots); return reorganized; } catch (error) { console.error(`[ParallelAgents] Decomposition error:`, error); return { error: error.message }; } } - Executes a group of tasks in parallel batches using ConcurrentRequestManager. Each task is executed via executeTask which routes to SubagentHandler with the appropriate role.
async executeGroup(group, maxParallel, workDir, previousResults = {}) { const tasks = group.tasks || []; const outputs = []; // Process in batches if more tasks than slots const batches = []; for (let i = 0; i < tasks.length; i += maxParallel) { batches.push(tasks.slice(i, i + maxParallel)); } for (const batch of batches) { // Execute batch in parallel using ConcurrentRequestManager const promises = batch.map((taskDef, idx) => { const uniqueId = `${group.group}-${taskDef.id || idx}-${Date.now()}`; return this.concurrentManager.executeRequest( this.executeTask(taskDef, uniqueId, null, workDir, previousResults), 'normal' ); }); const batchResults = await Promise.allSettled(promises); for (const result of batchResults) { if (result.status === 'fulfilled') { outputs.push(result.value); } else { outputs.push({ success: false, error: result.reason?.message || 'Task failed', task_id: 'unknown' }); } } } return { group: group.group, name: group.name, outputs, completed: outputs.filter(o => o.success).length, failed: outputs.filter(o => !o.success).length }; }