autofix_batch
Automatically fix multiple TwinCAT files matching glob patterns, applying structural validation, safe fixes, and canonical formatting for industrial automation projects.
Instructions
Automatically fix multiple TwinCAT files matching glob patterns.
Args:
- file_patterns: Glob patterns (e.g., ["*.TcPOU"])
- directory_path: Base directory
- create_backup: Create backup files before fixing
- profile: Response profile passed to per-file autofix (default: llm_strict)
- format_profile: Formatting profile for per-file autofix
- strict_contract: Enforce generation contract fail-closed in per-file autofix
- create_implicit_files: Auto-create missing interface/DUT dependencies
- orchestration_hints: Include loop-guard hints in per-file responses
- intent_profile: Programming paradigm intent — "auto" (default), "procedural", or "oop". With "auto", each file's content is inspected individually for EXTENDS/IMPLEMENTS, so OOP files receive full OOP checks even in mixed batches.
- ctx: FastMCP context for per-file progress notifications (injected automatically)
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| file_patterns | Yes | | |
| directory_path | No | | . |
| create_backup | No | | True |
| profile | No | | llm_strict |
| format_profile | No | | twincat_canonical |
| strict_contract | No | | True |
| create_implicit_files | No | | True |
| orchestration_hints | No | | True |
| enforcement_mode | No | | strict |
| intent_profile | No | | auto |
| ctx | No | | None |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | | |
Implementation Reference
- The autofix_batch tool handler implementation, which iterates through matched files and calls autofix_file for each.
async def autofix_batch(
    file_patterns: list[str],
    directory_path: str = ".",
    create_backup: bool = True,
    profile: str = "llm_strict",
    format_profile: str = "twincat_canonical",
    strict_contract: bool = True,
    create_implicit_files: bool = True,
    orchestration_hints: bool = True,
    enforcement_mode: str = DEFAULT_ENFORCEMENT_MODE,
    intent_profile: str = "auto",
    ctx: Optional[Any] = None,
) -> str:
    """Automatically fix multiple TwinCAT files matching glob patterns.

    Args:
        file_patterns: Glob patterns (e.g., ["*.TcPOU"])
        directory_path: Base directory
        create_backup: Create backup files before fixing
        profile: Response profile passed to per-file autofix (default: llm_strict)
        format_profile: Formatting profile for per-file autofix
        strict_contract: Enforce generation contract fail-closed in per-file autofix
        create_implicit_files: Auto-create missing interface/DUT dependencies
        orchestration_hints: Include loop-guard hints in per-file responses
        intent_profile: Programming paradigm intent — "auto" (default), "procedural", or "oop". With "auto", each file's content is inspected individually for EXTENDS/IMPLEMENTS, so OOP files receive full OOP checks even in mixed batches.
        ctx: FastMCP context for per-file progress notifications (injected automatically)
    """
    # Monotonic anchor for elapsed-time reporting in every exit path
    # (_tool_error / _with_meta both take start_time=_t0).
    _t0 = time.monotonic()
    # ctx_policy stays None until _resolve_execution_context succeeds; the
    # outer except uses that to decide whether policy fields are unresolved.
    ctx_policy = None
    try:
        # --- fail-fast input validation: each helper returns a ready error
        # --- response string on failure, or a falsy value on success.
        mode_error = _validate_enforcement_mode(enforcement_mode, start_time=_t0)
        if mode_error:
            return mode_error
        ctx_policy = _resolve_execution_context(
            directory_path, enforcement_mode=enforcement_mode
        )
        if intent_profile not in _VALID_INTENT_PROFILES:
            return _tool_error(
                f"Invalid intent_profile: {intent_profile}",
                start_time=_t0,
                execution_context=ctx_policy,
                valid_intent_profiles=list(_VALID_INTENT_PROFILES),
            )
        from glob import glob

        # Import autofix_file lazily to avoid module-level registration order issues.
        from twincat_validator.server import autofix_file

        profile_error = _validate_profile(profile, start_time=_t0, execution_context=ctx_policy)
        if profile_error:
            return profile_error
        format_profile_error = _validate_format_profile(
            format_profile, start_time=_t0, execution_context=ctx_policy
        )
        if format_profile_error:
            return format_profile_error
        # Wall-clock start used only for the "processing_time" field below.
        start_time = time.time()
        base_path = Path(directory_path)
        if not base_path.exists():
            return _tool_error(
                f"Directory not found: {directory_path}",
                start_time=_t0,
                execution_context=ctx_policy,
                error_type="DirectoryNotFoundError",
            )
        # Collect matches from all patterns; a set de-duplicates files that
        # match more than one pattern.
        all_files: set[Path] = set()
        for pattern in file_patterns:
            pattern_path = base_path / pattern
            matches = glob(str(pattern_path), recursive=True)
            all_files.update(Path(f) for f in matches)
        # Keep only files whose extension the validator supports.
        tc_files = [f for f in all_files if f.suffix in config.supported_extensions]
        if not tc_files:
            return _tool_error(
                "No TwinCAT files found matching patterns",
                start_time=_t0,
                execution_context=ctx_policy,
                patterns=file_patterns,
                directory=str(base_path),
            )
        total = len(tc_files)
        results = []
        failed_files = []
        fixed = 0
        no_changes = 0
        safe_to_import_count = 0
        safe_to_compile_count = 0
        for idx, file_path in enumerate(tc_files):
            # Progress is emitted before each file: current is 0-based,
            # the human-readable message is 1-based (idx + 1).
            await _emit_progress(
                ctx,
                current=idx,
                total=total,
                message=f"autofix {idx + 1}/{total}: {file_path.name}",
            )
            try:
                # NOTE(review): autofix_file is called synchronously and its
                # return value is parsed as a JSON string — assumes the
                # per-file tool is a sync callable, not a coroutine; confirm.
                fix_result = json.loads(
                    autofix_file(
                        str(file_path),
                        create_backup=create_backup,
                        fixes_to_apply=None,
                        profile=profile,
                        format_profile=format_profile,
                        strict_contract=strict_contract,
                        create_implicit_files=create_implicit_files,
                        orchestration_hints=orchestration_hints,
                        enforcement_mode=enforcement_mode,
                        intent_profile=intent_profile,
                    )
                )
                # Per-file failures are recorded but do not abort the batch.
                if not fix_result.get("success", False):
                    failed_files.append(
                        {
                            "file_path": str(file_path),
                            "error": fix_result.get("error", "Unknown autofix error"),
                            "error_type": fix_result.get("error_type", "AutofixError"),
                        }
                    )
                    continue
                content_changed = bool(fix_result.get("content_changed", False))
                if content_changed:
                    fixed += 1
                else:
                    no_changes += 1
                # "is True" deliberately excludes truthy-but-not-boolean values.
                if fix_result.get("safe_to_import") is True:
                    safe_to_import_count += 1
                if fix_result.get("safe_to_compile") is True:
                    safe_to_compile_count += 1
                results.append(
                    {
                        "file_path": str(file_path),
                        "fixes_applied_count": len(fix_result.get("fixes_applied", [])),
                        "fix_result": fix_result,
                    }
                )
            except Exception as e:
                # Unexpected per-file errors (I/O, bad JSON, …) become
                # failed_files entries instead of killing the whole batch.
                failed_files.append(
                    {
                        "file_path": str(file_path),
                        "error": str(e),
                        "error_type": type(e).__name__,
                    }
                )
        await _emit_progress(ctx, current=total, total=total, message="autofix complete")
        batch_id = f"batch_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        # The batch is "done" only when every processed file succeeded and
        # every one reported both safe_to_import and safe_to_compile.
        done = (
            len(results) > 0
            and len(failed_files) == 0
            and safe_to_import_count == len(results)
            and safe_to_compile_count == len(results)
        )
        # Flatten per-file blockers plus infrastructure failures into one list.
        blockers: list[dict] = []
        for item in results:
            fix_result = item.get("fix_result", {}) or {}
            for blocker in fix_result.get("blockers", []) or []:
                blockers.append(
                    {
                        "file_path": item.get("file_path", ""),
                        "check": blocker.get("check", "unknown"),
                        "message": blocker.get("message", ""),
                        "line": blocker.get("line"),
                    }
                )
        for failed_item in failed_files:
            blockers.append(
                {
                    "file_path": failed_item.get("file_path", ""),
                    "check": "infrastructure",
                    "message": failed_item.get("error", "Autofix batch infrastructure error"),
                    "line": None,
                }
            )
        # Batch-level safety flags: all processed files must agree.
        safe_to_import = len(results) > 0 and safe_to_import_count == len(results)
        safe_to_compile = len(results) > 0 and safe_to_compile_count == len(results)
        result = {
            "success": True,
            "batch_id": batch_id,
            "processed_files": len(results),
            "total_files": total,
            "processing_time": round(time.time() - start_time, 3),
            "batch_summary": {
                "fixed": fixed,
                "no_changes": no_changes,
                "failed": len(failed_files),
                "safe_to_import": safe_to_import_count,
                "safe_to_compile": safe_to_compile_count,
            },
            "workflow_mode": "strict_pipeline",
            "done": done,
            "status": "done" if done else "blocked",
            "safe_to_import": safe_to_import,
            "safe_to_compile": safe_to_compile,
            "blocking_count": len(blockers),
            "blockers": blockers,
            "files": results,
            "failed_files": failed_files,
        }
        # Orchestration hints for the calling agent: terminal when done,
        # otherwise direct it toward manual / targeted follow-up.
        if done:
            result["terminal_mode"] = True
            result["next_action"] = "done_no_further_autofix"
            result["allow_followup_autofix_without_user_request"] = False
        else:
            result["terminal_mode"] = False
            result["next_action"] = "manual_intervention_or_targeted_fix"
        # Fail loudly if the response violates the batch contract.
        _assert_autofix_batch_contract(result)
        return _with_meta(result, _t0, execution_context=ctx_policy)
    except Exception as e:
        # Boundary handler: convert any unexpected error into a structured
        # tool-error response rather than letting the exception escape.
        error_kwargs = {"execution_context": ctx_policy}
        if ctx_policy is None:
            error_kwargs.update(unresolved_policy_fields(enforcement_mode))
        return _tool_error(str(e), start_time=_t0, **error_kwargs)
- twincat_validator/mcp_tools_batch.py:307-308 (registration)The autofix_batch tool registration using @mcp.tool decorator.
@mcp.tool() async def autofix_batch(