process_twincat_batch
Validate and automatically fix TwinCAT 3 XML files in batch using deterministic quality checks and IEC 61131-3 OOP standards to ensure code quality in industrial automation projects.
Instructions
Run enforced deterministic batch TwinCAT workflow.
Steps:
validate_batch (pre-check)
autofix_batch (strict pipeline)
validate_batch (post-check)
Args:
- file_patterns: Glob patterns (e.g., ["*.TcPOU"])
- directory_path: Base directory
- create_backup: Create backup files before fixing
- validation_level: "all", "critical", or "style"
- enforcement_mode: Policy enforcement mode ("strict" or "compat")
- response_mode: "summary" (minimal, default), "compact" (no pre/post blobs), or "full" (all detail sections included)
- include_sections: In summary mode only — optional list of heavy sections to add. Supported: "blockers", "issues", "pre_validation", "autofix", "post_validation", "effective_oop_policy", "meta_detailed". Unknown names are ignored with a warning in the response. Has no effect in compact or full mode.
- include_knowledge_hints: Include recommended_check_ids from blockers (when not done).
- intent_profile: Programming paradigm intent — "auto" (default), "procedural", or "oop". Controls which check families run:
  - "procedural": OOP checks are skipped.
  - "oop": Full OOP check family is enforced.
  - "auto": Scans matched .TcPOU declarations for EXTENDS/IMPLEMENTS; resolves to "oop" if any are found, otherwise "procedural".
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| file_patterns | Yes | Glob patterns (e.g., ["*.TcPOU"]) | |
| directory_path | No | Base directory | . |
| create_backup | No | Create backup files before fixing | false |
| validation_level | No | "all", "critical", or "style" | all |
| enforcement_mode | No | Policy enforcement mode ("strict" or "compat") | strict |
| response_mode | No | "summary" (minimal), "compact", or "full" | summary |
| include_sections | No | Summary mode only — heavy sections to add | |
| include_knowledge_hints | No | Include recommended_check_ids from blockers | false |
| intent_profile | No | "auto", "procedural", or "oop" | auto |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | | |
Implementation Reference
- The implementation of the process_twincat_batch tool handler.
async def process_twincat_batch(
    file_patterns: list[str],
    directory_path: str = ".",
    create_backup: bool = False,
    validation_level: str = "all",
    enforcement_mode: str = DEFAULT_ENFORCEMENT_MODE,
    response_mode: str = "summary",
    include_sections: list[str] | None = None,
    include_knowledge_hints: bool = False,
    intent_profile: str = "auto",
) -> str:
    """Run enforced deterministic batch TwinCAT workflow.

    Steps:
        1. validate_batch (pre-check)
        2. autofix_batch (strict pipeline)
        3. validate_batch (post-check)

    Args:
        file_patterns: Glob patterns (e.g., ["*.TcPOU"])
        directory_path: Base directory
        create_backup: Create backup files before fixing
        validation_level: "all", "critical", or "style"
        enforcement_mode: Policy enforcement mode ("strict" or "compat")
        response_mode: "summary" (minimal, default), "compact" (no pre/post
            blobs), or "full" (all detail sections included).
        include_sections: In summary mode only — optional list of heavy
            sections to add. Supported: "blockers", "issues",
            "pre_validation", "autofix", "post_validation",
            "effective_oop_policy", "meta_detailed". Unknown names are
            ignored with a warning in the response. Has no effect in
            compact or full mode.
        include_knowledge_hints: Include recommended_check_ids from blockers
            (when not done).
        intent_profile: Programming paradigm intent — "auto" (default),
            "procedural", or "oop". Controls which check families run:
            - "procedural": OOP checks are skipped.
            - "oop": Full OOP check family is enforced.
            - "auto": Scans matched .TcPOU declarations for
              EXTENDS/IMPLEMENTS; resolves to "oop" if any are found,
              otherwise "procedural".

    Returns:
        JSON string produced via _with_meta (success/step-failure payloads)
        or _tool_error (argument/unexpected errors), so timing and
        execution-context metadata are always attached.
    """
    _t0 = time.monotonic()
    ctx = None
    try:
        # Validate the enforcement mode before resolving any context so an
        # invalid mode fails fast with a structured error payload.
        mode_error = _validate_enforcement_mode(enforcement_mode, start_time=_t0)
        if mode_error:
            return mode_error
        ctx = _resolve_execution_context(
            directory_path, enforcement_mode=enforcement_mode
        )

        # Lazy imports — presumably to avoid a circular import with the
        # server module; kept function-scoped as in the original.
        from glob import glob as _glob

        from twincat_validator.server import autofix_batch, validate_batch

        if validation_level not in ("all", "critical", "style"):
            return _tool_error(
                f"Invalid validation_level: {validation_level}",
                start_time=_t0,
                execution_context=ctx,
                valid_levels=["all", "critical", "style"],
            )
        if response_mode not in ("full", "compact", "summary"):
            return _tool_error(
                f"Invalid response_mode: {response_mode}",
                start_time=_t0,
                execution_context=ctx,
                valid_response_modes=["full", "compact", "summary"],
            )
        if intent_profile not in _VALID_INTENT_PROFILES:
            return _tool_error(
                f"Invalid intent_profile: {intent_profile}",
                start_time=_t0,
                execution_context=ctx,
                valid_intent_profiles=list(_VALID_INTENT_PROFILES),
            )

        # Resolve intent by scanning matched files so "auto" detects OOP content.
        _base_path = Path(directory_path)
        _all_files: set[Path] = set()
        for _pattern in file_patterns:
            _matches = _glob(str(_base_path / _pattern), recursive=True)
            _all_files.update(Path(f) for f in _matches)
        _tc_files = [
            f for f in _all_files if f.suffix in config.supported_extensions
        ]
        intent_profile_resolved = _batch_auto_resolve_intent(
            _tc_files, intent_profile
        )
        check_categories_executed = (
            ["core", "oop"] if intent_profile_resolved == "oop" else ["core"]
        )

        # Step 1: pre-check validation. Each step returns a JSON string; any
        # non-success result short-circuits the pipeline with step context.
        pre_validation = json.loads(
            await validate_batch(
                file_patterns=file_patterns,
                directory_path=directory_path,
                validation_level=validation_level,
                enforcement_mode=enforcement_mode,
                intent_profile=intent_profile_resolved,
            )
        )
        if not pre_validation.get("success", False):
            return _with_meta(
                {
                    "success": False,
                    "workflow": "batch_strict_pipeline",
                    "failed_step": "validate_batch_pre",
                    "step_error": pre_validation,
                    "done": False,
                    "terminal_mode": False,
                    "next_action": "inspect_error",
                },
                _t0,
                execution_context=ctx,
            )

        # Step 2: strict autofix pipeline with the canonical format profile.
        autofix_result = json.loads(
            await autofix_batch(
                file_patterns=file_patterns,
                directory_path=directory_path,
                create_backup=create_backup,
                profile="llm_strict",
                format_profile="twincat_canonical",
                strict_contract=True,
                create_implicit_files=True,
                orchestration_hints=True,
                enforcement_mode=enforcement_mode,
                intent_profile=intent_profile_resolved,
            )
        )
        if not autofix_result.get("success", False):
            return _with_meta(
                {
                    "success": False,
                    "workflow": "batch_strict_pipeline",
                    "failed_step": "autofix_batch",
                    "step_error": autofix_result,
                    "done": False,
                    "terminal_mode": False,
                    "next_action": "inspect_error",
                },
                _t0,
                execution_context=ctx,
            )

        # Step 3: post-check validation confirms the fixes took effect.
        post_validation = json.loads(
            await validate_batch(
                file_patterns=file_patterns,
                directory_path=directory_path,
                validation_level=validation_level,
                enforcement_mode=enforcement_mode,
                intent_profile=intent_profile_resolved,
            )
        )
        if not post_validation.get("success", False):
            return _with_meta(
                {
                    "success": False,
                    "workflow": "batch_strict_pipeline",
                    "failed_step": "validate_batch_post",
                    "step_error": post_validation,
                    "done": False,
                    "terminal_mode": False,
                    "next_action": "inspect_error",
                },
                _t0,
                execution_context=ctx,
            )

        workflow_compliance_warnings = _collect_intent_mismatch_warnings(
            intent_profile_resolved,
            steps=[
                ("validate_batch_pre", pre_validation),
                ("autofix_batch", autofix_result),
                ("validate_batch_post", post_validation),
            ],
        )

        batch_summary = post_validation.get("batch_summary", {})
        file_summaries = _build_batch_file_summaries(post_validation, autofix_result)
        # An empty batch is never declared safe: both flags default to False
        # when no files were summarized.
        safe_to_import = (
            all(item["safe_to_import"] for item in file_summaries)
            if file_summaries
            else False
        )
        safe_to_compile = (
            all(item["safe_to_compile"] for item in file_summaries)
            if file_summaries
            else False
        )
        done = (
            batch_summary.get("failed", 0) == 0
            and safe_to_import
            and safe_to_compile
        )
        blockers = _aggregate_blockers_from_files(file_summaries)

        result = {
            "success": True,
            "workflow": "batch_strict_pipeline",
            "tools_used": ["validate_batch", "autofix_batch", "validate_batch"],
            "file_patterns": file_patterns,
            "directory_path": directory_path,
            "response_mode": response_mode,
            "intent_profile_requested": intent_profile,
            "intent_profile_resolved": intent_profile_resolved,
            "check_categories_executed": check_categories_executed,
            "workflow_compliance_warnings": workflow_compliance_warnings,
            "batch_summary": batch_summary,
            "safe_to_import": safe_to_import,
            "safe_to_compile": safe_to_compile,
            "files": file_summaries,
            "done": done,
            "status": "done" if done else "blocked",
            "blocking_count": len(blockers),
            "blockers": blockers,
            "effective_oop_policy": {
                "policy_source": ctx.policy_source,
                "policy": ctx.effective_oop_policy,
            },
        }
        # Full mode embeds the raw per-step payloads; other modes rely on
        # _shape_batch_response to project the result down.
        if response_mode == "full":
            result["pre_validation"] = pre_validation
            result["autofix"] = autofix_result
            result["post_validation"] = post_validation
        if done:
            result["terminal_mode"] = True
            result["next_action"] = "done_no_further_autofix"
            result["allow_followup_autofix_without_user_request"] = False
        else:
            result["terminal_mode"] = False
            result["next_action"] = "manual_intervention_or_targeted_fix"
        if include_knowledge_hints:
            # Deduplicate check ids across blockers, skipping falsy/missing ones.
            result["recommended_check_ids"] = sorted(
                {b["check_id"] for b in blockers if b.get("check_id")}
            )

        _assert_orchestration_contract(result, is_batch=True)

        # Apply response shaping (summary mode projects to minimal payload).
        shaped, unknown_sections = _shape_batch_response(
            result, response_mode, include_sections
        )
        if unknown_sections:
            shaped["unknown_include_sections"] = unknown_sections
        return _with_meta(shaped, _t0, execution_context=ctx)
    except Exception as e:
        # Top-level tool boundary: convert any unexpected failure into a
        # structured error. If context resolution itself failed (ctx is None),
        # report the unresolved policy fields instead.
        error_kwargs = {"execution_context": ctx}
        if ctx is None:
            error_kwargs.update(unresolved_policy_fields(enforcement_mode))
        return _tool_error(str(e), start_time=_t0, **error_kwargs)