verify_determinism_batch
Validate TwinCAT 3 XML files for deterministic behavior by running batch orchestration twice and reporting idempotence stability across multiple files.
Instructions
Run strict batch orchestration twice and report per-file idempotence stability.
Args:
- file_patterns: Glob patterns (e.g., ["*.TcPOU"]).
- directory_path: Base directory.
- create_backup: Create backup files before fixing.
- validation_level: "all", "critical", or "style".
- enforcement_mode: Policy enforcement mode ("strict" or "compat").
- response_mode: "summary" (minimal, default), "compact", or "full".
- include_sections: In summary mode only — optional heavy sections to include. Supported: "blockers", "pre_validation", "autofix", "post_validation", "effective_oop_policy", "meta_detailed". Unknown names are ignored with a warning in the response. Has no effect in compact or full mode.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| file_patterns | Yes | Glob patterns (e.g., ["*.TcPOU"]) | |
| directory_path | No | Base directory | . |
| create_backup | No | Create backup files before fixing | false |
| validation_level | No | "all", "critical", or "style" | all |
| enforcement_mode | No | Policy enforcement mode ("strict" or "compat") | strict |
| response_mode | No | "summary" (minimal), "compact", or "full" | summary |
| include_sections | No | Optional heavy sections to include (summary mode only) | |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | JSON report of per-file idempotence stability across both passes | |
Implementation Reference
- The `verify_determinism_batch` tool handler function, which orchestrates batch processing twice and checks for determinism.
async def verify_determinism_batch(
    file_patterns: list[str],
    directory_path: str = ".",
    create_backup: bool = False,
    validation_level: str = "all",
    enforcement_mode: str = DEFAULT_ENFORCEMENT_MODE,
    response_mode: str = "summary",
    include_sections: list[str] | None = None,
) -> str:
    """Run strict batch orchestration twice and report per-file idempotence stability.

    Args:
        file_patterns: Glob patterns (e.g., ["*.TcPOU"])
        directory_path: Base directory
        create_backup: Create backup files before fixing
        validation_level: "all", "critical", or "style"
        enforcement_mode: Policy enforcement mode ("strict" or "compat")
        response_mode: "summary" (minimal, default), "compact", or "full".
        include_sections: In summary mode only — optional heavy sections to include.
            Supported: "blockers", "pre_validation", "autofix", "post_validation",
            "effective_oop_policy", "meta_detailed". Unknown names ignored with a
            warning in the response. Has no effect in compact or full mode.
    """
    started = time.monotonic()
    exec_ctx = None
    try:
        # Reject unsupported enforcement modes before any other work.
        bad_mode = _validate_enforcement_mode(enforcement_mode, start_time=started)
        if bad_mode:
            return bad_mode

        exec_ctx = _resolve_execution_context(directory_path, enforcement_mode=enforcement_mode)

        if response_mode not in ["full", "compact", "summary"]:
            return _tool_error(
                f"Invalid response_mode: {response_mode}",
                start_time=started,
                execution_context=exec_ctx,
                valid_response_modes=["full", "compact", "summary"],
            )

        async def _run_pass() -> dict:
            # One strict batch pass. Always use "compact" internally so
            # per-file data is available for aggregation; the caller's
            # preferred response_mode is applied to the final result only.
            return json.loads(
                await process_twincat_batch(
                    file_patterns=file_patterns,
                    directory_path=directory_path,
                    create_backup=create_backup,
                    validation_level=validation_level,
                    enforcement_mode=enforcement_mode,
                    response_mode="compact",
                )
            )

        first_pass = await _run_pass()
        if not first_pass.get("success", False):
            return json.dumps(first_pass, indent=2)

        second_pass = await _run_pass()
        if not second_pass.get("success", False):
            return json.dumps(second_pass, indent=2)

        by_path_first = {
            str(entry.get("file_path", "")): entry
            for entry in first_pass.get("files", [])
        }
        by_path_second = {
            str(entry.get("file_path", "")): entry
            for entry in second_pass.get("files", [])
        }

        # A file is "stable" when the second pass left its content untouched.
        files: list[dict] = []
        stable_all = True
        for path in sorted(set(by_path_first) | set(by_path_second)):
            entry_one = by_path_first.get(path, {})
            entry_two = by_path_second.get(path, {})
            changed_one = bool(entry_one.get("content_changed", False))
            changed_two = bool(entry_two.get("content_changed", False))
            is_stable = not changed_two
            stable_all = stable_all and is_stable
            files.append(
                {
                    "file_path": path,
                    "file_name": Path(path).name if path else "",
                    "safe_to_import": bool(entry_two.get("safe_to_import", False)),
                    "safe_to_compile": bool(entry_two.get("safe_to_compile", False)),
                    "content_changed_first_pass": changed_one,
                    "content_changed_second_pass": changed_two,
                    "stable": is_stable,
                }
            )

        # RC-2 fix: done requires stability AND safety, not just stability.
        # Aggregate safety directly from per-file entries (already canonical —
        # set by process_twincat_batch via derive_contract_state). Do NOT
        # derive solely from per-file blockers: fixable errors make a file
        # unsafe without contributing blockers, so blockers-only aggregation
        # could report safe_to_compile=True for genuinely unsafe files.
        safe_to_import = bool(files) and all(f["safe_to_import"] for f in files)
        safe_to_compile = bool(files) and all(f["safe_to_compile"] for f in files)

        # Real blockers come from the second-pass results; append a
        # determinism-specific blocker when pass 2 still changed content.
        all_blockers: list[dict] = [
            blocker
            for entry in second_pass.get("files", [])
            for blocker in (entry.get("blockers", []) or [])
        ]
        if not stable_all:
            all_blockers.append(
                {"check": "determinism", "message": "Second pass changed content", "line": None}
            )
        blocking_count = len(all_blockers)

        # "done" requires stability, both safety flags, and zero blockers.
        done = stable_all and safe_to_import and safe_to_compile and blocking_count == 0

        report = {
            "success": True,
            "workflow": "determinism_batch",
            "tools_used": ["process_twincat_batch", "process_twincat_batch"],
            "file_patterns": file_patterns,
            "directory_path": directory_path,
            "response_mode": response_mode,
            "stable": stable_all,
            "files": files,
            "first_pass_summary": first_pass.get("batch_summary", {}),
            "second_pass_summary": second_pass.get("batch_summary", {}),
            "batch_summary": second_pass.get("batch_summary", {}),
            "safe_to_import": safe_to_import,
            "safe_to_compile": safe_to_compile,
            "done": done,
            "status": "done" if done else "blocked",
            "blocking_count": blocking_count,
            "blockers": all_blockers,
            "terminal_mode": done,
            "next_action": (
                "done_no_further_autofix" if done else "manual_intervention_or_targeted_fix"
            ),
            "effective_oop_policy": {
                "policy_source": exec_ctx.policy_source,
                "policy": exec_ctx.effective_oop_policy,
            },
        }
        _assert_orchestration_contract(report, is_batch=True)

        # Shape the response (determinism-specific file keys preserve stability fields).
        shaped, unknown = _shape_batch_response(
            report,
            response_mode,
            include_sections,
            summary_file_keys=_DETERMINISM_SUMMARY_FILE_KEYS,
        )
        if unknown:
            shaped["unknown_include_sections"] = unknown
        return _with_meta(shaped, started, execution_context=exec_ctx)
    except Exception as exc:
        err_kwargs = {"execution_context": exec_ctx}
        if exec_ctx is None:
            err_kwargs.update(unresolved_policy_fields(enforcement_mode))
        return _tool_error(str(exc), start_time=started, **err_kwargs)