validate_batch
Validate multiple TwinCAT automation project files simultaneously using glob patterns to check code quality, structure, and compliance with IEC 61131-3 standards at specified validation levels.
Instructions
Validate multiple TwinCAT files matching glob patterns.
Args: file_patterns: Glob patterns (e.g., ["*.TcPOU"]) directory_path: Base directory validation_level: "all", "critical", or "style" intent_profile: Programming paradigm intent — "auto" (default), "procedural", or "oop". With "auto", the matched .TcPOU files are scanned for EXTENDS/IMPLEMENTS; if any are found the batch resolves to "oop", otherwise "procedural". ctx: FastMCP context for per-file progress notifications (injected automatically)
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| file_patterns | Yes | | |
| directory_path | No | | . |
| validation_level | No | | all |
| enforcement_mode | No | | strict |
| intent_profile | No | | auto |
| ctx | No | | |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | | |
Implementation Reference
- The validate_batch tool handler implementation using FastMCP @mcp.tool().
@mcp.tool()
async def validate_batch(
    file_patterns: list[str],
    directory_path: str = ".",
    validation_level: str = "all",
    enforcement_mode: str = DEFAULT_ENFORCEMENT_MODE,
    intent_profile: str = "auto",
    ctx: Optional[Any] = None,
) -> str:
    """Validate multiple TwinCAT files matching glob patterns.

    Args:
        file_patterns: Glob patterns (e.g., ["*.TcPOU"])
        directory_path: Base directory
        validation_level: "all", "critical", or "style"
        intent_profile: Programming paradigm intent — "auto" (default),
            "procedural", or "oop". With "auto", the matched .TcPOU files are
            scanned for EXTENDS/IMPLEMENTS; if any are found the batch resolves
            to "oop", otherwise "procedural".
        ctx: FastMCP context for per-file progress notifications
            (injected automatically)
    """
    # _t0 anchors the tool's own elapsed-time metadata (see _with_meta /
    # _tool_error start_time kwargs); start_time below is a separate wall
    # clock used only for the reported batch processing_time.
    _t0 = time.monotonic()
    ctx_policy = None
    try:
        # Validate the enforcement mode first; a non-None return is a
        # ready-to-send error payload.
        mode_error = _validate_enforcement_mode(enforcement_mode, start_time=_t0)
        if mode_error:
            return mode_error
        ctx_policy = _resolve_execution_context(
            directory_path, enforcement_mode=enforcement_mode
        )
        if intent_profile not in _VALID_INTENT_PROFILES:
            return _tool_error(
                f"Invalid intent_profile: {intent_profile}",
                start_time=_t0,
                execution_context=ctx_policy,
                valid_intent_profiles=list(_VALID_INTENT_PROFILES),
            )

        from glob import glob

        start_time = time.time()
        base_path = Path(directory_path)
        if not base_path.exists():
            return _tool_error(
                f"Directory not found: {directory_path}",
                start_time=_t0,
                execution_context=ctx_policy,
                error_type="DirectoryNotFoundError",
            )

        # Collect unique matches across all patterns; a set deduplicates
        # files matched by more than one pattern.
        all_files: set[Path] = set()
        for pattern in file_patterns:
            pattern_path = base_path / pattern
            matches = glob(str(pattern_path), recursive=True)
            all_files.update(Path(f) for f in matches)
        # Keep only files whose extension the validator supports.
        tc_files = [f for f in all_files if f.suffix in config.supported_extensions]

        # Resolve intent after file discovery so "auto" can scan .TcPOU declarations.
        _intent_resolved = _batch_auto_resolve_intent(tc_files, intent_profile)
        # For a procedural batch, OOP-category rules are excluded from validation.
        _exclude_cats = frozenset({"oop"}) if _intent_resolved == "procedural" else None

        if not tc_files:
            return _tool_error(
                "No TwinCAT files found matching patterns",
                start_time=_t0,
                execution_context=ctx_policy,
                patterns=file_patterns,
                directory=str(base_path),
            )

        total = len(tc_files)
        results = []
        failed_files = []
        passed = 0
        failed = 0
        warnings = 0

        for idx, file_path in enumerate(tc_files):
            # Per-file progress notification via the injected FastMCP context.
            await _emit_progress(
                ctx,
                current=idx,
                total=total,
                message=f"validate {idx + 1}/{total}: {file_path.name}",
            )
            try:
                file = TwinCATFile.from_path(file_path)
                validation_time_start = time.time()
                engine_result = validation_engine.validate(
                    file, validation_level, exclude_categories=_exclude_cats
                )
                validation_time = time.time() - validation_time_start
                file_result = _convert_engine_result_to_mcp_format(
                    engine_result, file, validation_time, validation_level
                )
                status = file_result["validation_status"]
                if status == "passed":
                    passed += 1
                elif status == "failed":
                    failed += 1
                elif status == "warnings":
                    warnings += 1
                # Derive per-file contract state canonically (RC-1).
                per_file_cs = derive_contract_state(file_result.get("issues", []))
                results.append(
                    {
                        "file_path": str(file_path),
                        "status": status,
                        # error_count from canonical contract (issues with severity error/critical),
                        # not summary["failed"] which counts failed checks, not error issues.
                        "error_count": per_file_cs.error_count,
                        "warning_count": per_file_cs.warning_count,
                        # Flat per-file safety schema for consistency with process_twincat_batch
                        "safe_to_import": per_file_cs.safe_to_import,
                        "safe_to_compile": per_file_cs.safe_to_compile,
                        "blocking_count": per_file_cs.blocking_count,
                        "blockers": per_file_cs.blockers,
                        "validation_result": file_result,
                    }
                )
            except Exception as e:
                # A file that raises (parse/IO error) is recorded separately
                # and counted as failed; the batch continues with the rest.
                failed_files.append(
                    {
                        "file_path": str(file_path),
                        "error": str(e),
                        "error_type": type(e).__name__,
                    }
                )
                failed += 1

        await _emit_progress(ctx, current=total, total=total, message="validate complete")

        batch_id = f"batch_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        # Batch-level safety flags: every processed file must be safe; an
        # empty result set is deliberately NOT safe.
        safe_to_import = (
            all(bool(item.get("safe_to_import", False)) for item in results)
            if results
            else False
        )
        safe_to_compile = (
            all(bool(item.get("safe_to_compile", False)) for item in results)
            if results
            else False
        )
        done = failed == 0 and safe_to_import and safe_to_compile
        blockers = _aggregate_batch_blockers(results)
        result = {
            "success": True,
            "batch_id": batch_id,
            "processed_files": len(results),
            "total_files": total,
            "processing_time": round(time.time() - start_time, 3),
            "batch_summary": {"passed": passed, "failed": failed, "warnings": warnings},
            "files": results,
            "failed_files": failed_files,
            "safe_to_import": safe_to_import,
            "safe_to_compile": safe_to_compile,
            "done": done,
            "status": "done" if done else "blocked",
            "blocking_count": len(blockers),
            "blockers": blockers,
            "next_action": (
                "done_no_further_validation"
                if done
                else "manual_intervention_or_targeted_fix"
            ),
        }
        # Contract self-check before returning the payload.
        _assert_validate_batch_contract(result)
        return _with_meta(result, _t0, execution_context=ctx_policy)
    except Exception as e:
        # Top-level boundary: convert any unexpected failure into the tool's
        # standard error payload. If the policy context was never resolved,
        # include the unresolved-policy fields so callers can diagnose.
        error_kwargs = {"execution_context": ctx_policy}
        if ctx_policy is None:
            error_kwargs.update(unresolved_policy_fields(enforcement_mode))
        return _tool_error(str(e), start_time=_t0, **error_kwargs)

- twincat_validator/mcp_tools_batch.py:127-128 (registration)Function that registers batch tools, including validate_batch.
def register_batch_tools() -> None: """Register all batch tool handlers with the mcp instance."""