check_specific
Run selected validation checks on TwinCAT files to verify code quality and IEC 61131-3 OOP compliance for industrial automation projects.
Instructions
Run specific validation checks on a TwinCAT file.
Args:
- `file_path`: Path to the TwinCAT file to validate.
- `check_names`: List of check IDs to run.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| file_path | Yes | Path to TwinCAT file | |
| check_names | Yes | List of check IDs to run | |
| enforcement_mode | No | | strict |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | | |
Implementation Reference
- The implementation of the check_specific MCP tool.
def check_specific(
    file_path: str,
    check_names: list[str],
    enforcement_mode: str = DEFAULT_ENFORCEMENT_MODE,
) -> str:
    """Run specific validation checks on a TwinCAT file.

    Args:
        file_path: Path to TwinCAT file.
        check_names: List of check IDs (or aliases resolvable via
            ``config.check_id_map``) to run.
        enforcement_mode: Policy mode passed through to context resolution;
            defaults to ``DEFAULT_ENFORCEMENT_MODE``.

    Returns:
        A serialized tool result: either the success payload produced by
        ``_with_meta`` or an error payload from ``_tool_error``.
    """
    _t0 = time.monotonic()
    ctx = None
    try:
        mode_error = _validate_enforcement_mode(enforcement_mode, start_time=_t0)
        if mode_error:
            return mode_error
        ctx = _resolve_execution_context(file_path, enforcement_mode=enforcement_mode)

        # Resolve friendly names/aliases to canonical check IDs; unknown
        # names pass through unchanged so they can be reported below.
        check_id_map = config.check_id_map
        check_ids = [check_id_map.get(name, name) for name in check_names]

        valid_checks = set(config.validation_checks.keys())
        invalid = set(check_ids) - valid_checks
        if invalid:
            return _tool_error(
                f"Invalid check names: {', '.join(invalid)}",
                file_path=file_path,
                start_time=_t0,
                execution_context=ctx,
                valid_checks=sorted(valid_checks),
            )

        path, error = _validate_file_path(file_path, start_time=_t0, execution_context=ctx)
        if error:
            return error
        file = TwinCATFile.from_path(path)

        # Hoisted: the import is loop-invariant, no need to re-execute it
        # for every requested check.
        from twincat_validator.exceptions import CheckNotFoundError

        all_issues = []
        check_results = []
        for check_id in check_ids:
            if check_id in config.disabled_checks:
                continue
            try:
                check_class = CheckRegistry.get_check(check_id)
            except CheckNotFoundError:
                # Config-valid but not registered: skip silently, matching
                # the original behavior.
                continue
            check = check_class()
            if check.should_skip(file):
                continue
            issues = check.run(file)

            # Apply configured severity overrides before status classification.
            if check_id in config.severity_overrides:
                for issue in issues:
                    issue.severity = config.severity_overrides[check_id]

            for issue in issues:
                issue.check_id = (
                    check_id  # stamp for dedupe/tracing parity with ValidationEngine
                )
                if issue.line_num is None:
                    # The check did not pinpoint a location; infer one from
                    # the file content and the issue message.
                    line_num, column = infer_issue_location(
                        file.content, check_id, issue.message
                    )
                    issue.line_num = line_num
                    issue.column = column
                _apply_known_limitation_tags(check_id, issue, file)
            all_issues.extend(issues)

            check_config = config.validation_checks.get(check_id, {})
            if any(i.severity in ("error", "critical") for i in issues):
                status = "failed"
            elif any(i.severity == "warning" for i in issues):
                status = "warning"
            else:
                status = "passed"
            check_results.append(
                {
                    "id": check_id,
                    "name": check_config.get("name", "Unknown Check"),
                    "status": status,
                    "message": check_config.get("description", ""),
                    "auto_fixable": check_config.get("auto_fixable", False),
                    "severity": check_config.get("severity", "info"),
                }
            )

        passed = sum(1 for c in check_results if c["status"] == "passed")
        failed = sum(1 for c in check_results if c["status"] == "failed")
        warnings = sum(1 for c in check_results if c["status"] == "warning")
        validation_status = "passed"
        if failed > 0:
            validation_status = "failed"
        elif warnings > 0:
            validation_status = "warnings"

        all_issues = _dedupe_validation_issues(all_issues)
        result = {
            "success": True,
            "file_path": str(path),
            "validation_status": validation_status,
            "checks_requested": len(check_names),
            "summary": {"passed": passed, "failed": failed, "warnings": warnings},
            "checks": check_results,
            "issues": [issue.to_dict() for issue in all_issues],
        }
        return _with_meta(result, _t0, execution_context=ctx)
    except Exception as e:
        # ctx is None when the failure happened before context resolution;
        # report the unresolved-policy fields in that case.
        error_kwargs = {"execution_context": ctx}
        if ctx is None:
            error_kwargs.update(unresolved_policy_fields(enforcement_mode))
        return _tool_error(
            str(e),
            file_path=file_path,
            start_time=_t0,
            **error_kwargs,
        )