{
"$schema": "./coderef/tool-implementation-template-schema.json",
"template_info": {
"name": "Tool Implementation Plan Template",
"version": "1.0.0",
"created_date": "2025-10-10",
"description": "Standardized template for planning MCP tool implementations in docs-mcp project",
"usage": "Copy this template, fill in all sections, and use task IDs (INFRA-NNN, SCAN-NNN, etc.) to track implementation progress",
"compliance": "Follows docs-mcp architecture patterns: ARCH-001, QUA-001, QUA-002, REF-002, REF-003, ARCH-003"
},
"document_info": {
"title": "check_consistency Implementation Plan",
"tool_id": 10,
"version": "1.4.0",
"created_date": "2025-10-10",
"status": "planning",
"estimated_effort": "8 hours",
"description": "Real-time consistency checker for new code - quality gate that validates code changes against established standards before commit/merge"
},
"executive_summary": {
"purpose": "Complete the Consistency Trilogy by providing a lightweight, fast quality gate that checks new/modified code for standards violations. Unlike audit_codebase (full codebase scan), this tool focuses only on changed files.",
"value_proposition": "Enables developers to catch standards violations in real-time (pre-commit) rather than after-the-fact (audit). Provides immediate feedback at the exact moment when fixing violations is cheapest and easiest.",
"real_world_analogy": "Like a spell-checker that underlines errors as you type, rather than waiting until you've written the entire document. Catches problems early when context is fresh and changes are small.",
"use_case": "Developer makes changes to 3 React components, runs git commit. Pre-commit hook triggers check_consistency, which scans only those 3 files (not the entire codebase), detects a missing ARIA label in one component, and prevents the commit with a clear error message. Developer fixes the issue immediately and commits successfully.",
"output": "Pass/fail status, list of violations with file:line locations, violation count by severity, exit code for CI/CD integration, and concise terminal-friendly output"
},
"risk_assessment": {
"overall_risk": "Low",
"complexity": "Medium",
"scope": "Medium - 7 files affected (server.py, tool_handlers.py, constants.py, type_defs.py, validation.py, generators/consistency_checker.py, plus documentation)",
"risk_factors": {
"file_system": "Low risk - Read-only file operations, no file writes except reading git state. Graceful fallback for non-git repos.",
"dependencies": "Low risk - Reuses existing AuditGenerator (composition pattern), minimal new code. Git CLI commands via subprocess.",
"performance": "Medium risk - Must achieve <1s for typical changes (3-10 files). Mitigated by: only scanning changed files, reusing fast AuditGenerator methods, optional caching layer (v1.5.0).",
"security": "Low risk - Follows SEC-001 (path canonicalization), validates file list to prevent path traversal, no credential storage, read-only git operations.",
"breaking_changes": "None - New tool, no changes to existing tools"
}
},
"current_state_analysis": {
"affected_files": [
"server.py - Add Tool definition at ~line 120 (after audit_codebase)",
"tool_handlers.py - Add handle_check_consistency at ~line 840, register at ~line 62 in TOOL_HANDLERS",
"constants.py - Add SeverityThreshold enum at ~line 100",
"type_defs.py - Add ConsistencyResultDict, CheckResultDict at ~line 220",
"validation.py - Add validate_severity_threshold, validate_file_list at ~line 280",
"generators/consistency_checker.py - New file (~300 lines)",
"README.md - Update Available Tools section, add example, update version to 1.4.0",
"CLAUDE.md - Add check_consistency to Tool Catalog section, update version to 1.4.0",
"examples/ - Create pre-commit-hook.sh, github-actions.yml, gitlab-ci.yml"
],
"dependencies": [
"Existing: AuditGenerator (composition - reuse violation detection methods)",
"Existing: parse_standards_documents() from AuditGenerator",
"Existing: detect_ui_violations(), detect_behavior_violations(), detect_ux_violations() from AuditGenerator",
"Existing: StandardsDataDict, ViolationDict from type_defs.py",
"New: ConsistencyChecker class in generators/consistency_checker.py",
"New: Git integration methods (detect_changed_files, is_git_repository)",
"System: Git CLI (via subprocess) - graceful fallback if not available"
],
"architecture_context": "Operates at the same layer as audit_codebase but with different scope. While audit_codebase is comprehensive and slow (scans all files), check_consistency is targeted and fast (scans only changed files). Both share the same violation detection logic via AuditGenerator composition."
},
"key_features": [
"Auto-detection of changed files via git diff (staged, unstaged, commit ranges)",
"Manual file list specification (for non-git projects or custom workflows)",
"Severity threshold filtering (fail only on critical, major, or all violations)",
"Scope filtering (check only UI, behavior, UX patterns, or all)",
"Exit code handling for CI/CD integration (0=pass, 1=fail)",
"Terminal-friendly output (concise, color-coded, file:line format)",
"Composition with AuditGenerator (zero code duplication)",
"Graceful fallback for non-git repositories",
"Integration examples (pre-commit hooks, GitHub Actions, GitLab CI)"
],
"tool_specification": {
"name": "check_consistency",
"description": "Check code changes against established standards for consistency violations. Lightweight quality gate for pre-commit checks and CI/CD pipelines. Only scans modified files. Auto-detects changes via git or accepts explicit file list.",
"input_schema": {
"type": "object",
"properties": {
"project_path": {
"type": "string",
"description": "Absolute path to project directory",
"required": true
},
"files": {
"type": "array",
"items": "string",
"description": "List of files to check (relative to project_path). If not provided, auto-detects git changes (staged files by default).",
"required": false,
"default": "auto-detect from git"
},
"standards_dir": {
"type": "string",
"description": "Path to standards directory (relative to project_path)",
"required": false,
"default": "coderef/standards"
},
"severity_threshold": {
"type": "string",
"enum": ["critical", "major", "minor"],
"description": "Fail if violations at or above this severity are found. 'critical'=only critical, 'major'=critical+major, 'minor'=all violations",
"required": false,
"default": "major"
},
"scope": {
"type": "array",
"items": {
"enum": ["ui_patterns", "behavior_patterns", "ux_patterns", "all"]
},
"description": "Which standards to check against. 'all' checks UI, behavior, and UX patterns.",
"required": false,
"default": ["all"]
},
"fail_on_violations": {
"type": "boolean",
"description": "Return error status (exit code 1) if violations found. Set false for reporting only.",
"required": false,
"default": true
}
},
"required": ["project_path"]
},
"output": "JSON with status (pass|fail), violations_found (int), violations (List[ViolationDict]), files_checked (int), files_list (List[str]), duration (float), severity_threshold (str), exit_code (0|1)"
},
"architecture_design": {
"data_flow_diagram": "Input (project_path, files?) → Git Detection (if no files) → Standards Parsing → File Loop (check each file) → Violation Detection (via AuditGenerator) → Severity Filtering → Summary Generation → Output (pass/fail + violations)",
"module_interactions": "Handler (tool_handlers.py) → ConsistencyChecker (generators/consistency_checker.py) → AuditGenerator (generators/audit_generator.py) → Git CLI (subprocess) → File System (read files) → Output (JSON + exit code)",
"file_structure_changes": [
"New files: generators/consistency_checker.py (~300 lines), examples/pre-commit-hook.sh, examples/github-actions.yml, examples/gitlab-ci.yml",
"Modified files: server.py (+35 lines), tool_handlers.py (+120 lines), constants.py (+10 lines), type_defs.py (+20 lines), validation.py (+30 lines), README.md (version + examples), CLAUDE.md (tool catalog entry)"
]
},
"implementation_phases": {
"phase_1_infrastructure": {
"title": "Core Infrastructure Setup",
"duration": "1 hour",
"tasks": [
{
"id": "INFRA-001",
"task": "Add check_consistency tool schema to server.py",
"location": "server.py:~120 (after audit_codebase tool definition)",
"details": "Add Tool definition with name='check_consistency', 6 input parameters (project_path, files, standards_dir, severity_threshold, scope, fail_on_violations). Follow existing tool schema pattern.",
"effort": "15 minutes"
},
{
"id": "INFRA-002",
"task": "Create handle_check_consistency handler in tool_handlers.py",
"location": "tool_handlers.py:~840 (new function after handle_audit_codebase)",
"details": "Create async def handle_check_consistency(arguments: dict) -> list[TextContent]. Follow standard handler pattern: validate inputs → log invocation → initialize ConsistencyChecker → detect files (if needed) → parse standards → check files → filter violations → generate summary → return result. Include full error handling (ValueError, FileNotFoundError, PermissionError, OSError, Exception).",
"effort": "30 minutes"
},
{
"id": "INFRA-003",
"task": "Register handler in TOOL_HANDLERS dict",
"location": "tool_handlers.py:~62 (TOOL_HANDLERS dict)",
"details": "Add 'check_consistency': handle_check_consistency to registry",
"effort": "1 minute"
},
{
"id": "INFRA-004",
"task": "Add constants to constants.py",
"location": "constants.py:~100 (after AuditScope enum)",
"details": "Add SeverityThreshold enum with CRITICAL='critical', MAJOR='major', MINOR='minor' values. Add helper method values() -> List[str]. Follow existing enum pattern (ChangeType, Severity, AuditSeverity).",
"effort": "10 minutes"
},
{
"id": "INFRA-005",
"task": "Create generators/consistency_checker.py skeleton",
"location": "generators/consistency_checker.py (new file)",
"details": "Create ConsistencyChecker class with __init__(project_path: Path, standards_dir: Path), method stubs for: detect_changed_files(mode='staged'), is_git_repository(), check_files(files, standards), filter_by_severity_threshold(violations, threshold), generate_check_summary(violations, files_checked, duration), format_violation_for_terminal(violation). Add docstrings for all methods. Import Path, List, subprocess, AuditGenerator, StandardsDataDict, ViolationDict.",
"effort": "20 minutes"
},
{
"id": "INFRA-006",
"task": "Add TypedDict definitions to type_defs.py",
"location": "type_defs.py:~220 (after AuditResultDict)",
"details": "Add ConsistencyResultDict (status: str, violations_found: int, violations: List[ViolationDict], files_checked: int, files_list: List[str], duration: float, severity_threshold: str, exit_code: int). Add CheckResultDict (file_path: str, violations: List[ViolationDict], clean: bool). Follow TypedDict pattern with total=False for optional fields (QUA-001).",
"effort": "10 minutes"
},
{
"id": "INFRA-007",
"task": "Add validation functions to validation.py",
"location": "validation.py:~280 (after validate_audit_scope)",
"details": "Add validate_severity_threshold(threshold: str) -> str: validates against SeverityThreshold enum values, raises ValueError if invalid. Add validate_file_list(files: list, project_path: Path) -> list: validates files is list of strings, all paths are relative (not absolute), no path traversal (../), raises ValueError if invalid. Follow existing validation function pattern (REF-003).",
"effort": "15 minutes"
}
]
},
"phase_2_git_integration": {
"title": "Git Integration",
"duration": "2 hours",
"approach": "Use subprocess to run git commands. Detect staged/unstaged files, support commit ranges. Graceful fallback if not a git repo or git not available.",
"tasks": [
{
"id": "GIT-001",
"task": "Implement detect_changed_files()",
"method": "def detect_changed_files(self, mode: str = 'staged') -> List[Path]",
"details": "Use git diff to detect modified/added files. Support modes: 'staged' (git diff --name-only --cached), 'unstaged' (git diff --name-only), 'all' (both staged + unstaged). Return list of Path objects relative to project root. Filter out deleted files. Handle subprocess errors gracefully. Log git commands executed.",
"effort": "1 hour"
},
{
"id": "GIT-002",
"task": "Implement get_file_content_from_git()",
"method": "def get_file_content_from_git(self, file_path: Path, ref: str = 'HEAD') -> str",
"details": "Get file content from git (git show ref:file_path). Used for comparing against standards. Return file content as string. Handle file not in git (return empty string). Handle binary files gracefully.",
"effort": "30 minutes"
},
{
"id": "GIT-003",
"task": "Implement is_git_repository()",
"method": "def is_git_repository(self) -> bool",
"details": "Check if project_path is a git repository (git rev-parse --is-inside-work-tree). Return True if git repo, False otherwise. Handle subprocess errors (git not installed, not a repo) by returning False. Log result.",
"effort": "15 minutes"
},
{
"id": "GIT-004",
"task": "Add git error handling",
"details": "Implement graceful fallback if not a git repo: if files parameter provided, use that list; if not provided and not a git repo, raise ValueError with clear message: 'Not a git repository and no files specified. Provide files parameter or run in a git repository.' Add logging for git operations and fallback scenarios.",
"effort": "15 minutes"
}
]
},
"phase_3_reuse_audit_logic": {
"title": "Reuse Audit Logic via Composition",
"duration": "1 hour",
"approach": "Import AuditGenerator and reuse its violation detection methods. ConsistencyChecker wraps AuditGenerator for targeted file scanning.",
"tasks": [
{
"id": "REUSE-001",
"task": "Import AuditGenerator in ConsistencyChecker",
"details": "Import AuditGenerator from generators.audit_generator. In __init__, create self.audit_generator = AuditGenerator(self.project_path, self.standards_dir). Reuse parse_standards_documents(), detect_ui_violations(), detect_behavior_violations(), detect_ux_violations().",
"effort": "15 minutes"
},
{
"id": "REUSE-002",
"task": "Implement check_files()",
"method": "def check_files(self, files: List[Path], standards: StandardsDataDict, scope: List[str]) -> List[ViolationDict]",
"details": "For each file in files: read file content → determine file type → call appropriate AuditGenerator methods based on scope (detect_ui_violations if 'ui_patterns' in scope, etc.) → collect violations → return combined list. Track files scanned. Handle file read errors gracefully (log and skip). Filter by scope (only run checks for requested pattern types).",
"effort": "30 minutes"
},
{
"id": "REUSE-003",
"task": "Implement filter_by_severity_threshold()",
"method": "def filter_by_severity_threshold(self, violations: List[ViolationDict], threshold: str) -> List[ViolationDict]",
"details": "Filter violations to only those >= severity_threshold. Severity hierarchy: critical > major > minor. If threshold='critical', return only critical. If threshold='major', return critical + major. If threshold='minor', return all. Return filtered list.",
"effort": "15 minutes"
}
]
},
"phase_4_output_formatting": {
"title": "Output Formatting for Terminal & CI/CD",
"duration": "1 hour",
"approach": "Generate concise, actionable terminal output. Support exit codes for CI/CD integration.",
"tasks": [
{
"id": "OUTPUT-001",
"task": "Implement generate_check_summary()",
"method": "def generate_check_summary(self, violations: List[ViolationDict], files_checked: int, duration: float) -> str",
"details": "Generate concise summary for terminal output. Format: Pass/Fail status (with emoji or [PASS]/[FAIL]), violation count by severity (X critical, Y major, Z minor), files checked, duration. If pass: 'All files comply with established standards.' If fail: list violations using format_violation_for_terminal(). Return as formatted string.",
"effort": "30 minutes"
},
{
"id": "OUTPUT-002",
"task": "Implement format_violation_for_terminal()",
"method": "def format_violation_for_terminal(self, violation: ViolationDict) -> str",
"details": "Format single violation for CLI display. Format: 'file:line - [severity] message'. Example: 'src/components/Button.tsx:42 - [major] Button uses non-standard size 'xl''. Use relative file paths. Optionally color-code severity (critical=red, major=yellow, minor=blue) if terminal supports it.",
"effort": "20 minutes"
},
{
"id": "OUTPUT-003",
"task": "Add exit code handling",
"details": "In handler, determine exit code: if fail_on_violations=True and violations found (after severity filtering), exit_code=1; else exit_code=0. Include exit_code in ConsistencyResultDict. Document that CI/CD tools can use this exit code to fail builds.",
"effort": "10 minutes"
}
]
},
"phase_5_integration_examples": {
"title": "Integration Examples for Git Hooks & CI/CD",
"duration": "1 hour",
"approach": "Create practical, copy-paste-ready examples for pre-commit hooks, GitHub Actions, GitLab CI.",
"tasks": [
{
"id": "EXAMPLE-001",
"task": "Create pre-commit hook example",
"location": "examples/pre-commit-hook.sh",
"details": "Create bash script that calls check_consistency via Python. Script should: 1) Check if standards exist (error if not), 2) Call check_consistency with project_path=., 3) Exit with returned exit code. Include comments explaining installation (copy to .git/hooks/pre-commit, chmod +x). Handle both MCP and direct Python invocation.",
"effort": "20 minutes"
},
{
"id": "EXAMPLE-002",
"task": "Create GitHub Actions workflow example",
"location": "examples/github-actions.yml",
"details": "Create .github/workflows/consistency-check.yml that: 1) Checks out code, 2) Sets up Python, 3) Installs dependencies, 4) Calls check_consistency on changed files in PR, 5) Fails workflow if violations found. Include comments explaining setup and customization options.",
"effort": "20 minutes"
},
{
"id": "EXAMPLE-003",
"task": "Create GitLab CI example",
"location": "examples/gitlab-ci.yml",
"details": "Create .gitlab-ci.yml snippet for consistency checking. Similar to GitHub Actions but GitLab syntax. Include comments for integration into existing pipelines.",
"effort": "20 minutes"
}
]
},
"phase_6_testing": {
"title": "Testing & Validation",
"duration": "1 hour",
"tasks": [
{
"id": "TEST-001",
"task": "Create integration test script",
"location": "test_check_consistency.py",
"details": "Create test script that: 1) Creates sample violations in test files, 2) Stages files in git, 3) Calls check_consistency, 4) Verifies violations detected, 5) Tests severity filtering, 6) Tests explicit file list. Use docs-mcp codebase as test subject.",
"effort": "30 minutes"
},
{
"id": "TEST-002",
"task": "Run integration tests",
"details": "Execute test_check_consistency.py. Verify: auto-detection works, severity filtering works, exit codes correct, output format correct, graceful fallback for non-git repos. Fix any issues found.",
"effort": "20 minutes"
},
{
"id": "TEST-003",
"task": "Manual validation with pre-commit hook",
"details": "Install pre-commit hook in test repo. Make changes that violate standards. Attempt commit. Verify hook prevents commit and shows clear violations. Fix violations and verify commit succeeds.",
"effort": "10 minutes"
}
]
},
"phase_7_documentation": {
"title": "Documentation Updates",
"duration": "1 hour",
"tasks": [
{
"id": "DOC-001",
"task": "Update README.md",
"location": "README.md",
"details": "Add check_consistency to Available Tools section (Tool #10). Add 'Pre-Commit Quality Gate' example showing check_consistency usage. Update Consistency Trilogy status to 3/3 complete. Update version to 1.4.0 in header and version history table.",
"effort": "20 minutes"
},
{
"id": "DOC-002",
"task": "Update CLAUDE.md",
"location": "CLAUDE.md",
"details": "Add check_consistency to Tool Catalog section with: Purpose, Input parameters, Output format, Usage patterns (Pattern 5: Pre-Commit Quality Gate), Examples (basic usage, severity threshold, CI/CD integration), Critical notes. Update version to 1.4.0. Add to Consistency Trilogy workflow section.",
"effort": "30 minutes"
},
{
"id": "DOC-003",
"task": "Add usage examples to documentation",
"location": "README.md and CLAUDE.md",
"details": "Add clear examples of check_consistency usage: 1) Auto-detect (basic), 2) Explicit files, 3) Severity threshold, 4) Pre-commit hook, 5) GitHub Actions integration. Include expected output for each.",
"effort": "10 minutes"
}
]
}
},
"code_structure": {
"handler_implementation": {
"file": "tool_handlers.py",
"function": "handle_check_consistency",
"pattern": "Standard handler pattern with validation, logging, error handling",
"imports_required": [
"from mcp.types import TextContent",
"from pathlib import Path",
"from constants import Paths, SeverityThreshold, AuditScope",
"from validation import validate_project_path_input, validate_severity_threshold, validate_file_list, validate_audit_scope",
"from error_responses import ErrorResponse",
"from logger_config import logger, log_tool_call, log_error, log_security_event",
"from type_defs import ConsistencyResultDict",
"from generators.consistency_checker import ConsistencyChecker",
"import json",
"import time"
],
"pseudocode": [
"1. Log tool invocation with log_tool_call('check_consistency', args_keys)",
"2. Validate inputs: project_path (validate_project_path_input), files (validate_file_list if provided), standards_dir, severity_threshold (validate_severity_threshold), scope (validate_audit_scope), fail_on_violations",
"3. Check standards directory exists (ErrorResponse.not_found if missing)",
"4. Log operation start with logger.info()",
"5. Initialize ConsistencyChecker(project_path, standards_dir)",
"6. Detect changed files if files not provided: files = checker.detect_changed_files(mode='staged'). If not git repo and no files, ErrorResponse.invalid_input('Not a git repository...')",
"7. Log files to check: logger.info(f'Checking {len(files)} files')",
"8. Parse standards: standards = checker.audit_generator.parse_standards_documents(standards_dir)",
"9. Start timer: start_time = time.time()",
"10. Check files: violations = checker.check_files(files, standards, scope)",
"11. Filter by severity: violations = checker.filter_by_severity_threshold(violations, severity_threshold)",
"12. Calculate duration: duration = time.time() - start_time",
"13. Determine status: status = 'pass' if len(violations) == 0 else 'fail'",
"14. Determine exit code: exit_code = 1 if (fail_on_violations and status == 'fail') else 0",
"15. Generate summary: summary = checker.generate_check_summary(violations, len(files), duration)",
"16. Create result dict (ConsistencyResultDict)",
"17. Log success with logger.info()",
"18. Return TextContent with JSON result",
"19. Handle errors with ErrorResponse factory (ARCH-001)"
],
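"handler_sketch": {
"note": "Condensed sketch of the pseudocode above (happy path plus one error branch); helper signatures such as validate_project_path_input, validate_audit_scope, parse_standards_documents and ErrorResponse.invalid_input are assumptions, and the full error handling follows the table below.",
"code": "async def handle_check_consistency(arguments: dict) -> list[TextContent]:\n    log_tool_call('check_consistency', list(arguments.keys()))\n    try:\n        # Helper signatures below are assumptions for illustration\n        project_path = Path(validate_project_path_input(arguments['project_path']))\n        threshold = validate_severity_threshold(arguments.get('severity_threshold', 'major'))\n        scope = validate_audit_scope(arguments.get('scope', ['all']))\n        checker = ConsistencyChecker(project_path, project_path / arguments.get('standards_dir', 'coderef/standards'))\n        files = arguments.get('files') or checker.detect_changed_files(mode='staged')\n        standards = checker.audit_generator.parse_standards_documents(checker.standards_dir)\n        start = time.time()\n        violations = checker.filter_by_severity_threshold(checker.check_files(files, standards, scope), threshold)\n        result: ConsistencyResultDict = {\n            'status': 'pass' if not violations else 'fail',\n            'violations_found': len(violations),\n            'violations': violations,\n            'files_checked': len(files),\n            'files_list': [str(f) for f in files],\n            'duration': time.time() - start,\n            'severity_threshold': threshold,\n            'exit_code': 1 if (violations and arguments.get('fail_on_violations', True)) else 0,\n        }\n        return [TextContent(type='text', text=json.dumps(result, indent=2))]\n    except ValueError as e:\n        return ErrorResponse.invalid_input(str(e))"
},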
"error_handling": {
"ValueError": "ErrorResponse.invalid_input() - Invalid inputs (severity_threshold, file_list, not a git repo)",
"FileNotFoundError": "ErrorResponse.not_found() - Standards directory missing",
"PermissionError": "ErrorResponse.permission_denied() - Cannot read files/directories",
"OSError": "ErrorResponse.io_error() - I/O errors (git operations, file reads)",
"Exception": "ErrorResponse.generic_error() - Unexpected errors"
}
},
"generator_class": {
"file": "generators/consistency_checker.py",
"class": "ConsistencyChecker",
"inherits_from": "None (standalone class, composes with AuditGenerator)",
"methods": [
{
"name": "__init__",
"signature": "def __init__(self, project_path: Path, standards_dir: Path)",
"description": "Initialize ConsistencyChecker with project path and standards directory. Create AuditGenerator instance for composition.",
"returns": "None"
},
{
"name": "is_git_repository",
"signature": "def is_git_repository(self) -> bool",
"description": "Check if project is a git repository by running 'git rev-parse --is-inside-work-tree'.",
"returns": "True if git repo, False otherwise"
},
{
"name": "detect_changed_files",
"signature": "def detect_changed_files(self, mode: str = 'staged') -> List[Path]",
"description": "Detect changed files using git diff. Supports modes: 'staged', 'unstaged', 'all'. Returns list of Path objects relative to project root.",
"returns": "List[Path] of changed files"
},
{
"name": "get_file_content_from_git",
"signature": "def get_file_content_from_git(self, file_path: Path, ref: str = 'HEAD') -> str",
"description": "Get file content from git at specific ref using 'git show ref:file_path'.",
"returns": "File content as string"
},
{
"name": "check_files",
"signature": "def check_files(self, files: List[Path], standards: StandardsDataDict, scope: List[str]) -> List[ViolationDict]",
"description": "Check specific files for violations using AuditGenerator methods. For each file, reads content and calls detect_ui/behavior/ux_violations based on scope. Collects and returns all violations.",
"returns": "List[ViolationDict] of violations found"
},
{
"name": "filter_by_severity_threshold",
"signature": "def filter_by_severity_threshold(self, violations: List[ViolationDict], threshold: str) -> List[ViolationDict]",
"description": "Filter violations to only those at or above severity threshold. Hierarchy: critical > major > minor.",
"returns": "List[ViolationDict] of filtered violations"
},
{
"name": "generate_check_summary",
"signature": "def generate_check_summary(self, violations: List[ViolationDict], files_checked: int, duration: float) -> str",
"description": "Generate concise summary for terminal output. Shows pass/fail status, violation counts by severity, files checked, duration. Lists violations using format_violation_for_terminal().",
"returns": "Formatted summary string"
},
{
"name": "format_violation_for_terminal",
"signature": "def format_violation_for_terminal(self, violation: ViolationDict) -> str",
"description": "Format single violation for CLI display. Format: 'file:line - [severity] message'.",
"returns": "Formatted violation string"
}
]
}
},
"integration_with_existing_system": {
"follows_patterns": [
"QUA-002: Handler registry pattern - Registered in TOOL_HANDLERS dict",
"ARCH-001: ErrorResponse factory for errors - All errors use ErrorResponse methods",
"REF-003: Input validation at boundaries - All inputs validated via validation.py functions",
"ARCH-003: Structured logging for all operations - log_tool_call, log_error, logger.info used throughout",
"QUA-001: TypedDict for complex return types - ConsistencyResultDict, CheckResultDict",
"REF-002: Constants/enums instead of magic strings - SeverityThreshold enum, Paths constants"
],
"constants_additions": {
"file": "constants.py",
"Paths": "None (reuses existing Paths.STANDARDS_DIR)",
"Files": "None (no new file name constants needed)",
"enums": {
"description": "SeverityThreshold enum for severity_threshold parameter validation",
"code": "class SeverityThreshold(str, Enum):\n CRITICAL = 'critical'\n MAJOR = 'major'\n MINOR = 'minor'\n\n @classmethod\n def values(cls) -> List[str]:\n return [e.value for e in cls]"
}
},
"type_defs_additions": {
"file": "type_defs.py",
"code": "class ConsistencyResultDict(TypedDict, total=False):\n status: str # 'pass' | 'fail'\n violations_found: int\n violations: List[ViolationDict]\n files_checked: int\n files_list: List[str]\n duration: float\n severity_threshold: str\n exit_code: int\n\nclass CheckResultDict(TypedDict, total=False):\n file_path: str\n violations: List[ViolationDict]\n clean: bool"
},
"validation_additions": {
"file": "validation.py",
"code": "def validate_severity_threshold(threshold: str) -> str:\n \"\"\"Validate severity_threshold parameter.\"\"\"\n from constants import SeverityThreshold\n valid_thresholds = SeverityThreshold.values()\n if threshold not in valid_thresholds:\n raise ValueError(f\"Invalid severity_threshold: {threshold}. Must be one of: {', '.join(valid_thresholds)}\")\n return threshold\n\ndef validate_file_list(files: list, project_path: Path) -> list:\n \"\"\"Validate file list parameter.\"\"\"\n if not isinstance(files, list):\n raise ValueError(\"files must be a list\")\n \n for file_path in files:\n if not isinstance(file_path, str):\n raise ValueError(f\"All file paths must be strings, got: {type(file_path)}\")\n \n # Check for absolute paths\n if Path(file_path).is_absolute():\n raise ValueError(f\"File paths must be relative to project_path, got absolute path: {file_path}\")\n \n # Check for path traversal\n if '..' in file_path:\n raise ValueError(f\"Path traversal detected in file path: {file_path}\")\n \n return files"
}
},
"testing_strategy": {
"unit_tests": [
{
"test": "test_detect_changed_files_staged",
"verifies": "detect_changed_files(mode='staged') returns only staged files",
"task_id": "TEST-001"
},
{
"test": "test_detect_changed_files_unstaged",
"verifies": "detect_changed_files(mode='unstaged') returns only unstaged files",
"task_id": "TEST-001"
},
{
"test": "test_filter_by_severity_threshold_critical",
"verifies": "filter_by_severity_threshold('critical') returns only critical violations",
"task_id": "TEST-001"
},
{
"test": "test_filter_by_severity_threshold_major",
"verifies": "filter_by_severity_threshold('major') returns critical + major violations",
"task_id": "TEST-001"
},
{
"test": "test_is_git_repository_true",
"verifies": "is_git_repository() returns True for git repos",
"task_id": "TEST-001"
},
{
"test": "test_is_git_repository_false",
"verifies": "is_git_repository() returns False for non-git directories",
"task_id": "TEST-001"
}
],
"integration_tests": [
{
"test": "test_check_consistency_with_violations",
"project": "docs-mcp codebase with intentional standards violations in test files",
"expected": "Status='fail', violations list populated, exit_code=1",
"task_id": "TEST-002"
},
{
"test": "test_check_consistency_clean_files",
"project": "docs-mcp codebase with compliant test files",
"expected": "Status='pass', violations list empty, exit_code=0",
"task_id": "TEST-002"
},
{
"test": "test_check_consistency_explicit_file_list",
"project": "docs-mcp codebase with specific files provided (not git auto-detect)",
"expected": "Only specified files checked, violations detected correctly",
"task_id": "TEST-002"
},
{
"test": "test_check_consistency_severity_filtering",
"project": "docs-mcp codebase with mixed severity violations",
"expected": "Only violations >= severity_threshold returned",
"task_id": "TEST-002"
},
{
"test": "test_check_consistency_non_git_repo",
"project": "Non-git directory with explicit file list",
"expected": "Graceful fallback, files checked correctly",
"task_id": "TEST-002"
}
],
"manual_validation": [
{
"step": "Install pre-commit hook in test repo, make changes violating standards, attempt commit",
"verify": "Commit blocked with clear violation messages",
"task_id": "TEST-003"
},
{
"step": "Fix violations and attempt commit again",
"verify": "Commit succeeds with pass message",
"task_id": "TEST-003"
},
{
"step": "Test GitHub Actions workflow in test PR",
"verify": "Workflow fails if violations found, passes if clean",
"task_id": "TEST-003"
}
],
"edge_cases": {
"description": "Comprehensive edge case testing to ensure robustness",
"test_scenarios": [
{
"scenario": "No standards directory",
"setup": "Delete or rename coderef/standards directory",
"expected_behavior": "ErrorResponse.not_found with message about missing standards",
"verify": [
"Clear error message returned",
"Suggests running establish_standards first",
"Exit code appropriate for CI/CD"
],
"error_handling": "FileNotFoundError → ErrorResponse.not_found"
},
{
"scenario": "Git not installed",
"setup": "Run on system without git or mock git not found",
"expected_behavior": "If files provided: proceed with explicit list. If files not provided: ErrorResponse.invalid_input",
"verify": [
"Graceful fallback when git unavailable",
"Clear error if both git and files missing",
"Logged appropriately"
],
"error_handling": "OSError → log and fallback or ErrorResponse.io_error"
},
{
"scenario": "Not a git repository",
"setup": "Run in directory without .git folder",
"expected_behavior": "If files provided: proceed. If files not provided: ErrorResponse.invalid_input",
"verify": [
"is_git_repository() returns False",
"Clear error message about git requirement",
"Suggests providing explicit file list"
],
"error_handling": "ValueError → ErrorResponse.invalid_input"
},
{
"scenario": "Empty file list (no changes)",
"setup": "Run in git repo with no staged files",
"expected_behavior": "Status='pass', violations_found=0, files_checked=0, message about no files to check",
"verify": [
"No errors raised",
"Pass status returned",
"Clear message about no files"
],
"error_handling": "No errors"
},
{
"scenario": "Invalid severity threshold",
"setup": "Pass severity_threshold='invalid'",
"expected_behavior": "ErrorResponse.invalid_input with valid options listed",
"verify": [
"validate_severity_threshold raises ValueError",
"Error message lists valid options: critical, major, minor",
"Tool does not proceed to checking"
],
"error_handling": "ValueError → ErrorResponse.invalid_input"
},
{
"scenario": "File list with path traversal",
"setup": "Pass files=['../../etc/passwd']",
"expected_behavior": "ErrorResponse.invalid_input about path traversal",
"verify": [
"validate_file_list detects ../",
"Security event logged",
"Tool does not proceed"
],
"error_handling": "ValueError → ErrorResponse.invalid_input + log_security_event"
},
{
"scenario": "Binary files in change list",
"setup": "Stage binary files (.png, .pdf, etc.)",
"expected_behavior": "Binary files skipped gracefully, only text files checked",
"verify": [
"No errors from reading binary content",
"Binary files excluded from files_checked count",
"Logged appropriately"
],
"error_handling": "Handle UnicodeDecodeError → skip file and log"
},
{
"scenario": "Very large file (>10MB)",
"setup": "Stage file larger than reasonable code file",
"expected_behavior": "File checked but with performance considerations (future: add size limit check)",
"verify": [
"File processed without crashing",
"Duration logged",
"Future enhancement: skip files >10MB with warning"
],
"error_handling": "No errors"
},
{
"scenario": "Deleted files in git diff",
"setup": "Delete files and stage deletions",
"expected_behavior": "Deleted files excluded from check list (can't check content that doesn't exist)",
"verify": [
"detect_changed_files filters out deleted files",
"No errors attempting to read deleted files",
"Only added/modified files checked"
],
"error_handling": "No errors - filtered at detection stage"
},
{
"scenario": "Permission denied on file read",
"setup": "Set file permissions to prevent reading",
"expected_behavior": "ErrorResponse.permission_denied with affected file name",
"verify": [
"PermissionError caught",
"Clear error message with file path",
"Security event logged"
],
"error_handling": "PermissionError → ErrorResponse.permission_denied + log_security_event"
}
]
}
},
"performance_monitoring": {
"description": "Performance monitoring strategy to achieve <1s target for typical changes (3-10 files)",
"metrics_to_track": [
{
"metric": "Total duration (end-to-end)",
"how_to_measure": "time.time() at start/end of handler, difference = duration",
"target": "<1s for 3-10 files, <3s for 50 files",
"logging": "logger.info(f'check_consistency completed in {duration:.2f}s', extra={'duration': duration, 'files_checked': len(files)})"
},
{
"metric": "Git detection time",
"how_to_measure": "time.time() before/after detect_changed_files()",
"target": "<100ms",
"logging": "logger.debug(f'Git detection: {git_duration:.2f}s', extra={'git_duration': git_duration})"
},
{
"metric": "Standards parsing time",
"how_to_measure": "time.time() before/after parse_standards_documents() (already tracked in AuditGenerator)",
"target": "<200ms (should be fast, standards are small documents)",
"logging": "Reuse AuditGenerator logging"
},
{
"metric": "Per-file check time",
"how_to_measure": "Track time for each file in check_files() loop",
"target": "<50ms per file average",
"logging": "logger.debug(f'Checked {file_path} in {file_duration:.2f}s', extra={'file_path': str(file_path), 'file_duration': file_duration})"
},
{
"metric": "Files checked count",
"how_to_measure": "len(files) at check time",
"target": "N/A (metric for context)",
"logging": "logger.info(f'Checking {len(files)} files', extra={'files_count': len(files)})"
}
],
"optimization_opportunities": [
{
"optimization": "Cache parsed standards (v1.5.0)",
"rationale": "Standards documents don't change between commits. Parsing them every time is wasteful. Caching can save ~200ms.",
"implementation": "Add cache layer in ConsistencyChecker: cache standards by (project_path, standards_dir, mtime). Check cache before parsing. Invalidate cache if standards files modified.",
"expected_improvement": "~200ms saved per check (parse time eliminated)"
},
{
"optimization": "Parallel file checking (v1.5.0)",
"rationale": "Checking multiple files is embarrassingly parallel. For large changesets (20+ files), parallel checking can significantly reduce total time.",
"implementation": "Use multiprocessing.Pool to check files in parallel. Each worker checks one file independently. Combine results.",
"expected_improvement": "Linear speedup with CPU cores (4 cores → ~4x faster for large changesets)"
},
{
"optimization": "Skip binary files early",
"rationale": "Attempting to read binary files wastes time. Detect binary files by extension or magic bytes before reading content.",
"implementation": "Add is_binary_file() check before reading. Skip binary files with log message.",
"expected_improvement": "~10-50ms per binary file avoided"
},
{
"optimization": "Lazy violation detection (only needed patterns)",
"rationale": "If scope=['ui_patterns'], don't run behavior/ux checks. Already planned but ensure implementation is efficient.",
"implementation": "In check_files(), conditionally call detect_*_violations based on scope parameter.",
"expected_improvement": "33-66% reduction in check time if limited scope"
}
],
"performance_targets": {
"small_input": "1-3 files: <500ms",
"medium_input": "5-10 files: <1s",
"large_input": "20-50 files: <3s"
}
},
"documentation_updates": {
"files_to_update": [
{
"file": "README.md",
"section": "Available Tools",
"addition": "10. **check_consistency** - Check code changes for standards violations (pre-commit quality gate)"
},
{
"file": "README.md",
"section": "Examples",
"addition": "Example 5: Pre-Commit Quality Gate - Use check_consistency to validate changed files before commit. Auto-detects git changes or accepts explicit file list. Integrates with pre-commit hooks and CI/CD pipelines."
},
{
"file": "README.md",
"section": "Version History",
"addition": "Version 1.4.0 - check_consistency tool, completes Consistency Trilogy (establish → audit → check)"
},
{
"file": "CLAUDE.md",
"section": "Tool Catalog",
"addition": "Complete AI usage guidance for check_consistency",
"detailed_content": {
"purpose": "Check code changes (new/modified files) against established standards for consistency violations. Lightweight quality gate for pre-commit checks and CI/CD pipelines.",
"when_to_use": [
"User asks to 'check my changes for consistency'",
"User wants to 'validate code before committing'",
"Setting up pre-commit hooks or CI/CD quality gates",
"Checking specific files for standards compliance"
],
"input_parameters": {
"project_path": {
"type": "string",
"required": true,
"description": "Absolute path to project directory"
},
"files": {
"type": "array of strings",
"required": false,
"description": "List of files to check (relative paths). Auto-detects from git if not provided."
},
"standards_dir": {
"type": "string",
"required": false,
"description": "Path to standards directory (default: coderef/standards)"
},
"severity_threshold": {
"type": "string",
"required": false,
"description": "Fail only on violations >= this severity (critical|major|minor, default: major)"
},
"scope": {
"type": "array",
"required": false,
"description": "Which patterns to check (ui_patterns|behavior_patterns|ux_patterns|all, default: all)"
},
"fail_on_violations": {
"type": "boolean",
"required": false,
"description": "Return error exit code if violations found (default: true)"
}
},
"example_usage": "mcp__docs_mcp__check_consistency(project_path='C:/path/to/project') # Auto-detects git changes\n\nmcp__docs_mcp__check_consistency(project_path='C:/path/to/project', files=['src/Button.tsx', 'src/Modal.tsx']) # Explicit files\n\nmcp__docs_mcp__check_consistency(project_path='C:/path/to/project', severity_threshold='critical') # Only fail on critical",
"critical_notes": [
"Requires standards to exist (run establish_standards first)",
"Auto-detects staged files by default (git required unless files provided)",
"Returns exit code for CI/CD integration (0=pass, 1=fail)",
"Much faster than audit_codebase (only checks changed files)",
"Designed for pre-commit hooks (<1s typical)"
]
}
}
]
},
"success_criteria": {
"description": "Quantifiable success metrics to validate implementation quality and completeness",
"functional_requirements": [
{
"requirement": "Git auto-detection works",
"metric": "detect_changed_files() returns correct file list",
"target": "Staged files detected accurately, unstaged files detected accurately",
"validation": "Integration test with staged files, verify returned list matches git diff --name-only --cached"
},
{
"requirement": "Explicit file list works",
"metric": "Accepts files parameter and checks only those files",
"target": "Only specified files checked, no git interaction",
"validation": "Integration test with files=['file1.tsx', 'file2.py'], verify only those files checked"
},
{
"requirement": "Violation detection works",
"metric": "Reuses AuditGenerator correctly, violations detected",
"target": "All violation types detected (UI, behavior, UX)",
"validation": "Integration test with intentional violations, verify all detected"
},
{
"requirement": "Severity filtering works",
"metric": "filter_by_severity_threshold() filters correctly",
"target": "critical threshold = only critical, major threshold = critical+major, minor threshold = all",
"validation": "Unit tests for each threshold level"
},
{
"requirement": "Exit codes correct",
"metric": "exit_code field in result",
"target": "exit_code=0 if pass, exit_code=1 if fail and fail_on_violations=true",
"validation": "Integration tests verify exit codes for pass/fail scenarios"
},
{
"requirement": "Performance target met",
"metric": "Duration for typical changes",
"target": "<1s for 3-10 files",
"validation": "Integration test measures duration, verify <1s"
},
{
"requirement": "Pre-commit hook works",
"metric": "Hook prevents commit if violations found",
"target": "Commit blocked with clear error, commit succeeds when clean",
"validation": "Manual test: install hook, make violation, attempt commit, verify blocked"
},
{
"requirement": "GitHub Actions integration works",
"metric": "Workflow fails on violations",
"target": "Workflow exit code reflects check result",
"validation": "Test workflow in sample repo PR"
}
],
"quality_requirements": [
{
"requirement": "Architecture compliance",
"metric": "Pattern adherence",
"target": "100% of code follows existing patterns (ARCH-001, QUA-001, QUA-002, REF-002, REF-003, ARCH-003)",
"validation": [
"ARCH-001: All errors use ErrorResponse factory ✓",
"QUA-001: All complex returns use TypedDict ✓ (ConsistencyResultDict)",
"QUA-002: Handler registered in TOOL_HANDLERS dict ✓",
"REF-002: No magic strings (use constants) ✓ (SeverityThreshold enum)",
"REF-003: All inputs validated at boundaries ✓ (validate_* functions)",
"ARCH-003: All operations logged ✓ (log_tool_call, logger.info)"
]
},
{
"requirement": "Logging coverage",
"metric": "Operations with logging",
"target": "100% of operations logged",
"validation": [
"log_tool_call() at handler start ✓",
"log_error() for all error scenarios ✓",
"logger.info() for operation progress (start, files detected, checking, completion) ✓",
"logger.debug() for detailed operations (git commands, per-file checks) ✓"
]
},
{
"requirement": "Error handling coverage",
"metric": "Error types handled",
"target": "All expected errors handled gracefully",
"validation": [
"ValueError → ErrorResponse.invalid_input ✓",
"FileNotFoundError → ErrorResponse.not_found ✓",
"PermissionError → ErrorResponse.permission_denied ✓",
"OSError → ErrorResponse.io_error ✓",
"Exception → ErrorResponse.generic_error ✓"
]
}
],
"performance_requirements": [
{
"requirement": "Fast enough for pre-commit hooks",
"metric": "End-to-end duration",
"target": "<1s for 3-10 files (typical commit)",
"validation": "Integration test with 5 files, measure duration, verify <1s"
},
{
"requirement": "Scales to large changesets",
"metric": "Duration for 50 files",
"target": "<3s for 50 files",
"validation": "Integration test with 50 files, measure duration, verify <3s"
}
],
"security_requirements": [
{
"requirement": "Path traversal protection",
"metric": "Rejects ../ in file paths",
"target": "validate_file_list detects and rejects path traversal",
"validation": "Unit test with files=['../../etc/passwd'], verify ValueError raised"
},
{
"requirement": "Absolute path rejection",
"metric": "Rejects absolute paths in files parameter",
"target": "validate_file_list detects and rejects absolute paths",
"validation": "Unit test with files=['/etc/passwd'], verify ValueError raised"
},
{
"requirement": "Path canonicalization",
"metric": "All paths resolved/canonicalized",
"target": "Path.resolve() called on all user-provided paths",
"validation": "Code review confirms Path.resolve() usage (SEC-001)"
}
]
},
"changelog_entry": {
"tool": "add_changelog_entry",
"parameters": {
"project_path": "C:/Users/willh/.mcp-servers/docs-mcp",
"version": "1.4.0",
"change_type": "feature",
"severity": "major",
"title": "Implemented check_consistency tool for pre-commit quality gate",
"description": "Added lightweight consistency checker that validates only changed files against established standards. Completes the Consistency Trilogy (establish → audit → check). Includes git auto-detection of staged/unstaged files, severity threshold filtering, CI/CD exit codes, terminal-friendly output, and integration examples (pre-commit hooks, GitHub Actions, GitLab CI). Achieves <1s performance target for typical changes (3-10 files) via composition with AuditGenerator (zero code duplication).",
"files": [
"server.py",
"tool_handlers.py",
"constants.py",
"type_defs.py",
"validation.py",
"generators/consistency_checker.py",
"examples/pre-commit-hook.sh",
"examples/github-actions.yml",
"examples/gitlab-ci.yml",
"README.md",
"CLAUDE.md"
],
"reason": "Complete the Consistency Trilogy by providing real-time quality gate for catching standards violations at commit time. Enables shift-left approach to consistency enforcement (catch issues when context is fresh rather than after-the-fact).",
"impact": "Developers can now catch consistency violations in real-time via pre-commit hooks or CI/CD pipelines. Fast (<1s) checks on only changed files provide immediate feedback without disrupting workflow. Integration examples enable easy adoption. Closes the loop on consistency management: establish standards, audit existing code, check new changes.",
"breaking": false,
"contributors": ["willh", "Claude Code AI"]
}
},
"troubleshooting_guide": {
"common_issues": [
{
"issue": "Tool says 'Not a git repository and no files specified'",
"symptom": "ErrorResponse.invalid_input when running check_consistency",
"causes": [
"Not running in a git repository",
"Git not installed on system",
"No files parameter provided"
],
"resolution": "Either: 1) Run in a git repository, 2) Install git, 3) Provide explicit files parameter: check_consistency(project_path='...', files=['file1.tsx', 'file2.py'])"
},
{
"issue": "Standards directory not found",
"symptom": "ErrorResponse.not_found about missing coderef/standards",
"causes": [
"establish_standards not run yet",
"Standards directory deleted or moved",
"Wrong project_path"
],
"resolution": "Run establish_standards(project_path='...') first to extract standards from codebase"
},
{
"issue": "Check says 'pass' but I know there are violations",
"symptom": "Status='pass' with violations_found=0, but code violates standards",
"causes": [
"Files not staged in git (auto-detection only checks staged by default)",
"Standards don't cover the specific violation type",
"File extensions not recognized for checking"
],
"resolution": "1) Stage files with git add, 2) Verify standards cover violation type by reading coderef/standards/*.md, 3) Use explicit files parameter to check specific files"
},
{
"issue": "Check is slow (>1s for small changes)",
"symptom": "Duration >1s for 3-10 files",
"causes": [
"Large files in changeset",
"Many patterns in standards documents",
"Disk I/O bottleneck"
],
"resolution": "1) Check duration log to identify bottleneck, 2) Consider caching standards (v1.5.0 feature), 3) Use severity_threshold or scope to reduce check overhead"
},
{
"issue": "Pre-commit hook not running",
"symptom": "Commit succeeds even with violations",
"causes": [
"Hook not installed (.git/hooks/pre-commit missing)",
"Hook not executable (missing chmod +x)",
"Hook script has errors"
],
"resolution": "1) Copy examples/pre-commit-hook.sh to .git/hooks/pre-commit, 2) Run chmod +x .git/hooks/pre-commit, 3) Test hook by running .git/hooks/pre-commit manually"
},
{
"issue": "GitHub Actions workflow failing unexpectedly",
"symptom": "Workflow fails but violations unclear",
"causes": [
"Standards not available in CI environment",
"Git checkout incomplete",
"Python dependencies not installed"
],
"resolution": "1) Ensure standards are committed to repo (coderef/standards/), 2) Use actions/checkout@v3 with fetch-depth: 0, 3) Install Python dependencies before running check"
}
]
},
"review_gates": {
"pre_implementation": {
"reviewer": "user",
"question": "Does this plan address all requirements and follow the template structure?",
"checkpoint": "Before starting Phase 1"
},
"post_phase_1": {
"reviewer": "user",
"question": "Is infrastructure setup correct? (Tool schema, handler, registry, constants, TypeDicts, validation)",
"checkpoint": "After INFRA-007, before GIT-001"
},
"post_phase_2": {
"reviewer": "user",
"question": "Does git integration work correctly? (Auto-detection, graceful fallback)",
"checkpoint": "After GIT-004, before REUSE-001"
},
"post_implementation": {
"reviewer": "user",
"question": "Does the implementation meet all success criteria? (Functional, quality, performance, security)",
"checkpoint": "After TEST-003, before DOC-001"
}
},
"implementation_checklist": {
"pre_implementation": [
"☐ Review plan for completeness",
"☐ Get user approval on approach",
"☐ Verify template alignment"
],
"phase_1_infrastructure": [
"☐ INFRA-001: Tool schema (server.py)",
"☐ INFRA-002: Handler implementation (tool_handlers.py)",
"☐ INFRA-003: Register handler (tool_handlers.py)",
"☐ INFRA-004: Constants (constants.py)",
"☐ INFRA-005: ConsistencyChecker skeleton (generators/consistency_checker.py)",
"☐ INFRA-006: TypeDicts (type_defs.py)",
"☐ INFRA-007: Validation functions (validation.py)",
"☐ Validate Python syntax (no errors)"
],
"phase_2_git_integration": [
"☐ GIT-001: detect_changed_files()",
"☐ GIT-002: get_file_content_from_git()",
"☐ GIT-003: is_git_repository()",
"☐ GIT-004: Git error handling and fallback",
"☐ Test git detection with sample repo"
],
"phase_3_reuse_audit_logic": [
"☐ REUSE-001: Import and compose with AuditGenerator",
"☐ REUSE-002: check_files() implementation",
"☐ REUSE-003: filter_by_severity_threshold() implementation",
"☐ Verify violation detection works"
],
"phase_4_output_formatting": [
"☐ OUTPUT-001: generate_check_summary()",
"☐ OUTPUT-002: format_violation_for_terminal()",
"☐ OUTPUT-003: Exit code handling",
"☐ Verify terminal output is clear and actionable"
],
"phase_5_integration_examples": [
"☐ EXAMPLE-001: pre-commit-hook.sh",
"☐ EXAMPLE-002: github-actions.yml",
"☐ EXAMPLE-003: gitlab-ci.yml",
"☐ Test pre-commit hook manually"
],
"phase_6_testing": [
"☐ TEST-001: Create integration test script",
"☐ TEST-002: Run integration tests (all pass)",
"☐ TEST-003: Manual validation with pre-commit hook",
"☐ Verify edge cases handled correctly",
"☐ Verify performance targets met (<1s for 3-10 files)"
],
"phase_7_documentation": [
"☐ DOC-001: Update README.md (tool list, examples, version)",
"☐ DOC-002: Update CLAUDE.md (tool catalog, usage patterns)",
"☐ DOC-003: Add usage examples to both docs",
"☐ Review documentation for clarity and completeness"
],
"finalization": [
"☐ Add changelog entry via add_changelog_entry (version 1.4.0)",
"☐ Update check-consistency-plan.json status to 'implemented'",
"☐ Run final validation (all success criteria met)",
"☐ Commit changes with descriptive message",
"☐ Push to origin/main",
"☐ Standby for user feedback"
]
},
"task_id_reference": {
"description": "Task ID prefixes and their usage",
"prefixes": {
"INFRA": "Infrastructure setup - files, imports, registrations, schema definitions",
"GIT": "Git integration - detection, commands, error handling",
"REUSE": "Code reuse - composition with existing generators",
"OUTPUT": "Output formatting - terminal display, summaries, messages",
"EXAMPLE": "Integration examples - hooks, workflows, scripts",
"TEST": "Testing - unit tests, integration tests, validation",
"DOC": "Documentation - README, CLAUDE.md, examples"
},
"usage_pattern": "Use PREFIX-NNN format (e.g., INFRA-001, GIT-002) for all implementation tasks",
"benefits": [
"Traceability - Links tasks to specific plan sections",
"Dependencies - Shows task ordering",
"Progress tracking - Checkbox-style completion",
"Cross-referencing - Easy to reference in discussions",
"Consistency - Mirrors architecture pattern naming"
]
},
"notes_and_considerations": {
"design_decisions": [
{
"decision": "Composition with AuditGenerator instead of inheritance",
"rationale": "Avoids code duplication while maintaining separation of concerns. ConsistencyChecker adds git integration and targeted file checking; AuditGenerator provides violation detection logic. Composition is more flexible than inheritance.",
"trade_off": "Slight indirection (checker.audit_generator.method()) vs. direct access, but gains flexibility and zero duplication"
},
{
"decision": "Auto-detect staged files by default",
"rationale": "Most common pre-commit workflow is: make changes → git add → git commit. Auto-detecting staged files matches developer expectations. Unstaged files can be checked with explicit mode parameter.",
"trade_off": "Requires git, but provides graceful fallback (explicit file list)"
},
{
"decision": "Severity threshold filtering in tool (not user responsibility)",
"rationale": "CI/CD users want simple pass/fail without post-processing violations. Filtering in tool provides better UX (set threshold, get filtered result).",
"trade_off": "More parameters in tool, but much better UX for primary use case"
},
{
"decision": "Exit codes for CI/CD integration",
"rationale": "CI/CD tools use exit codes to determine pass/fail. Providing exit_code in response and allowing fail_on_violations=false gives flexibility.",
"trade_off": "None - this is a clear requirement for CI/CD integration"
},
{
"decision": "Terminal-friendly output (not markdown report)",
"rationale": "Pre-commit checks show in terminal; developers need quick, scannable output. Markdown reports (like audit_codebase) are overkill for this use case.",
"trade_off": "Less detailed than audit reports, but appropriate for the use case (quick feedback)"
}
],
"potential_challenges": [
{
"challenge": "Performance target (<1s) may be difficult for large changesets",
"mitigation": "Focus optimization on common case (3-10 files). For large changesets, <3s is acceptable. Add performance logging to identify bottlenecks. Future: add caching layer (v1.5.0).",
"fallback": "If <1s not achievable, document performance characteristics and recommend using explicit file list for speed"
},
{
"challenge": "Git integration complexity (many modes, edge cases)",
"mitigation": "Start with simple staged file detection. Add unstaged/commit-range support incrementally. Comprehensive error handling and logging. Graceful fallback to explicit file list.",
"fallback": "If git integration proves problematic, prioritize explicit file list workflow (always reliable)"
},
{
"challenge": "Binary files causing errors when reading content",
"mitigation": "Catch UnicodeDecodeError when reading files, skip binary files with log message. Future: detect binary by extension/magic bytes before reading.",
"fallback": "Document that only text files are checked (binary files skipped)"
}
]
},
"future_enhancements": {
"v1_5_improvements": [
{
"feature": "Caching layer for parsed standards",
"description": "Cache parsed standards by (project_path, standards_dir, mtime). Check cache before parsing. Invalidate if standards files modified. Provides ~200ms speedup per check.",
"benefit": "Achieves consistent <1s performance even for first check. Reduces redundant I/O.",
"effort": "2 hours"
},
{
"feature": "Auto-fix capability (--fix flag)",
"description": "Automatically fix certain violation types (e.g., add ARIA labels, standardize button sizes, fix color hex codes). Similar to eslint --fix. Dry-run mode to preview fixes.",
"benefit": "Reduces developer friction - tool fixes issues instead of just reporting them. Speeds up consistency adoption.",
"effort": "8 hours"
},
{
"feature": "Violation suppression (.consistency-ignore file)",
"description": "Allow developers to suppress specific violations with inline comments or config file. Useful for intentional exceptions or legacy code. Track suppression usage.",
"benefit": "Prevents noisy false positives, allows gradual adoption, enables exceptions for valid reasons.",
"effort": "4 hours"
},
{
"feature": "Enhanced git modes (branch, PR, commit-range)",
"description": "Support checking all changes in: current branch vs. main, entire PR, specific commit range (commit1..commit2). Useful for PR-level consistency checks.",
"benefit": "More flexible git integration, better CI/CD use cases (check entire PR, not just latest commit).",
"effort": "3 hours"
},
{
"feature": "Progressive severity mode",
"description": "Gradually increase enforcement: week 1 = warnings only, week 2 = fail on critical, week 3 = fail on major. Helps teams adopt without disrupting workflow.",
"benefit": "Smoother onboarding, less pushback from developers, allows gradual ratcheting of standards.",
"effort": "2 hours"
},
{
"feature": "Parallel file checking (multiprocessing)",
"description": "Check multiple files in parallel using multiprocessing.Pool. Provides ~4x speedup on 4-core systems for large changesets (20+ files).",
"benefit": "Scales to large changesets without performance degradation. Maintains <1s for typical cases while improving large cases.",
"effort": "3 hours"
},
{
"feature": "Watch mode for continuous checking",
"description": "File watcher that runs check_consistency on save (similar to jest --watch). Provides instant feedback during development.",
"benefit": "Shift-left to development time (not just commit time). Immediate feedback loop.",
"effort": "4 hours"
},
{
"feature": "IDE integration (VS Code extension)",
"description": "VS Code extension that runs check_consistency and shows violations inline (like linter squiggles). Jump to violation location.",
"benefit": "Best possible developer experience - violations shown in editor, no separate tool needed.",
"effort": "16 hours (full extension development)"
}
],
"v2_0_improvements": [
{
"feature": "Machine learning violation detection",
"description": "Train ML model on historical violations and fixes. Detect subtle violations that regex can't catch. Suggest fixes based on past patterns.",
"benefit": "Catches violations that rule-based detection misses. Learns project-specific patterns over time.",
"effort": "40+ hours (research + implementation)"
},
{
"feature": "Cross-file consistency checks",
"description": "Detect violations that span multiple files (e.g., inconsistent naming across components, missing prop types, broken imports).",
"benefit": "Catches architectural violations, not just local style issues.",
"effort": "16 hours"
}
]
}
}