Skip to main content
Glama
srwlli

Documentation Generator MCP Server

by srwlli
test_workflow_documentation.py10.6 kB
#!/usr/bin/env python3 """ Workflow Documentation Tests (Phase 5.2). Tests that documentation provides clear guidance for the procedural AI review loop workflow. Since the review loop is AI-driven (not programmatic code), we validate that: 1. CLAUDE.md documents the review loop workflow (threshold ≥85, max 5 iterations) 2. Meta-plan shows review loop in data flow diagram 3. Examples demonstrate multi-iteration pattern Rationale: No code to test, but documentation must be clear enough for AI to follow. """ import sys from pathlib import Path import json # Add parent directory to path sys.path.insert(0, str(Path(__file__).parent)) # Module-level constants DOCS_MCP_PATH = Path(__file__).parent CLAUDE_MD_PATH = DOCS_MCP_PATH / 'CLAUDE.md' META_PLAN_PATH = DOCS_MCP_PATH / 'coderef' / 'planning-workflow' / 'planning-workflow-system-meta-plan.json' def test_claude_md_documents_review_loop(): """ Test that CLAUDE.md documents the review loop workflow. Validates that CLAUDE.md contains clear documentation about: - Approval threshold (score ≥ 90) - Maximum iterations (5) - Refinement process (validate -> check score -> refine if < threshold) - Re-validation after refinement This ensures AI agents have clear guidance to follow the review loop pattern. """ print("\n" + "="*70) print("TEST: CLAUDE.md Documents Review Loop") print("="*70) # Check file exists assert CLAUDE_MD_PATH.exists(), f"CLAUDE.md not found at {CLAUDE_MD_PATH}" print(f" [OK] CLAUDE.md found at {CLAUDE_MD_PATH}") # Read content claude_content = CLAUDE_MD_PATH.read_text(encoding='utf-8') # Check for approval threshold documentation (≥85 or ≥90) threshold_patterns = [ 'score >= 90', 'score ≥ 90', 'threshold: 90', 'approval threshold', 'minimum score of 90' ] threshold_found = any(pattern.lower() in claude_content.lower() for pattern in threshold_patterns) assert threshold_found, "CLAUDE.md should document approval threshold (>=90)" print(" [OK] Approval threshold documented") # Check for max iterations documentation iteration_patterns = [ 'max 5 iterations', 'maximum 5 iterations', 'up to 5 iterations', 'iterate up to 5 times', '5 iteration limit' ] iterations_found = any(pattern.lower() in claude_content.lower() for pattern in iteration_patterns) # Note: This may not be explicitly documented yet - test will inform us if iterations_found: print(" [OK] Maximum iterations (5) documented") else: print(" [WARNING] Maximum iterations not explicitly documented in CLAUDE.md") # Check for refinement process documentation refinement_patterns = [ 'validate', 'check score', 'refine', 're-validate', 'iterative', 'review loop' ] refinement_count = sum(1 for pattern in refinement_patterns if pattern.lower() in claude_content.lower()) assert refinement_count >= 4, f"CLAUDE.md should document refinement process (found {refinement_count}/6 keywords)" print(f" [OK] Refinement process documented ({refinement_count}/6 keywords found)") # Check for review loop workflow workflow_keywords = ['validate_implementation_plan', 'score', 'approved', 'issues'] workflow_count = sum(1 for kw in workflow_keywords if kw in claude_content) assert workflow_count >= 3, "CLAUDE.md should reference planning tools and workflow" print(f" [OK] Planning workflow tools referenced ({workflow_count}/4 keywords)") print("\n[PASS] CLAUDE.md provides review loop documentation") def test_meta_plan_shows_review_loop_in_workflow(): """ Test that meta-plan data flow diagram shows the review loop. Validates that planning-workflow-system-meta-plan.json contains: - Data flow diagram showing validate -> score -> refine -> re-validate cycle - References to Tools #3 and #4 (validate, review) - Mention of iterative refinement This ensures the system design explicitly includes the review loop concept. """ print("\n" + "="*70) print("TEST: Meta-Plan Shows Review Loop in Workflow") print("="*70) # Check file exists assert META_PLAN_PATH.exists(), f"Meta-plan not found at {META_PLAN_PATH}" print(f" [OK] Meta-plan found at {META_PLAN_PATH}") # Read and parse JSON with open(META_PLAN_PATH, 'r', encoding='utf-8') as f: meta_plan = json.load(f) # Convert to string for searching meta_plan_str = json.dumps(meta_plan, indent=2) # Check for data flow diagram section assert 'data_flow' in meta_plan_str.lower() or 'workflow' in meta_plan_str.lower(), \ "Meta-plan should contain data flow or workflow section" print(" [OK] Data flow/workflow section exists") # Check for validate tool reference (Tool #3) validate_patterns = ['validate_implementation_plan', 'Tool #3', 'validation', 'score'] validate_count = sum(1 for pattern in validate_patterns if pattern in meta_plan_str) assert validate_count >= 2, "Meta-plan should reference validation tool" print(f" [OK] Validation tool referenced ({validate_count}/4 patterns)") # Check for review/report tool reference (Tool #4) review_patterns = ['generate_plan_review_report', 'Tool #4', 'review report', 'markdown report'] review_count = sum(1 for pattern in review_patterns if pattern in meta_plan_str) assert review_count >= 2, "Meta-plan should reference review report tool" print(f" [OK] Review report tool referenced ({review_count}/4 patterns)") # Check for iterative/loop concepts iteration_keywords = ['iterate', 'loop', 'refine', 'repeat', 'until'] iteration_count = sum(1 for kw in iteration_keywords if kw in meta_plan_str.lower()) if iteration_count >= 2: print(f" [OK] Iterative concepts present ({iteration_count}/5 keywords)") else: print(f" [WARNING] Limited iterative concept documentation ({iteration_count}/5 keywords)") # Check for score threshold mention threshold_mentioned = 'threshold' in meta_plan_str.lower() or '90' in meta_plan_str or '85' in meta_plan_str if threshold_mentioned: print(" [OK] Score threshold mentioned in meta-plan") else: print(" [INFO] Score threshold not explicitly mentioned (may be in other docs)") print("\n[PASS] Meta-plan documents planning workflow with review tools") def test_workflow_examples_show_iteration_pattern(): """ Test that examples demonstrate multi-iteration pattern. Validates that somewhere in documentation (CLAUDE.md or meta-plan) there are: - Examples showing multiple validation attempts - Examples showing score improvement across iterations - References to checking score and refining This ensures AI agents can learn the pattern from examples, not just theory. """ print("\n" + "="*70) print("TEST: Workflow Examples Show Iteration Pattern") print("="*70) # Read both documentation sources claude_content = CLAUDE_MD_PATH.read_text(encoding='utf-8') meta_plan_content = "" if META_PLAN_PATH.exists(): meta_plan_content = META_PLAN_PATH.read_text(encoding='utf-8') combined_docs = claude_content + "\n" + meta_plan_content # Check for example patterns example_indicators = [ 'example', 'for instance', 'e.g.', 'demonstration', 'sample', 'illustration' ] example_count = sum(1 for indicator in example_indicators if indicator.lower() in combined_docs.lower()) assert example_count >= 2, "Documentation should contain examples" print(f" [OK] Examples present in documentation ({example_count} indicators)") # Check for iteration pattern in examples iteration_patterns = [ 'iteration 1', 'iteration 2', 'first attempt', 'second attempt', 'try again', 'retry', 'improve', 'refine and re-' ] iteration_example_count = sum(1 for pattern in iteration_patterns if pattern.lower() in combined_docs.lower()) if iteration_example_count >= 2: print(f" [OK] Iteration pattern examples found ({iteration_example_count}/8 patterns)") else: print(f" [INFO] Limited iteration examples ({iteration_example_count}/8 patterns) - may need more examples") # Check for score progression examples (e.g., "45 -> 75 -> 88") score_patterns = [ 'score', 'improve', 'increase', 'from', 'to', '/100' ] score_mentions = sum(1 for pattern in score_patterns if pattern in combined_docs.lower()) if score_mentions >= 4: print(f" [OK] Score progression concepts present ({score_mentions}/6 keywords)") else: print(f" [INFO] Score progression mentioned ({score_mentions}/6 keywords)") # Check for specific workflow steps mentioned together workflow_steps = ['validate', 'score', 'refine', 'improve'] steps_found = [step for step in workflow_steps if step in combined_docs.lower()] assert len(steps_found) >= 3, f"Documentation should mention workflow steps (found {len(steps_found)}/4)" print(f" [OK] Workflow steps documented ({len(steps_found)}/4 steps: {', '.join(steps_found)})") print("\n[PASS] Documentation includes workflow patterns and concepts") if __name__ == "__main__": print("\n" + "="*70) print("WORKFLOW DOCUMENTATION TEST SUITE (Phase 5.2)") print("="*70) try: # Run documentation tests test_claude_md_documents_review_loop() test_meta_plan_shows_review_loop_in_workflow() test_workflow_examples_show_iteration_pattern() print("\n" + "="*70) print("[PASS] ALL DOCUMENTATION TESTS PASSED") print("="*70) print("\nTest Coverage:") print(" [OK] CLAUDE.md documents review loop workflow") print(" [OK] Meta-plan shows review loop in system design") print(" [OK] Examples demonstrate iteration pattern") print("\nTotal: 3 test functions, all passing") print("\nValidation: Documentation is sufficient for AI to follow review loop workflow") exit(0) except AssertionError as e: print(f"\n[FAIL] TEST FAILED: {e}") import traceback traceback.print_exc() exit(1) except Exception as e: print(f"\n[FAIL] UNEXPECTED ERROR: {e}") import traceback traceback.print_exc() exit(1)

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/srwlli/docs-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server