Documentation Generator MCP Server

by srwlli
plan.json (24.3 kB)
{ "META_DOCUMENTATION": { "feature_name": "analysis-persistence", "version": "1.0.0", "status": "complete", "generated_by": "AI Assistant (Claude Code)", "generated_at": "2025-10-14", "has_context": true, "has_analysis": false, "source": "MCP-FEEDBACK.md - User-submitted workflow improvement request" }, "UNIVERSAL_PLANNING_STRUCTURE": { "0_preparation": { "foundation_docs": { "available": [ "README.md (root)", "ARCHITECTURE.md (coderef/foundation-docs)", "API.md (coderef/foundation-docs)", "COMPONENTS.md (coderef/foundation-docs)", "SCHEMA.md (coderef/foundation-docs)", "USER-GUIDE.md (coderef/foundation-docs)" ], "missing": [] }, "coding_standards": { "available": [ "BEHAVIOR-STANDARDS.md", "UI-STANDARDS.md", "UX-PATTERNS.md", "COMPONENT-INDEX.md" ], "missing": ["COMPONENT-PATTERN.md"] }, "reference_components": { "primary": "tool_handlers.py:handle_analyze_project_for_planning (lines 433-488)", "secondary": [ "tool_handlers.py:handle_add_changelog_entry (demonstrates file creation pattern)", "generators/planning_analyzer.py (PlanningAnalyzer class)", "error_responses.py (ErrorResponse patterns)", "logger_config.py (logging patterns)" ] }, "key_patterns_identified": [ "Error handling: try-catch with specific error types (ValueError, PermissionError, FileNotFoundError, OSError)", "File operations: Path.mkdir(parents=True, exist_ok=True) pattern", "Logging: logger.info with extra metadata dict", "Response metadata: Adding _metadata key to returned JSON", "Timestamp format: datetime.now().strftime('%Y%m%d-%H%M%S')", "Relative paths in responses: path.relative_to(project_path)", "JSON encoding: json.dump with indent=2 and UTF-8 encoding" ], "technology_stack": { "language": "Python", "framework": "MCP Server (Model Context Protocol)", "testing": "pytest", "build": "pyproject.toml" }, "project_structure": { "main_directories": [ "generators/ (generator classes)", "tool_handlers.py (MCP tool implementations)", "coderef/ (working and output directories)", "templates/power/ (POWER framework templates)" ], "organization_pattern": "modular (generators + handlers separation)" }, "gaps_and_risks": [ "No existing analysis-cache directory (will be created)", "File save operation adds I/O overhead (~10-50ms)", "Disk space usage grows with repeated analyses (future: add rotation)", "Windows vs Unix path handling differences (mitigated by pathlib)" ] }, "1_executive_summary": { "what": "Add automatic file persistence to the analyze_project_for_planning MCP tool, saving analysis results to coderef/analysis-cache/analysis-{timestamp}.json with metadata returned in the response.", "why": "Complete the workflow pattern consistency where context.json and plan.json are automatically saved, but analysis.json is currently not persisted. This creates gaps in auditability, reusability, and documentation. The missing persistence forces AI agents to manually create files, adding friction to the planning workflow.", "how": "Modify tool_handlers.py:handle_analyze_project_for_planning to create analysis-cache directory, save results to timestamped JSON file, and add _metadata key with file path and timestamp to the returned response. Follow existing patterns from handle_add_changelog_entry for file creation and error handling.", "impact": "Workflow becomes fully auditable with complete artifact trail (context → analysis → plan). Users can review what project state informed plan creation. Analysis results become reusable without re-running expensive scans. 
AI agents experience zero friction with automatic persistence matching other workflow tools.", "timeline": "Single phase implementation: 1-2 hours for development, testing, and documentation updates. Low complexity due to isolated change in single function with existing patterns to follow." }, "2_risk_assessment": { "technical_risks": [ { "risk": "File system permissions prevent directory/file creation", "likelihood": "low", "impact": "medium", "mitigation": "Wrap mkdir and file write in try-except with PermissionError. Return ErrorResponse.permission_denied with helpful message. Tool continues to return analysis data even if save fails." }, { "risk": "Disk space exhaustion from accumulated analysis files", "likelihood": "low", "impact": "low", "mitigation": "Out of scope for initial implementation. Future enhancement can add rotation policy (e.g., keep last 10 analyses). Document in code comments for future work." }, { "risk": "Windows vs Unix path separator issues", "likelihood": "very_low", "impact": "low", "mitigation": "Use pathlib.Path throughout (already standard in codebase). Path.relative_to and Path / operator handle cross-platform automatically." }, { "risk": "Performance degradation from file I/O", "likelihood": "low", "impact": "low", "mitigation": "File save is fast (<50ms for typical analysis JSON ~10-50KB). Async operation already handles I/O. Performance test validates <100ms overhead constraint." } ], "timeline_risks": [ { "risk": "Scope creep to implement feature-specific analysis files", "likelihood": "medium", "impact": "medium", "mitigation": "Explicitly mark as out-of-scope in context.json. Stick to timestamped cache pattern only. Future PR can add feature-specific routing." } ], "complexity_score": { "overall": "low", "breakdown": { "implementation": "low (isolated function change, ~15 lines of code)", "testing": "low (unit test for file creation, integration test for workflow)", "integration": "low (no API changes, backwards compatible)", "deployment": "low (no database changes, no migrations)" } } }, "3_current_state_analysis": { "existing_functionality": { "tool_handlers.py:handle_analyze_project_for_planning": "Currently validates project_path, creates PlanningAnalyzer, runs analysis, logs results, returns JSON. Does NOT save to file.", "generators/planning_analyzer.py": "PlanningAnalyzer.analyze() returns PreparationSummaryDict with 7 keys. No file operations.", "error_responses.py": "ErrorResponse factory provides permission_denied, io_error, generic_error methods.", "logger_config.py": "Provides logger.info with extra dict, log_error, log_security_event." 
}, "files_to_create": [ "coderef/analysis-cache/ directory (auto-created by mkdir)", "coderef/analysis-cache/analysis-{timestamp}.json (auto-created per tool invocation)" ], "files_to_modify": [ { "file": "tool_handlers.py", "location": "handle_analyze_project_for_planning function (lines 433-488)", "changes": "Add analysis cache directory creation, file save operation, metadata injection, error handling for file operations", "estimated_lines": "+20 lines (total function grows from ~55 to ~75 lines)" } ], "dependencies_identified": [ "pathlib.Path (already imported)", "json (already imported)", "datetime (need to import)", "logger from logger_config (already imported)", "ErrorResponse from error_responses (already imported)" ] }, "4_key_features": { "feature_1": { "name": "Analysis cache directory creation", "priority": "critical", "description": "Create coderef/analysis-cache/ directory using Path.mkdir(parents=True, exist_ok=True) pattern. Handles missing parent directories gracefully. No error if directory already exists.", "acceptance_criteria": "Directory created or already exists. No PermissionError raised if creation fails (handled gracefully).", "dependencies": [] }, "feature_2": { "name": "Timestamped file generation", "priority": "critical", "description": "Generate unique filename using datetime.now().strftime('%Y%m%d-%H%M%S') for timestamp. Format: analysis-{timestamp}.json. Prevents overwrites from multiple analyses.", "acceptance_criteria": "Filename includes YYYYMMDD-HHMMSS timestamp. Multiple analyses in same second get unique names (timestamp precision sufficient).", "dependencies": ["feature_1"] }, "feature_3": { "name": "JSON file persistence", "priority": "critical", "description": "Save analysis results to file using json.dump with indent=2 and UTF-8 encoding. Handle file write errors gracefully without breaking tool response.", "acceptance_criteria": "Analysis JSON saved to file. File contains valid JSON. Tool returns analysis data even if save fails. UTF-8 encoding used.", "dependencies": ["feature_1", "feature_2"] }, "feature_4": { "name": "Response metadata injection", "priority": "critical", "description": "Add _metadata key to returned analysis result with saved_to (relative path) and generated_at (ISO timestamp) fields. Allows AI to reference saved file.", "acceptance_criteria": "Response includes _metadata.saved_to and _metadata.generated_at. Path is relative to project root. Timestamp is ISO 8601 format.", "dependencies": ["feature_3"] }, "feature_5": { "name": "Error handling for file operations", "priority": "high", "description": "Wrap mkdir and file write in try-except for PermissionError, OSError. Log errors using log_security_event or log_error. Return ErrorResponse if critical failure. Tool degrades gracefully (returns data without file).", "acceptance_criteria": "PermissionError caught and logged. OSError caught and logged. Tool returns analysis data even if save fails. User gets helpful error message if file operations fail.", "dependencies": ["feature_3"] } }, "5_task_id_system": { "prefix": "AP", "description": "Analysis Persistence", "tasks": [ { "id": "AP-001", "description": "Import datetime module at top of tool_handlers.py for timestamp generation (if not already imported). Verify datetime.now() and strftime are available." }, { "id": "AP-002", "description": "Add analysis cache directory creation logic after analyzer.analyze() call. Use project_path_obj / 'coderef' / 'analysis-cache' pattern. 
Call mkdir(parents=True, exist_ok=True) with error handling." }, { "id": "AP-003", "description": "Generate timestamped filename using datetime.now().strftime('%Y%m%d-%H%M%S'). Format as analysis-{timestamp}.json. Construct full Path object for analysis file." }, { "id": "AP-004", "description": "Save analysis results to file using with open(analysis_file, 'w', encoding='utf-8') and json.dump(result, f, indent=2). Wrap in try-except for PermissionError and OSError." }, { "id": "AP-005", "description": "Add _metadata key to result dict with saved_to (relative path using analysis_file.relative_to(project_path_obj)) and generated_at (datetime.now().isoformat()) fields." }, { "id": "AP-006", "description": "Add logging for successful file save using logger.info with extra={'analysis_file': str(analysis_file), 'file_size_bytes': analysis_file.stat().st_size} metadata." }, { "id": "AP-007", "description": "Update error handling to catch PermissionError separately and call log_security_event('analysis_save_permission_denied', ...) before continuing with response (graceful degradation)." }, { "id": "AP-008", "description": "Write unit test for handle_analyze_project_for_planning that verifies analysis file is created, contains valid JSON, and response includes _metadata. Test both success and PermissionError cases." }, { "id": "AP-009", "description": "Write integration test that runs full analyze workflow and verifies coderef/analysis-cache/ directory exists with timestamped analysis file. Verify file can be read and parsed as JSON." }, { "id": "AP-010", "description": "Update CLAUDE.md Tool Catalog section for analyze_project_for_planning to document the new file persistence behavior and _metadata response fields." }, { "id": "AP-011", "description": "Update user-guide.md Planning Workflow section to mention that analysis is automatically saved to analysis-cache/ and can be referenced for plan regeneration." }, { "id": "AP-012", "description": "Add changelog entry for v1.2.2 documenting this enhancement with change_type='enhancement', severity='minor', and appropriate description of the workflow improvement." 
} ] }, "6_implementation_phases": { "phase_1": { "name": "Core Implementation", "description": "Implement file persistence logic in handle_analyze_project_for_planning function", "tasks": ["AP-001", "AP-002", "AP-003", "AP-004", "AP-005"], "deliverables": [ "Modified tool_handlers.py with analysis file saving", "Analysis files created in coderef/analysis-cache/", "Response includes _metadata with file path" ], "dependencies": [], "estimated_duration": "30-45 minutes", "complexity": "low", "effort_level": 2 }, "phase_2": { "name": "Error Handling and Logging", "description": "Add comprehensive error handling and structured logging for file operations", "tasks": ["AP-006", "AP-007"], "deliverables": [ "Graceful degradation on file save failures", "Security event logging for permission errors", "Success logging with file metadata" ], "dependencies": ["phase_1"], "estimated_duration": "15-20 minutes", "complexity": "low", "effort_level": 1 }, "phase_3": { "name": "Testing and Documentation", "description": "Write tests and update documentation to reflect new behavior", "tasks": ["AP-008", "AP-009", "AP-010", "AP-011", "AP-012"], "deliverables": [ "Unit test for file persistence", "Integration test for full workflow", "Updated CLAUDE.md and user-guide.md", "Changelog entry for v1.2.2" ], "dependencies": ["phase_1", "phase_2"], "estimated_duration": "30-40 minutes", "complexity": "low", "effort_level": 2 } }, "7_testing_strategy": { "unit_tests": [ { "test_name": "test_analyze_project_creates_analysis_file", "what_to_test": "Verify that handle_analyze_project_for_planning creates analysis file in coderef/analysis-cache/", "test_approach": "Call handler with valid project_path, check that file exists using Path.exists(), verify filename matches analysis-{timestamp}.json pattern", "expected_outcome": "Analysis file created with timestamped name in analysis-cache directory" }, { "test_name": "test_analysis_file_contains_valid_json", "what_to_test": "Verify saved analysis file is valid JSON and contains all expected keys", "test_approach": "Call handler, read saved file, parse with json.loads, assert keys match PreparationSummaryDict structure (foundation_docs, coding_standards, etc.)", "expected_outcome": "File contains valid JSON with all 7 required keys from analysis results" }, { "test_name": "test_response_includes_metadata", "what_to_test": "Verify response includes _metadata.saved_to and _metadata.generated_at fields", "test_approach": "Call handler, parse returned TextContent, assert _metadata key exists with saved_to (string) and generated_at (ISO timestamp) fields", "expected_outcome": "Response JSON includes _metadata with relative file path and ISO timestamp" }, { "test_name": "test_graceful_degradation_on_permission_error", "what_to_test": "Verify tool returns analysis data even when file save fails due to permissions", "test_approach": "Mock Path.mkdir or open() to raise PermissionError, call handler, assert response still includes analysis data (no _metadata but data present)", "expected_outcome": "Tool returns analysis results without _metadata. No exception raised. Permission error logged." 
} ], "integration_tests": [ { "test_name": "test_full_workflow_analysis_persistence", "scenario": "Run analyze_project_for_planning on docs-mcp project and verify complete workflow", "steps": [ "Call analyze_project_for_planning with docs-mcp project path", "Verify coderef/analysis-cache/ directory exists", "Verify analysis-{timestamp}.json file exists", "Read file and parse JSON", "Verify all expected analysis keys present", "Verify response includes _metadata.saved_to matching file path" ], "expected_result": "Complete workflow produces persisted analysis file that matches returned data" } ], "edge_cases": [ { "case": "Multiple analyses in rapid succession", "behavior": "Each analysis gets unique timestamp. Files don't overwrite each other. If same-second collision, timestamp precision (HHMMSS) should prevent overwrites in practice. Multiple files accumulate in analysis-cache/." }, { "case": "analysis-cache directory already exists", "behavior": "mkdir(exist_ok=True) succeeds without error. New analysis file created alongside existing files." }, { "case": "Project path is read-only", "behavior": "mkdir raises PermissionError. Caught and logged. Tool returns analysis data without _metadata. User sees helpful error message in logs." }, { "case": "Disk full during file write", "behavior": "open() or json.dump() raises OSError. Caught and logged. Tool returns analysis data without _metadata. User sees error in logs." }, { "case": "Analysis results contain non-ASCII characters", "behavior": "UTF-8 encoding handles international characters. File saves successfully. JSON parsing works correctly." }, { "case": "Project path contains spaces or special characters", "behavior": "pathlib handles special characters correctly. File path in _metadata.saved_to is properly escaped. Works on Windows and Unix." }, { "case": "Very large analysis results (>1MB JSON)", "behavior": "File save takes longer but completes. Performance test validates <100ms overhead. No errors from large data." }, { "case": "Concurrent analyses on same project", "behavior": "Each gets unique timestamp. No file locking issues (async MCP server handles concurrency). Files don't conflict." } ], "performance_targets": [ { "metric": "File save overhead", "target": "<100ms additional time beyond current analyze_project_for_planning duration", "measurement": "Time handler execution before and after implementation. Difference should be <100ms for typical 10-50KB analysis JSON." }, { "metric": "Disk space usage", "target": "~10-50KB per analysis file (negligible growth)", "measurement": "Check file sizes after integration test. Typical analysis JSON is small." 
} ] }, "8_success_criteria": { "must_haves": [ "Analysis file created in coderef/analysis-cache/analysis-{timestamp}.json", "File contains valid JSON matching analysis results", "Response includes _metadata.saved_to with relative path", "Response includes _metadata.generated_at with ISO timestamp", "Backward compatible - existing workflows still work", "PermissionError and OSError handled gracefully", "Tool returns analysis data even if file save fails", "Logging includes success and failure cases", "Unit tests pass for file creation and error handling", "Integration test validates full workflow", "Documentation updated in CLAUDE.md and user-guide.md", "Changelog entry added for v1.2.2" ], "nice_to_haves": [ "Analysis cache cleanup utility (future enhancement)", "Feature-specific analysis file routing (future enhancement)", "Custom output_path parameter (future enhancement)" ], "acceptance_criteria": [ "GIVEN a valid project path WHEN analyze_project_for_planning is called THEN analysis file is created in coderef/analysis-cache/", "GIVEN analysis file created WHEN response is returned THEN _metadata includes saved_to and generated_at", "GIVEN permission error during mkdir WHEN error is caught THEN tool returns analysis data and logs security event", "GIVEN disk full during file write WHEN error is caught THEN tool returns analysis data and logs I/O error", "GIVEN multiple analyses run WHEN each completes THEN unique timestamped files exist without overwrites", "GIVEN existing workflow using analyze WHEN feature is deployed THEN no breaking changes occur" ] }, "9_implementation_checklist": { "phase_1_checklist": [ "Import datetime at top of tool_handlers.py", "Add analysis_cache_dir creation after analyzer.analyze() call", "Generate timestamp using datetime.now().strftime('%Y%m%d-%H%M%S')", "Construct analysis file path: analysis_cache_dir / f'analysis-{timestamp}.json'", "Save result to file using json.dump with indent=2 and UTF-8", "Add _metadata key with saved_to and generated_at to result", "Verify code compiles without syntax errors", "Manually test: run analyze_project_for_planning and check file created" ], "phase_2_checklist": [ "Wrap mkdir in try-except for PermissionError", "Wrap file write in try-except for PermissionError and OSError", "Add logger.info for successful save with file metadata", "Add log_security_event for PermissionError", "Add log_error for OSError", "Verify graceful degradation: tool returns data even if save fails", "Manually test: simulate permission error and verify logging" ], "phase_3_checklist": [ "Write test_analyze_project_creates_analysis_file unit test", "Write test_analysis_file_contains_valid_json unit test", "Write test_response_includes_metadata unit test", "Write test_graceful_degradation_on_permission_error unit test", "Write test_full_workflow_analysis_persistence integration test", "Run all tests and verify they pass", "Update CLAUDE.md Tool Catalog with new behavior", "Update user-guide.md Planning Workflow section", "Add changelog entry for v1.2.2", "Commit all changes with descriptive message", "Verify MCP server restart loads changes correctly" ] } } }
