Skip to main content
Glama

Documentation Generator MCP Server

by srwlli
server.py (32.6 kB)
#!/usr/bin/env python3
"""
Documentation Generation MCP Server

Provides tools for generating project documentation using POWER framework
templates. This module only declares the tool catalog and dispatches calls;
the actual per-tool logic lives in tool_handlers.py (QUA-002).
"""

__version__ = "2.0.0"
__schema_version__ = "1.0.0"
__mcp_version__ = "1.0"

import asyncio
from pathlib import Path

from mcp.server import Server
from mcp.types import Tool, TextContent
from mcp.server.stdio import stdio_server

# Import generators
from generators import FoundationGenerator, BaseGenerator

import json
import jsonschema

# Import constants (REF-002)
from constants import Paths, Files

# Import validation functions (REF-003)
from validation import (
    validate_project_path_input,
    validate_version_format,
    validate_template_name_input,
    validate_changelog_inputs,
    VALID_TEMPLATE_SECTIONS,
)

# Import error response factory (ARCH-001)
from error_responses import ErrorResponse

# Import tool handlers (QUA-002)
import tool_handlers

# Import logging (ARCH-003)
from logger_config import logger, log_tool_call

# Resolve template directory relative to this file so the server works
# regardless of the process working directory.
SERVER_DIR = Path(__file__).parent
TEMPLATES_DIR = SERVER_DIR / Paths.TEMPLATES_DIR

# Initialize tool handlers with TEMPLATES_DIR (module-level side effect:
# handlers need the path before any tool call arrives).
tool_handlers.set_templates_dir(TEMPLATES_DIR)

# Create MCP server
app = Server("docs-mcp")

# Log server initialization.
# NOTE: plain string, not an f-string — the original used an f-string with no
# placeholders (ruff F541); structured data goes in `extra`.
logger.info(
    "MCP server starting",
    extra={"version": __version__, "mcp_version": __mcp_version__},
)


@app.list_tools()
async def list_tools() -> list[Tool]:
    """List available documentation tools.

    Returns the static catalog of Tool declarations (name, description,
    JSON Schema for arguments). Dispatch to implementations happens in
    call_tool() via tool_handlers.TOOL_HANDLERS.
    """
    return [
        # --- Template access -------------------------------------------------
        Tool(
            name="list_templates",
            description="Lists all available documentation templates (README, ARCHITECTURE, API, COMPONENTS, SCHEMA)",
            inputSchema={
                "type": "object",
                "properties": {},
                "required": [],
            },
        ),
        Tool(
            name="get_template",
            description="Retrieves the content of a specific documentation template",
            inputSchema={
                "type": "object",
                "properties": {
                    "template_name": {
                        "type": "string",
                        "description": "Name of template: readme, architecture, api, components, my-guide, schema, or user-guide",
                        "enum": ["readme", "architecture", "api", "components", "my-guide", "schema", "user-guide"],
                    },
                },
                "required": ["template_name"],
            },
        ),
        # --- Document generation ---------------------------------------------
        Tool(
            name="generate_foundation_docs",
            description="Generate foundation documentation (README, ARCHITECTURE, API, COMPONENTS, SCHEMA) for a project. Returns templates and generation plan - Claude will generate and save the actual documents.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to the project directory",
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="generate_individual_doc",
            description="Generate a single individual documentation file for a project. Returns the template - Claude will generate and save the document.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to the project directory",
                    },
                    "template_name": {
                        "type": "string",
                        "description": "Name of template to generate",
                        "enum": ["readme", "architecture", "api", "components", "my-guide", "schema", "user-guide"],
                    },
                },
                "required": ["project_path", "template_name"],
            },
        ),
        # --- Changelog tools --------------------------------------------------
        Tool(
            name="get_changelog",
            description="Get project changelog with structured change history for agent context. Returns all changes or filtered by version/type.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "version": {
                        "type": "string",
                        "description": "Optional: Get specific version (e.g., '1.0.1'). Omit for all versions.",
                    },
                    "change_type": {
                        "type": "string",
                        "enum": ["bugfix", "enhancement", "feature", "breaking_change", "deprecation", "security"],
                        "description": "Optional: Filter by change type",
                    },
                    "breaking_only": {
                        "type": "boolean",
                        "description": "Optional: Show only breaking changes",
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="add_changelog_entry",
            description="Add a new entry to the project changelog. Requires all change details including version, type, title, description, files, reason, and impact.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "version": {
                        "type": "string",
                        "description": "Version number (e.g., '1.0.2')",
                        "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$",
                    },
                    "change_type": {
                        "type": "string",
                        "enum": ["bugfix", "enhancement", "feature", "breaking_change", "deprecation", "security"],
                        "description": "Type of change",
                    },
                    "severity": {
                        "type": "string",
                        "enum": ["critical", "major", "minor", "patch"],
                        "description": "Severity level",
                    },
                    "title": {
                        "type": "string",
                        "description": "Short title of the change",
                    },
                    "description": {
                        "type": "string",
                        "description": "Detailed description of what changed",
                    },
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "List of files affected",
                    },
                    "reason": {
                        "type": "string",
                        "description": "Why this change was made",
                    },
                    "impact": {
                        "type": "string",
                        "description": "Impact on users/system",
                    },
                    "breaking": {
                        "type": "boolean",
                        "description": "Whether this is a breaking change",
                        "default": False,
                    },
                    "migration": {
                        "type": "string",
                        "description": "Migration guide (if breaking)",
                    },
                    "summary": {
                        "type": "string",
                        "description": "Version summary (for new versions)",
                    },
                    "contributors": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "List of contributors",
                    },
                },
                "required": ["project_path", "version", "change_type", "severity", "title", "description", "files", "reason", "impact"],
            },
        ),
        Tool(
            name="update_changelog",
            description="Agentic workflow tool that instructs the agent to analyze their recent changes and update the changelog using context. Agent reviews modified files, determines change details, and calls add_changelog_entry.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "version": {
                        "type": "string",
                        "description": "Version number for this change (e.g., '1.0.3')",
                        "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$",
                    },
                },
                "required": ["project_path", "version"],
            },
        ),
        # --- Interactive / standards tools -----------------------------------
        Tool(
            name="generate_quickref_interactive",
            description="Interactive workflow to generate a universal quickref guide for ANY application (CLI, Web, API, Desktop, Library). Returns interview questions for AI to ask user. AI guides conversation, user answers in plain English, then AI generates scannable quickref.md (150-250 lines) following proven pattern.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory where quickref.md will be saved",
                    },
                    "app_type": {
                        "type": "string",
                        "enum": ["cli", "web", "api", "desktop", "library"],
                        "description": "Optional: Type of application (can be inferred from user responses)",
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="establish_standards",
            description="Scan codebase to discover UI/UX/behavior patterns and generate standards documentation. Creates 4 markdown files in coderef/standards/. Run ONCE per project to establish baseline standards for consistency validation (Tools #9 and #10).",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "scan_depth": {
                        "type": "string",
                        "enum": ["quick", "standard", "deep"],
                        "description": "Analysis depth: quick (common patterns, ~1-2 min), standard (comprehensive, ~3-5 min), deep (exhaustive, ~10-15 min)",
                        "default": "standard",
                    },
                    "focus_areas": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["ui_components", "behavior_patterns", "ux_flows", "all"],
                        },
                        "description": "Areas to analyze: ui_components (buttons, modals, forms), behavior_patterns (errors, loading), ux_flows (navigation, permissions), or all",
                        "default": ["all"],
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="audit_codebase",
            description="Audit codebase for standards violations using established standards documents. Scans all source files, compares against standards, and generates comprehensive compliance report with violations, severity levels, and fix suggestions.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "standards_dir": {
                        "type": "string",
                        "description": "Path to standards directory (relative to project root)",
                        "default": "coderef/standards",
                    },
                    "severity_filter": {
                        "type": "string",
                        "enum": ["critical", "major", "minor", "all"],
                        "description": "Filter violations by severity level",
                        "default": "all",
                    },
                    "scope": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["ui_patterns", "behavior_patterns", "ux_patterns", "all"],
                        },
                        "description": "Which areas to audit: ui_patterns, behavior_patterns, ux_patterns, or all",
                        "default": ["all"],
                    },
                    "generate_fixes": {
                        "type": "boolean",
                        "description": "Include automated fix suggestions in report",
                        "default": True,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="check_consistency",
            description="Check code changes against established standards for consistency violations. Lightweight quality gate for pre-commit checks and CI/CD pipelines. Only scans modified files. Auto-detects changes via git or accepts explicit file list.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "List of files to check (relative to project_path). If not provided, auto-detects git changes (staged files by default).",
                    },
                    "standards_dir": {
                        "type": "string",
                        "description": "Path to standards directory (relative to project root)",
                        "default": "coderef/standards",
                    },
                    "severity_threshold": {
                        "type": "string",
                        "enum": ["critical", "major", "minor"],
                        "description": "Fail if violations at or above this severity are found. 'critical'=only critical, 'major'=critical+major, 'minor'=all violations",
                        "default": "major",
                    },
                    "scope": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["ui_patterns", "behavior_patterns", "ux_patterns", "all"],
                        },
                        "description": "Which standards to check against. 'all' checks UI, behavior, and UX patterns.",
                        "default": ["all"],
                    },
                    "fail_on_violations": {
                        "type": "boolean",
                        "description": "Return error status (exit code 1) if violations found. Set false for reporting only.",
                        "default": True,
                    },
                },
                "required": ["project_path"],
            },
        ),
        # --- Planning tools ---------------------------------------------------
        Tool(
            name="get_planning_template",
            description="Returns feature-implementation-planning-standard.json template content or specific sections for AI reference during implementation planning",
            inputSchema={
                "type": "object",
                "properties": {
                    "section": {
                        "type": "string",
                        # Valid section names are centralized in validation.py.
                        "enum": VALID_TEMPLATE_SECTIONS,
                        "description": "Which section of the template to return (default: all)",
                        "default": "all",
                    },
                },
                "required": [],
            },
        ),
        Tool(
            name="analyze_project_for_planning",
            description="Analyzes project to discover foundation docs, coding standards, reference components, and patterns - automates section 0 (Preparation) of implementation plans. Optionally saves analysis to feature folder. Reduces prep time from 30-60 minutes to 30-60 seconds.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                    "feature_name": {
                        "type": "string",
                        "description": "Optional: Feature name for saving analysis to coderef/working/{feature_name}/analysis.json. If omitted, analysis is returned without saving.",
                        "pattern": "^[a-zA-Z0-9_-]+$",
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="validate_implementation_plan",
            description="Validates implementation plan JSON against feature-implementation-planning-standard.json quality checklist. Scores plan 0-100 based on completeness, quality, and autonomy. Identifies issues by severity (critical/major/minor) with specific fix suggestions. Enables iterative review loop - AI validates plan, refines based on feedback, re-validates until score >= 85 before presenting to user.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory containing the plan file",
                    },
                    "plan_file_path": {
                        "type": "string",
                        "description": "Relative path to plan JSON file within project (e.g., feature-auth-plan.json)",
                    },
                },
                "required": ["project_path", "plan_file_path"],
            },
        ),
        Tool(
            name="generate_plan_review_report",
            description="Transforms validation results from validate_implementation_plan into human-readable markdown review reports. Creates comprehensive report with score, grade, issue breakdown by severity, recommendations, and approval status. Saves to coderef/reviews/ directory.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "plan_file_path": {
                        "type": "string",
                        "description": "Relative path to plan JSON file within project (e.g., feature-auth-plan.json)",
                    },
                    "output_path": {
                        "type": "string",
                        "description": "Optional: Custom output path for review report (relative to project root). Default: coderef/reviews/review-{planname}-{timestamp}.md",
                    },
                },
                "required": ["project_path", "plan_file_path"],
            },
        ),
        Tool(
            name="create_plan",
            description="Create implementation plan by synthesizing context, analysis, and template. Generates complete 10-section plan.json file in batch mode. Saves partial plan with TODOs if generation fails. Bridges workflow gap between analyze-for-planning and validate-plan.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory",
                    },
                    "feature_name": {
                        "type": "string",
                        "description": "Feature name (alphanumeric, hyphens, underscores only). Max 100 characters.",
                        "pattern": "^[a-zA-Z0-9_-]+$",
                    },
                },
                "required": ["project_path", "feature_name"],
            },
        ),
        # --- Inventory tools --------------------------------------------------
        Tool(
            name="inventory_manifest",
            description="Generate comprehensive project file inventory manifest. Creates detailed catalog of all project files with metadata (size, lines, category, risk level, dependencies), categorizes files using universal taxonomy, calculates project metrics, and saves manifest to coderef/inventory/manifest.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to inventory",
                    },
                    "analysis_depth": {
                        "type": "string",
                        "enum": ["quick", "standard", "deep"],
                        "description": "Analysis depth: quick (basic metadata only), standard (+ categorization & basic dependencies), deep (+ full dependency parsing). Default: standard",
                        "default": "standard",
                    },
                    "exclude_dirs": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Optional: List of directory names to exclude (e.g., node_modules, .git). Default: common exclusions",
                    },
                    "max_file_size": {
                        "type": "integer",
                        "description": "Optional: Maximum file size to process in bytes. Default: 10MB",
                        "minimum": 0,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="dependency_inventory",
            description="Analyze project dependencies across multiple ecosystems (npm, pip, cargo, composer). Scans for security vulnerabilities via OSV API, checks for outdated packages, detects licenses, and generates comprehensive dependency manifest with metrics. Saves to coderef/inventory/dependencies.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                    "scan_security": {
                        "type": "boolean",
                        "description": "Whether to scan for security vulnerabilities and check latest versions. Default: true",
                        "default": True,
                    },
                    "ecosystems": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["npm", "pip", "cargo", "composer", "all"],
                        },
                        "description": "Which package ecosystems to analyze. Default: all detected ecosystems",
                        "default": ["all"],
                    },
                    "include_transitive": {
                        "type": "boolean",
                        "description": "Whether to include transitive (indirect) dependencies. Default: false",
                        "default": False,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="api_inventory",
            description="Discover API endpoints across multiple frameworks (FastAPI, Flask, Express, GraphQL). Extracts REST/GraphQL endpoints from source code using AST parsing, parses OpenAPI/Swagger documentation, calculates documentation coverage, and generates comprehensive API manifest. Saves to coderef/inventory/api.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                    "frameworks": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["fastapi", "flask", "express", "graphql", "all"],
                        },
                        "description": "Which API frameworks to detect. Default: all supported frameworks",
                        "default": ["all"],
                    },
                    "include_graphql": {
                        "type": "boolean",
                        "description": "Whether to parse GraphQL schemas. Default: false",
                        "default": False,
                    },
                    "scan_documentation": {
                        "type": "boolean",
                        "description": "Whether to scan for OpenAPI/Swagger documentation files. Default: true",
                        "default": True,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="database_inventory",
            description="Discover database schemas across multiple systems (PostgreSQL, MySQL, MongoDB, SQLite). Extracts table/collection definitions from ORM models (SQLAlchemy, Sequelize, Mongoose) and migration files (Alembic, Knex), parses column/field metadata with relationships, and generates comprehensive database manifest. Saves to coderef/inventory/database.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                    "database_systems": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["postgresql", "mysql", "mongodb", "sqlite", "all"],
                        },
                        "description": "Which database systems to detect. Default: all supported systems",
                        "default": ["all"],
                    },
                    "include_migrations": {
                        "type": "boolean",
                        "description": "Whether to parse migration files for schema definitions. Default: true",
                        "default": True,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="config_inventory",
            description="Discover and analyze configuration files across multiple formats (JSON, YAML, TOML, INI, ENV). Detects sensitive values (API keys, passwords, tokens), automatically masks them with [REDACTED], and generates comprehensive configuration manifest with security logging. Saves to coderef/inventory/config.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                    "formats": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["json", "yaml", "toml", "ini", "env", "all"],
                        },
                        "description": "Which configuration formats to analyze. Default: all formats",
                        "default": ["all"],
                    },
                    "mask_sensitive": {
                        "type": "boolean",
                        "description": "Whether to mask sensitive values with [REDACTED]. Default: true",
                        "default": True,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="test_inventory",
            description="Discover test files, detect test frameworks (pytest, unittest, jest, mocha, vitest), analyze coverage metrics if available, and identify untested source files. Generates comprehensive test infrastructure manifest. Saves to coderef/inventory/tests.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                    "frameworks": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "enum": ["pytest", "unittest", "jest", "mocha", "vitest", "all"],
                        },
                        "description": "Which test frameworks to detect. Default: all frameworks",
                        "default": ["all"],
                    },
                    "include_coverage": {
                        "type": "boolean",
                        "description": "Whether to analyze coverage data if available (.coverage, coverage.json, lcov.info). Default: true",
                        "default": True,
                    },
                },
                "required": ["project_path"],
            },
        ),
        Tool(
            name="documentation_inventory",
            description="Discover documentation files across multiple formats (Markdown, RST, AsciiDoc, HTML, Org-mode), analyze quality metrics (freshness, completeness, coverage), and generate comprehensive documentation manifest. Saves to coderef/inventory/documentation.json.",
            inputSchema={
                "type": "object",
                "properties": {
                    "project_path": {
                        "type": "string",
                        "description": "Absolute path to project directory to analyze",
                    },
                },
                "required": ["project_path"],
            },
        ),
    ]


@app.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
    """
    Handle tool calls by dispatching to registered handlers (QUA-002).

    This function now uses a registry pattern for clean separation of concerns.
    Each tool has its own handler function in tool_handlers.py for better
    testability and maintainability.

    Raises:
        ValueError: If `name` is not a registered tool.
    """
    # Log tool invocation (ARCH-003) — only argument *keys*, never values,
    # so sensitive argument contents stay out of the log.
    log_tool_call(name, args_keys=list(arguments.keys()))

    handler = tool_handlers.TOOL_HANDLERS.get(name)
    if not handler:
        logger.error(f"Unknown tool requested: {name}")
        raise ValueError(f"Unknown tool: {name}")

    return await handler(arguments)


async def main() -> None:
    """Run the server using stdio transport."""
    logger.info("Starting MCP server main loop")
    try:
        async with stdio_server() as (read_stream, write_stream):
            await app.run(
                read_stream,
                write_stream,
                app.create_initialization_options(),
            )
    except Exception as e:
        # Top-level boundary: log with traceback, then re-raise so the
        # process exits non-zero instead of swallowing the failure.
        logger.error(f"Server error: {str(e)}", exc_info=True)
        raise


if __name__ == "__main__":
    asyncio.run(main())

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/srwlli/docs-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.