Skip to main content
Glama

analyze_repository

Analyze repository structure, dependencies, and documentation needs to identify gaps and generate recommendations for documentation deployment.

Instructions

Analyze repository structure, dependencies, and documentation needs

Input Schema

TableJSON Schema
| Name  | Required | Description                       | Default  |
| ----- | -------- | --------------------------------- | -------- |
| path  | Yes      | Path to the repository to analyze | —        |
| depth | No       | Analysis depth (quick, standard, or deep) | standard |

Implementation Reference

  • Main handler function for the 'analyze_repository' MCP tool. Parses input with Zod schema, performs multi-phase analysis (structure, deps, docs, recommendations), integrates with Knowledge Graph, and returns formatted MCP response.
/**
 * Main handler for the `analyze_repository` MCP tool.
 *
 * Validates the raw arguments with `inputSchema`, then runs a multi-phase
 * pipeline: access check, Knowledge-Graph historical context, structure /
 * dependency / documentation analysis, recommendation generation, KG storage,
 * and intelligent enrichment. Progress is reported (0-100) only when the
 * caller supplied a progress token.
 *
 * @param args - Raw, unvalidated tool arguments; parsed via `inputSchema`.
 * @param context - Optional MCP request context providing `info` logging and
 *   `meta.reportProgress`. Typed `any` — NOTE(review): should be tightened to
 *   the MCP context type; confirm against the server framework.
 * @returns A formatted MCP tool response. Errors are not rethrown: any failure
 *   is converted into an `isError`-style response with code ANALYSIS_FAILED.
 */
export async function analyzeRepository(
  args: unknown,
  context?: any,
): Promise<{ content: any[]; isError?: boolean }> {
  const startTime = Date.now();
  const { path: repoPath, depth } = inputSchema.parse(args);
  try {
    // Report initial progress
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 0, total: 100 });
    }
    await context?.info?.("šŸ” Starting repository analysis...");

    // Verify path exists and is accessible
    await context?.info?.(`šŸ“‚ Verifying access to ${repoPath}...`);
    await fs.access(repoPath, fs.constants.R_OK);

    // Try to read the directory to catch permission issues early
    try {
      await fs.readdir(repoPath);
    } catch (error: any) {
      if (error.code === "EACCES" || error.code === "EPERM") {
        throw new Error(`Permission denied: Cannot read directory ${repoPath}`);
      }
      throw error;
    }
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 10, total: 100 });
    }

    // Phase 1.2: Get historical context from Knowledge Graph.
    // KG failures are non-fatal: fall back to an empty context.
    await context?.info?.(
      "šŸ“Š Retrieving historical context from Knowledge Graph...",
    );
    let projectContext;
    try {
      projectContext = await getProjectContext(repoPath);
      if (projectContext.previousAnalyses > 0) {
        await context?.info?.(
          `✨ Found ${projectContext.previousAnalyses} previous analysis(es) of this project`,
        );
      }
    } catch (error) {
      console.warn("Failed to retrieve project context:", error);
      projectContext = {
        previousAnalyses: 0,
        lastAnalyzed: null,
        knownTechnologies: [],
        similarProjects: [],
      };
    }
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 20, total: 100 });
    }

    // Core analysis phases (sequential; each reports progress when enabled).
    await context?.info?.("šŸ”Ž Analyzing repository structure...");
    const structure = await analyzeStructure(repoPath, depth);
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 40, total: 100 });
    }
    await context?.info?.("šŸ“¦ Analyzing dependencies...");
    const dependencies = await analyzeDependencies(repoPath);
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 60, total: 100 });
    }
    await context?.info?.("šŸ“ Analyzing documentation...");
    const documentation = await analyzeDocumentation(repoPath);
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 75, total: 100 });
    }
    await context?.info?.("šŸ’” Generating recommendations...");
    const recommendations = await generateRecommendations(repoPath);

    const analysis: RepositoryAnalysis = {
      id: generateAnalysisId(),
      timestamp: new Date().toISOString(),
      path: repoPath,
      structure,
      dependencies,
      documentation,
      recommendations,
    };

    // Phase 1.2: Store project in Knowledge Graph (best-effort, non-fatal).
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 85, total: 100 });
    }
    await context?.info?.("šŸ’¾ Storing analysis in Knowledge Graph...");
    try {
      await createOrUpdateProject(analysis);
    } catch (error) {
      console.warn("Failed to store project in Knowledge Graph:", error);
    }
    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 90, total: 100 });
    }

    // Phase 1.3: Get intelligent analysis enrichment.
    // Memory modules are imported lazily so a missing/broken memory subsystem
    // only disables enrichment instead of failing the whole tool.
    await context?.info?.("🧠 Enriching analysis with historical insights...");
    let intelligentAnalysis;
    let documentationHealth;
    try {
      const { getProjectInsights, getSimilarProjects } = await import(
        "../memory/index.js"
      );
      const { getKnowledgeGraph } = await import("../memory/kg-integration.js");
      const insights = await getProjectInsights(repoPath);
      const similar = await getSimilarProjects(analysis, 5);

      // Check documentation health from KG
      try {
        const kg = await getKnowledgeGraph();
        const allEdges = await kg.getAllEdges();
        // Find outdated documentation
        const outdatedEdges = allEdges.filter((e) => e.type === "outdated_for");
        // Find documentation coverage
        const documentsEdges = allEdges.filter((e) => e.type === "documents");
        const totalCodeFiles = allEdges.filter(
          (e) => e.type === "depends_on" && e.target.startsWith("code_file:"),
        ).length;
        // Distinct docs sources, so one doc covering many files counts once.
        const documentedFiles = new Set(documentsEdges.map((e) => e.source))
          .size;
        const coveragePercent =
          totalCodeFiles > 0
            ? Math.round((documentedFiles / totalCodeFiles) * 100)
            : 0;
        documentationHealth = {
          outdatedCount: outdatedEdges.length,
          coveragePercent,
          totalCodeFiles,
          documentedFiles,
        };
      } catch (error) {
        console.warn("Failed to calculate documentation health:", error);
      }

      intelligentAnalysis = {
        insights,
        similarProjects: similar.slice(0, 3).map((p: any) => ({
          path: p.projectPath,
          similarity: Math.round((p.similarity || 0) * 100) + "%",
          technologies: p.technologies?.join(", ") || "unknown",
        })),
        ...(documentationHealth && { documentationHealth }),
        recommendations: [
          // Documentation health recommendations
          ...(documentationHealth && documentationHealth.outdatedCount > 0
            ? [
                `${documentationHealth.outdatedCount} documentation section(s) may be outdated - code has changed since docs were updated`,
              ]
            : []),
          ...(documentationHealth &&
          documentationHealth.coveragePercent < 50 &&
          documentationHealth.totalCodeFiles > 0
            ? [
                `Documentation covers only ${documentationHealth.coveragePercent}% of code files - consider documenting more`,
              ]
            : []),
          // Only suggest creating README if it truly doesn't exist
          // Don't suggest improvements yet - that requires deeper analysis
          ...(analysis.documentation.hasReadme
            ? []
            : ["Consider creating a README.md for project documentation"]),
          // Only suggest docs structure if no docs folder exists at all
          ...(analysis.structure.hasDocs
            ? []
            : analysis.documentation.existingDocs.length === 0
              ? [
                  "Consider setting up documentation structure using Diataxis framework",
                ]
              : []),
          // Infrastructure recommendations are safe as they're objective
          ...(analysis.structure.hasTests
            ? []
            : ["Consider adding test coverage to improve reliability"]),
          ...(analysis.structure.hasCI
            ? []
            : ["Consider setting up CI/CD pipeline for automation"]),
        ],
      };
    } catch (error) {
      console.warn("Failed to get intelligent analysis:", error);
    }

    // Enhance response with historical context
    const contextInfo: string[] = [];
    if (projectContext.previousAnalyses > 0) {
      contextInfo.push(
        `šŸ“Š Previously analyzed ${projectContext.previousAnalyses} time(s)`,
      );
      if (projectContext.lastAnalyzed) {
        const lastDate = new Date(
          projectContext.lastAnalyzed,
        ).toLocaleDateString();
        contextInfo.push(`šŸ“… Last analyzed: ${lastDate}`);
      }
    }
    if (projectContext.knownTechnologies.length > 0) {
      contextInfo.push(
        `šŸ’” Known technologies: ${projectContext.knownTechnologies.join(", ")}`,
      );
    }
    if (projectContext.similarProjects.length > 0) {
      contextInfo.push(
        `šŸ”— Found ${projectContext.similarProjects.length} similar project(s) in knowledge graph`,
      );
    }

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({ progress: 100, total: 100 });
    }
    const executionTime = Date.now() - startTime;
    await context?.info?.(
      `āœ… Analysis complete! Processed ${
        analysis.structure.totalFiles
      } files in ${Math.round(executionTime / 1000)}s`,
    );

    // Assemble the success response: data + metadata + user-facing
    // recommendations + suggested follow-up tool invocations.
    const response: MCPToolResponse<RepositoryAnalysis> = {
      success: true,
      data: analysis,
      metadata: {
        toolVersion: "1.0.0",
        executionTime,
        timestamp: new Date().toISOString(),
        analysisId: analysis.id,
        ...(intelligentAnalysis && { intelligentAnalysis }),
      },
      recommendations: [
        {
          type: "info",
          title: "Analysis Complete",
          description: `Successfully analyzed ${analysis.structure.totalFiles} files across ${analysis.structure.totalDirectories} directories`,
        },
        ...(contextInfo.length > 0
          ? [
              {
                type: "info" as const,
                title: "Historical Context",
                description: contextInfo.join("\n"),
              },
            ]
          : []),
        ...(intelligentAnalysis?.recommendations &&
        intelligentAnalysis.recommendations.length > 0
          ? [
              {
                type: "info" as const,
                title: "AI Recommendations",
                description: intelligentAnalysis.recommendations.join("\n• "),
              },
            ]
          : []),
        ...(intelligentAnalysis?.similarProjects &&
        intelligentAnalysis.similarProjects.length > 0
          ? [
              {
                type: "info" as const,
                title: "Similar Projects",
                description: intelligentAnalysis.similarProjects
                  .map(
                    (p: any) =>
                      `${p.path} (${p.similarity} similar, ${p.technologies})`,
                  )
                  .join("\n"),
              },
            ]
          : []),
      ],
      nextSteps: [
        ...(analysis.documentation.hasReadme
          ? [
              {
                action: "Analyze README Quality",
                toolRequired: "analyze_readme",
                description:
                  "Evaluate README completeness and suggest improvements",
                priority: "medium" as const,
              },
            ]
          : []),
        {
          action: "Get SSG Recommendation",
          toolRequired: "recommend_ssg",
          description: `Use analysis ID: ${analysis.id}`,
          priority: "high",
        },
      ],
    };
    return formatMCPResponse(response);
  } catch (error) {
    // Convert any failure (bad path, parse error, analysis crash) into a
    // structured error response rather than letting the exception escape.
    const errorResponse: MCPToolResponse = {
      success: false,
      error: {
        code: "ANALYSIS_FAILED",
        message: `Failed to analyze repository: ${error}`,
        resolution: "Ensure the repository path exists and is accessible",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };
    return formatMCPResponse(errorResponse);
  }
}
  • Zod input schema validation for the tool parameters: repository path and optional analysis depth.
    const inputSchema = z.object({ path: z.string(), depth: z.enum(["quick", "standard", "deep"]).optional().default("standard"), });
  • Helper function that analyzes repository structure: file counts, languages, presence of tests/CI/docs.
    async function analyzeStructure( repoPath: string, depth: "quick" | "standard" | "deep", ): Promise<RepositoryAnalysis["structure"]> { const stats = { totalFiles: 0, totalDirectories: 0, languages: {} as Record<string, number>, hasTests: false, hasCI: false, hasDocs: false, }; const maxDepth = depth === "quick" ? 2 : depth === "standard" ? 5 : 10; async function walkDirectory( dirPath: string, currentDepth: number = 0, ): Promise<void> { if (currentDepth > maxDepth) return; try { const entries = await fs.readdir(dirPath, { withFileTypes: true }); for (const entry of entries) { const fullPath = path.join(dirPath, entry.name); if (entry.isDirectory()) { stats.totalDirectories++; // Check for special directories if ( entry.name.includes("test") || entry.name.includes("spec") || entry.name === "__tests__" ) { stats.hasTests = true; } if ( entry.name === ".github" || entry.name === ".gitlab-ci" || entry.name === ".circleci" ) { stats.hasCI = true; } if ( entry.name === "docs" || entry.name === "documentation" || entry.name === "doc" ) { stats.hasDocs = true; } // Skip node_modules and other common ignored directories if ( ![ "node_modules", ".git", "dist", "build", ".next", ".nuxt", ].includes(entry.name) ) { await walkDirectory(fullPath, currentDepth + 1); } } else if (entry.isFile()) { // Skip hidden files (starting with .) if (!entry.name.startsWith(".")) { stats.totalFiles++; // Track languages by file extension const ext = path.extname(entry.name).toLowerCase(); if (ext && getLanguageFromExtension(ext)) { stats.languages[ext] = (stats.languages[ext] || 0) + 1; } // Check for CI files if ( entry.name.match(/\.(yml|yaml)$/) && entry.name.includes("ci") ) { stats.hasCI = true; } // Check for test files if (entry.name.includes("test") || entry.name.includes("spec")) { stats.hasTests = true; } } } } } catch (error) { // Skip directories we can't read } } await walkDirectory(repoPath); return stats; }
  • Supporting helper functions for dependency analysis, documentation assessment, recommendations, language detection, and ID generation.
    }
  • TypeScript interface defining the structure of the analysis output returned by the tool.
    export interface RepositoryAnalysis { id: string; timestamp: string; path: string; structure: { totalFiles: number; totalDirectories: number; languages: Record<string, number>; hasTests: boolean; hasCI: boolean; hasDocs: boolean; }; dependencies: { ecosystem: "javascript" | "python" | "ruby" | "go" | "unknown"; packages: string[]; devPackages: string[]; }; documentation: { hasReadme: boolean; hasContributing: boolean; hasLicense: boolean; existingDocs: string[]; estimatedComplexity: "simple" | "moderate" | "complex"; extractedContent?: ExtractedContent; }; recommendations: { primaryLanguage: string; projectType: string; teamSize: "solo" | "small" | "medium" | "large"; }; }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/tosin2013/documcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server