generate_insights_report
Aggregate codebase health metrics into a narrative snapshot—risk hotspots, bottlenecks, and gap counts—in a single call to quickly orient without chaining multiple tools.
Instructions
Single-call narrative health snapshot: god files (PageRank), architectural bridges (edge bottlenecks), risk hotspots (complexity × churn), edge resolution-tier breakdown, and gap counts (dead exports, untested, cycles). Aggregates already-computed metrics into ~2K tokens of Markdown plus a structured payload. Use at the start of a session to orient yourself instead of chaining get_pagerank + get_risk_hotspots + get_edge_bottlenecks + self_audit. Read-only. Returns JSON: { generated_at, totals, resolution_tiers, god_files, bridges, hotspots, gaps, markdown }.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| top_n | No | Items per section | 5 |
Implementation Reference
- Main handler function `generateInsightsReport` — aggregates PageRank (god files), edge bottlenecks (architectural bridges), risk hotspots (complexity × churn), self-audit gaps, and edge resolution tiers into a structured InsightsReport with a Markdown rendering. Called by both the MCP tool registration and the project-insights resource.
export function generateInsightsReport( store: Store, opts: GenerateOptions = {}, ): TraceMcpResult<InsightsReport> { const topN = opts.topN ?? 5; const cwd = opts.cwd ?? process.cwd(); const stats = store.getStats(); const resolution = countResolutionTiers(store); const pageRank: PageRankResult[] = getPageRank(store).slice(0, topN); const godFiles = pageRank.map((p) => ({ file: p.file, score: p.score, in_degree: p.in_degree, out_degree: p.out_degree, })); const bottlenecksResult = getEdgeBottlenecks(store, { topN, sampling: 'auto' }); const bridges: InsightsReport['bridges'] = bottlenecksResult.isOk() ? bottlenecksResult.value.edges.slice(0, topN).map((e: EdgeBottleneck) => ({ source: e.sourceFile, target: e.targetFile, bottleneck_score: e.bottleneckScore, is_bridge: e.isBridge, co_change_weight: e.coChangeWeight, })) : []; const hotspotEntries = getHotspots(store, cwd, { limit: topN }).slice(0, topN); const hotspots = hotspotEntries.map((h) => ({ file: h.file, score: h.score, max_cyclomatic: h.max_cyclomatic, commits: h.commits, })); const audit = selfAudit(store); const gaps: InsightsReport['gaps'] = { dead_exports: audit.summary.dead_exports, untested_exports: audit.summary.untested_exports, dependency_cycles: audit.summary.dependency_cycles, unstable_modules: audit.summary.unstable_modules, dead_exports_examples: audit.dead_exports_top10.slice(0, topN).map((d) => ({ name: d.name, file: d.file, })), untested_examples: audit.untested_top10.slice(0, topN).map((u) => ({ name: u.name, file: u.file, })), }; const base: Omit<InsightsReport, 'markdown'> = { generated_at: new Date().toISOString(), totals: { files: stats.totalFiles, symbols: stats.totalSymbols, edges: stats.totalEdges, }, resolution_tiers: resolution, god_files: godFiles, bridges, hotspots, gaps, }; return ok({ ...base, markdown: renderMarkdown(base) }); } - `InsightsReport` interface — output schema defining the structured payload: generated_at, totals, resolution_tiers, god_files, bridges, hotspots, 
gaps, and markdown.
export interface InsightsReport { generated_at: string; totals: { files: number; symbols: number; edges: number; }; resolution_tiers: { lsp_resolved: number; ast_resolved: number; ast_inferred: number; text_matched: number; /** % of edges that are text_matched — high values mean noisy analysis */ text_matched_pct: number; }; god_files: { file: string; score: number; in_degree: number; out_degree: number }[]; bridges: { source: string; target: string; bottleneck_score: number; is_bridge: boolean; co_change_weight: number; }[]; hotspots: { file: string; score: number; max_cyclomatic: number; commits: number }[]; gaps: { dead_exports: number; untested_exports: number; dependency_cycles: number; unstable_modules: number; dead_exports_examples: { name: string; file: string }[]; untested_examples: { name: string; file: string }[]; }; /** Markdown rendering of the same data — ~1.5–2K tokens, designed for direct agent consumption */ markdown: string; } - `GenerateOptions` interface — input options with cwd (project root) and topN (items per section, default 5).
interface GenerateOptions { cwd?: string; topN?: number; } - src/tools/register/analysis.ts:222-235 (registration)MCP tool registration in `registerAnalysisTools` — registers 'generate_insights_report' with optional `top_n` schema, calls `generateInsightsReport` handler on invocation.
server.tool( 'generate_insights_report', 'Single-call narrative health snapshot: god files (PageRank), architectural bridges (edge bottlenecks), risk hotspots (complexity × churn), edge resolution-tier breakdown, and gap counts (dead exports, untested, cycles). Aggregates already-computed metrics into ~2K tokens of Markdown plus a structured payload. Use at the start of a session to orient yourself instead of chaining get_pagerank + get_risk_hotspots + get_edge_bottlenecks + self_audit. Read-only. Returns JSON: { generated_at, totals, resolution_tiers, god_files, bridges, hotspots, gaps, markdown }.', { top_n: z.number().int().min(1).max(20).optional().describe('Items per section (default: 5)'), }, async ({ top_n }) => { const result = generateInsightsReport(store, { cwd: projectRoot, topN: top_n }); if (result.isErr()) { return { content: [{ type: 'text', text: j(formatToolError(result.error)) }] }; } return { content: [{ type: 'text', text: j(result.value) }] }; }, ); - `countResolutionTiers` — helper that queries the edges table, groups by resolution_tier (lsp_resolved, ast_resolved, ast_inferred, text_matched), and computes text_matched_pct.
function countResolutionTiers(store: Store): InsightsReport['resolution_tiers'] { const rows = store.db .prepare( `SELECT resolution_tier, COUNT(*) AS cnt FROM edges GROUP BY resolution_tier`, ) .all() as Array<{ resolution_tier: string; cnt: number }>; const tiers = { lsp_resolved: 0, ast_resolved: 0, ast_inferred: 0, text_matched: 0 }; let total = 0; for (const row of rows) { if (row.resolution_tier in tiers) { tiers[row.resolution_tier as keyof typeof tiers] = row.cnt; } total += row.cnt; } const textPct = total > 0 ? Math.round((tiers.text_matched / total) * 1000) / 10 : 0; return { ...tiers, text_matched_pct: textPct }; } - `renderMarkdown` — helper that renders the structured report into ~1.5–2K tokens of Markdown with sections for totals, edge resolution, god files, architectural bridges, risk hotspots, and gaps.
function renderMarkdown(report: Omit<InsightsReport, 'markdown'>): string { const lines: string[] = []; lines.push(`# Project insights — ${report.generated_at}`); lines.push(''); lines.push( `**Totals:** ${report.totals.files} files · ${report.totals.symbols} symbols · ${report.totals.edges} edges`, ); lines.push(''); lines.push('## Edge resolution'); const t = report.resolution_tiers; lines.push( `- lsp_resolved: ${t.lsp_resolved} · ast_resolved: ${t.ast_resolved} · ast_inferred: ${t.ast_inferred} · text_matched: ${t.text_matched}`, ); if (t.text_matched_pct >= 5) { lines.push( `- ⚠ ${t.text_matched_pct}% of edges are text_matched (fuzzy). Treat raw counts in find_usages / get_change_impact with care; prefer high-tier results.`, ); } else { lines.push( `- ✓ text_matched share is low (${t.text_matched_pct}%) — graph is mostly resolved.`, ); } lines.push(''); lines.push('## God files (PageRank)'); if (report.god_files.length === 0) { lines.push('- (no files ranked)'); } else { for (const g of report.god_files) { lines.push( `- \`${g.file}\` — score ${g.score.toFixed(4)} (in:${g.in_degree} out:${g.out_degree})`, ); } } lines.push(''); lines.push('## Architectural bridges'); if (report.bridges.length === 0) { lines.push('- (no bottleneck edges found)'); } else { for (const b of report.bridges) { const badge = b.is_bridge ? 
' **[bridge]**' : ''; lines.push( `- \`${b.source}\` → \`${b.target}\`${badge} — score ${b.bottleneck_score.toFixed(3)}, co-change ${b.co_change_weight.toFixed(2)}`, ); } } lines.push(''); lines.push('## Risk hotspots (complexity × churn)'); if (report.hotspots.length === 0) { lines.push('- (no hotspots — git unavailable or no high-complexity files)'); } else { for (const h of report.hotspots) { lines.push( `- \`${h.file}\` — score ${h.score.toFixed(2)} (cyclomatic ${h.max_cyclomatic}, ${h.commits} commits)`, ); } } lines.push(''); lines.push('## Gaps'); const g = report.gaps; lines.push( `- dead exports: ${g.dead_exports} · untested exports: ${g.untested_exports} · cycles: ${g.dependency_cycles} · unstable modules: ${g.unstable_modules}`, ); if (g.dead_exports_examples.length > 0) { lines.push( `- dead examples: ${g.dead_exports_examples.map((e) => `\`${e.name}\` (${e.file})`).join(', ')}`, ); } if (g.untested_examples.length > 0) { lines.push( `- untested examples: ${g.untested_examples.map((e) => `\`${e.name}\` (${e.file})`).join(', ')}`, ); } lines.push(''); return lines.join('\n'); }