Skip to main content
Glama

Smart Code Search MCP Server

code_quality.py16.3 kB
"""
Aggregate Code Quality Analyzer
Combines all metrics for comprehensive code quality assessment
"""
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, field
import json

from .base_metrics import FileMetrics, FunctionMetrics, ClassMetrics
from .complexity_analyzer import ComplexityAnalyzer
from .maintainability import MaintainabilityAnalyzer
from .coupling_analyzer import CouplingAnalyzer


@dataclass
class QualityReport:
    """Comprehensive code quality report for a single file."""

    file_path: str
    quality_grade: str  # A-F grade
    metrics: FileMetrics
    recommendations: List[str] = field(default_factory=list)
    code_smells: List[str] = field(default_factory=list)
    refactoring_opportunities: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            'file_path': self.file_path,
            'quality_grade': self.quality_grade,
            'metrics': {
                'lines_of_code': self.metrics.lines_of_code,
                'cyclomatic_complexity': self.metrics.cyclomatic_complexity,
                'cognitive_complexity': self.metrics.cognitive_complexity,
                'maintainability_index': self.metrics.maintainability_index,
                'coupling_afferent': self.metrics.coupling_afferent,
                'coupling_efferent': self.metrics.coupling_efferent,
                'instability': self.metrics.instability,
                'comment_ratio': self.metrics.comment_ratio,
                'quality_grade': self.metrics.quality_grade
            },
            'issues': self.metrics.issues,
            'recommendations': self.recommendations,
            'code_smells': self.code_smells,
            'refactoring_opportunities': self.refactoring_opportunities
        }


class CodeQualityAnalyzer:
    """Comprehensive code quality analyzer combining complexity,
    maintainability and coupling metrics into a single report."""

    # Thresholds for code smells (units noted per entry)
    SMELL_THRESHOLDS = {
        'long_method': 50,          # lines
        'large_class': 300,         # lines
        'too_many_parameters': 5,
        'deep_nesting': 4,
        'high_complexity': 10,
        'low_cohesion': 0.5,        # LCOM-style: higher value = lower cohesion
        'high_coupling': 10,
        'low_maintainability': 65,
        'low_comment_ratio': 0.1,
        'god_class_methods': 20,
        'duplicate_threshold': 10   # lines
    }

    def __init__(self, language: str = "python", project_root: Optional[Path] = None):
        """
        Initialize code quality analyzer.

        Args:
            language: Programming language
            project_root: Root directory for project (defaults to the
                current working directory when omitted)
        """
        self.language = language
        self.project_root = project_root or Path.cwd()

        # Initialize sub-analyzers; each produces a partial FileMetrics
        # that _merge_metrics later combines.
        self.complexity_analyzer = ComplexityAnalyzer(language)
        self.maintainability_analyzer = MaintainabilityAnalyzer(language)
        self.coupling_analyzer = CouplingAnalyzer(language, project_root)

    def analyze_file(self, file_path: Path, content: str) -> QualityReport:
        """
        Perform comprehensive quality analysis on a file.

        Args:
            file_path: Path to the file
            content: File content

        Returns:
            QualityReport with all metrics and recommendations
        """
        # Get complexity metrics
        complexity_metrics = self.complexity_analyzer.analyze_file(file_path, content)

        # Get maintainability metrics
        maintain_metrics = self.maintainability_analyzer.analyze_file(file_path, content)

        # Get coupling metrics
        coupling_metrics = self.coupling_analyzer.analyze_file(file_path, content)

        # Merge metrics into a single FileMetrics
        metrics = self._merge_metrics(complexity_metrics, maintain_metrics, coupling_metrics)

        # Detect code smells
        code_smells = self._detect_code_smells(metrics)

        # Generate recommendations
        recommendations = self._generate_recommendations(metrics, code_smells)

        # Identify refactoring opportunities
        refactoring_ops = self._identify_refactoring_opportunities(metrics)

        # Create quality report
        report = QualityReport(
            file_path=str(file_path),
            quality_grade=metrics.quality_grade,
            metrics=metrics,
            recommendations=recommendations,
            code_smells=code_smells,
            refactoring_opportunities=refactoring_ops
        )

        return report

    def _merge_metrics(self, complexity: FileMetrics,
                       maintainability: FileMetrics,
                       coupling: FileMetrics) -> FileMetrics:
        """Merge metrics from the three sub-analyzers into one FileMetrics.

        NOTE(review): this mutates and returns the `complexity` argument
        rather than copying it — callers should not reuse that object
        afterwards expecting it to be unchanged. A defensive copy would
        need FileMetrics internals; confirm before changing.
        """
        # Start with complexity metrics as base
        merged = complexity

        # Add maintainability metrics
        merged.maintainability_index = maintainability.maintainability_index
        merged.halstead_volume = maintainability.halstead_volume
        merged.halstead_difficulty = maintainability.halstead_difficulty
        merged.halstead_effort = maintainability.halstead_effort

        # Add coupling metrics
        merged.coupling_afferent = coupling.coupling_afferent
        merged.coupling_efferent = coupling.coupling_efferent
        merged.instability = coupling.instability

        # Merge class metrics: copy per-class coupling/cohesion numbers
        # onto the matching class (by name) in the base metrics.
        if coupling.classes:
            for coupling_class in coupling.classes:
                for merged_class in merged.classes:
                    if merged_class.name == coupling_class.name:
                        merged_class.coupling_afferent = coupling_class.coupling_afferent
                        merged_class.coupling_efferent = coupling_class.coupling_efferent
                        merged_class.cohesion = coupling_class.cohesion
                        break

        return merged

    def _detect_code_smells(self, metrics: FileMetrics) -> List[str]:
        """Detect code smells based on metrics.

        Returns a list of human-readable smell descriptions, grouped as
        file-level, function-level and class-level findings.
        """
        smells = []

        # File-level smells
        if metrics.lines_of_code > self.SMELL_THRESHOLDS['large_class']:
            smells.append(f"Large file: {metrics.lines_of_code} lines")

        # x3: a file aggregates many functions, so allow triple the
        # per-function complexity budget before flagging.
        if metrics.cyclomatic_complexity > self.SMELL_THRESHOLDS['high_complexity'] * 3:
            smells.append(f"Very complex file: complexity={metrics.cyclomatic_complexity}")

        if metrics.maintainability_index < self.SMELL_THRESHOLDS['low_maintainability']:
            smells.append(f"Low maintainability: {metrics.maintainability_index:.1f}")

        if metrics.comment_ratio < self.SMELL_THRESHOLDS['low_comment_ratio']:
            smells.append(f"Insufficient comments: {metrics.comment_ratio:.1%}")

        if metrics.coupling_efferent > self.SMELL_THRESHOLDS['high_coupling']:
            smells.append(f"High coupling: {metrics.coupling_efferent} dependencies")

        # Function-level smells
        for func in metrics.functions:
            if func.lines_of_code > self.SMELL_THRESHOLDS['long_method']:
                smells.append(f"Long method '{func.name}': {func.lines_of_code} lines")

            if func.parameters > self.SMELL_THRESHOLDS['too_many_parameters']:
                smells.append(f"Too many parameters in '{func.name}': {func.parameters}")

            if func.cyclomatic_complexity > self.SMELL_THRESHOLDS['high_complexity']:
                smells.append(f"Complex method '{func.name}': complexity={func.cyclomatic_complexity}")

            if func.nesting_depth > self.SMELL_THRESHOLDS['deep_nesting']:
                smells.append(f"Deep nesting in '{func.name}': depth={func.nesting_depth}")

        # Class-level smells
        for cls in metrics.classes:
            if cls.methods_count > self.SMELL_THRESHOLDS['god_class_methods']:
                smells.append(f"God class '{cls.name}': {cls.methods_count} methods")

            # cohesion is LCOM-style: a HIGHER value means LOWER cohesion,
            # hence the '>' comparison against the 'low_cohesion' threshold.
            if cls.cohesion > self.SMELL_THRESHOLDS['low_cohesion']:
                smells.append(f"Low cohesion in '{cls.name}': LCOM={cls.cohesion:.2f}")

            if cls.coupling_efferent > self.SMELL_THRESHOLDS['high_coupling']:
                smells.append(f"High coupling in '{cls.name}': {cls.coupling_efferent} dependencies")

        return smells

    def _generate_recommendations(self, metrics: FileMetrics,
                                  code_smells: List[str]) -> List[str]:
        """Generate actionable recommendations based on metrics."""
        recommendations = []

        # Complexity recommendations
        if metrics.cyclomatic_complexity > 20:
            recommendations.append("Consider breaking down complex functions into smaller, focused ones")

        # Cognitive complexity far outpacing cyclomatic complexity suggests
        # deep nesting rather than many branches.
        if metrics.cognitive_complexity > metrics.cyclomatic_complexity * 1.5:
            recommendations.append("Simplify nested conditions and control flow for better readability")

        # Maintainability recommendations
        if metrics.maintainability_index < 65:
            if metrics.lines_of_code > 200:
                recommendations.append("Consider splitting this file into smaller modules")
            if metrics.comment_ratio < 0.15:
                recommendations.append("Add more documentation and inline comments")
            if metrics.cyclomatic_complexity > 15:
                recommendations.append("Reduce complexity by extracting methods or using design patterns")

        # Coupling recommendations
        if metrics.coupling_efferent > 10:
            recommendations.append("Reduce dependencies by using dependency injection or interfaces")

        if metrics.instability > 0.8:
            recommendations.append("This module is highly unstable; consider stabilizing the API")

        if metrics.coupling_afferent > 10:
            recommendations.append("Many modules depend on this file; be careful with changes")

        # Function-specific recommendations
        long_functions = [f for f in metrics.functions if f.lines_of_code > 30]
        if long_functions:
            recommendations.append(f"Extract methods from {len(long_functions)} long functions")

        complex_functions = [f for f in metrics.functions if f.cyclomatic_complexity > 10]
        if complex_functions:
            # Only the first three names, to keep the message readable.
            names = ", ".join(f.name for f in complex_functions[:3])
            recommendations.append(f"Simplify complex functions: {names}")

        # Class-specific recommendations
        for cls in metrics.classes:
            if cls.methods_count > 15:
                recommendations.append(f"Consider splitting '{cls.name}' using Single Responsibility Principle")

            # LCOM-style cohesion: higher value = lower cohesion.
            if cls.cohesion > 0.5:
                recommendations.append(f"Improve cohesion in '{cls.name}' by grouping related methods")

            if cls.inheritance_depth > 3:
                recommendations.append(f"Deep inheritance in '{cls.name}'; consider composition over inheritance")

        return recommendations

    def _identify_refactoring_opportunities(self, metrics: FileMetrics) -> List[str]:
        """Identify specific refactoring opportunities, named after the
        classic refactoring catalog (Extract Method, Extract Class, ...)."""
        opportunities = []

        # Extract Method opportunities
        for func in metrics.functions:
            if func.lines_of_code > 30:
                opportunities.append(f"Extract Method: Split '{func.name}' into smaller functions")

            if func.nesting_depth > 3:
                opportunities.append(f"Replace Nested Conditionals: Simplify '{func.name}'")

            if func.parameters > 4:
                opportunities.append(f"Introduce Parameter Object: Reduce parameters in '{func.name}'")

        # Extract Class opportunities
        for cls in metrics.classes:
            if cls.methods_count > 15:
                opportunities.append(f"Extract Class: Split responsibilities from '{cls.name}'")

            if cls.cohesion > 0.6:
                opportunities.append(f"Move Method: Reorganize methods in '{cls.name}'")

        # File-level refactoring
        if metrics.lines_of_code > 500:
            opportunities.append("Extract Module: Split this file into multiple modules")

        if metrics.coupling_efferent > 15:
            opportunities.append("Introduce Facade: Simplify external dependencies")

        # Pattern-based refactoring
        if metrics.cyclomatic_complexity > 30:
            opportunities.append("Replace Conditional with Polymorphism: Use design patterns")

        return opportunities

    def analyze_project(self, files: List[Tuple[Path, str]]) -> Dict[str, Any]:
        """
        Analyze quality for entire project.

        Args:
            files: List of (file_path, content) tuples

        Returns:
            Project-wide quality metrics
        """
        reports = []
        total_issues = 0
        total_smells = 0
        grade_counts = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'F': 0}

        for file_path, content in files:
            report = self.analyze_file(file_path, content)
            reports.append(report)

            total_issues += len(report.metrics.issues)
            total_smells += len(report.code_smells)

            grade = report.quality_grade
            if grade in grade_counts:
                grade_counts[grade] += 1

        # Calculate project metrics (guard against an empty file list)
        avg_maintainability = sum(r.metrics.maintainability_index for r in reports) / len(reports) if reports else 0
        avg_complexity = sum(r.metrics.cyclomatic_complexity for r in reports) / len(reports) if reports else 0

        # Identify worst files (lowest maintainability first)
        worst_files = sorted(reports, key=lambda r: r.metrics.maintainability_index)[:5]

        # Identify most complex files
        complex_files = sorted(reports, key=lambda r: r.metrics.cyclomatic_complexity, reverse=True)[:5]

        return {
            'total_files': len(reports),
            'total_issues': total_issues,
            'total_code_smells': total_smells,
            'average_maintainability': avg_maintainability,
            'average_complexity': avg_complexity,
            'grade_distribution': grade_counts,
            'worst_files': [r.file_path for r in worst_files],
            'most_complex_files': [r.file_path for r in complex_files],
            'reports': reports
        }

    def generate_report_json(self, report: QualityReport) -> str:
        """Generate JSON report."""
        return json.dumps(report.to_dict(), indent=2)

    def generate_report_summary(self, report: QualityReport) -> str:
        """Generate human-readable summary."""
        lines = [
            f"Code Quality Report: {report.file_path}",
            "=" * 50,
            f"Overall Grade: {report.quality_grade}",
            "",
            "Metrics:",
            f"  Lines of Code: {report.metrics.lines_of_code}",
            f"  Cyclomatic Complexity: {report.metrics.cyclomatic_complexity}",
            f"  Maintainability Index: {report.metrics.maintainability_index:.1f}",
            f"  Coupling (In/Out): {report.metrics.coupling_afferent}/{report.metrics.coupling_efferent}",
            f"  Comment Ratio: {report.metrics.comment_ratio:.1%}",
            ""
        ]

        if report.code_smells:
            lines.extend([
                "Code Smells Detected:",
                *[f"  • {smell}" for smell in report.code_smells],
                ""
            ])

        if report.recommendations:
            lines.extend([
                "Recommendations:",
                *[f"  • {rec}" for rec in report.recommendations],
                ""
            ])

        if report.refactoring_opportunities:
            lines.extend([
                "Refactoring Opportunities:",
                *[f"  • {opp}" for opp in report.refactoring_opportunities]
            ])

        return "\n".join(lines)

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/stevenjjobson/scs-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.