We provide all the information about MCP servers via our MCP API. For example:
curl -X GET 'https://glama.ai/api/mcp/v1/servers/stevenjjobson/scs-mcp'
If you have feedback or need assistance with the MCP directory API, please join our Discord server.
"""
Base classes and data structures for code metrics
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import List, Dict, Any, Optional
from pathlib import Path
@dataclass
class FunctionMetrics:
    """Metrics for a single function/method"""
    name: str  # function/method name as found in source
    line_number: int  # line where the definition appears
    cyclomatic_complexity: int = 0  # number of linearly independent paths through the body
    cognitive_complexity: int = 0  # readability-oriented complexity score
    lines_of_code: int = 0  # size of the function body in lines
    parameters: int = 0  # number of declared parameters
    nesting_depth: int = 0  # deepest block nesting inside the body
    return_points: int = 0  # count of exit points (return statements)
    fan_in: int = 0  # Number of calls to this function
    fan_out: int = 0  # Number of functions this calls
@dataclass
class ClassMetrics:
    """Metrics for a single class"""
    name: str  # class name as found in source
    line_number: int  # line where the class is defined
    methods_count: int = 0  # number of methods declared on the class
    fields_count: int = 0  # number of attributes/fields
    inheritance_depth: int = 0  # depth of the inheritance chain
    coupling_afferent: int = 0  # Classes that depend on this
    coupling_efferent: int = 0  # Classes this depends on
    cohesion: float = 0.0  # LCOM (Lack of Cohesion of Methods)
    weighted_methods: int = 0  # Sum of complexities of all methods
@dataclass
class FileMetrics:
    """Aggregated metrics for an entire file"""
    file_path: str
    lines_of_code: int = 0
    source_lines_of_code: int = 0
    comment_lines: int = 0
    blank_lines: int = 0
    cyclomatic_complexity: int = 0
    cognitive_complexity: int = 0
    maintainability_index: float = 0.0
    halstead_volume: float = 0.0
    halstead_difficulty: float = 0.0
    halstead_effort: float = 0.0
    coupling_afferent: int = 0
    coupling_efferent: int = 0
    instability: float = 0.0
    max_nesting_depth: int = 0
    functions: List[FunctionMetrics] = field(default_factory=list)
    classes: List[ClassMetrics] = field(default_factory=list)
    issues: List[str] = field(default_factory=list)

    @property
    def comment_ratio(self) -> float:
        """Ratio of comment lines to total lines (0.0 for an empty file)."""
        return self.comment_lines / self.lines_of_code if self.lines_of_code != 0 else 0.0

    @property
    def average_complexity(self) -> float:
        """Mean cyclomatic complexity across all functions (0.0 if none)."""
        if not self.functions:
            return 0.0
        total_cc = sum(fn.cyclomatic_complexity for fn in self.functions)
        return total_cc / len(self.functions)

    @property
    def quality_grade(self) -> str:
        """Overall quality grade from A (best) to F.

        Averages up to three normalized 0-100 component scores:
        - maintainability index (only when computed, i.e. > 0)
        - cyclomatic complexity, inverted so lower complexity scores higher
          (only when there is any complexity to report)
        - comment ratio, scaled so a ratio of 0.2 earns the full 100
        """
        components = []
        if self.maintainability_index > 0:
            components.append(self.maintainability_index)
        avg_cc = self.average_complexity
        if avg_cc > 0:
            components.append(max(0, 100 - (avg_cc * 5)))
        # Comment score always participates, so the list is never empty;
        # the "N/A" guard below is kept for defensiveness only.
        components.append(min(100, self.comment_ratio * 500))
        if not components:
            return "N/A"
        overall = sum(components) / len(components)
        for cutoff, grade in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
            if overall >= cutoff:
                return grade
        return "F"
@dataclass
class CodeDuplication:
    """Information about duplicated code blocks"""
    file_path_1: str  # first file containing the duplicated block
    line_start_1: int  # start line of the block in the first file
    line_end_1: int  # end line of the block in the first file
    file_path_2: str  # second file (presumably may equal file_path_1 for intra-file duplicates — verify against producer)
    line_start_2: int  # start line of the block in the second file
    line_end_2: int  # end line of the block in the second file
    similarity_score: float  # similarity between the two blocks (presumably 0.0-1.0 — TODO confirm)
    duplicate_lines: int  # number of duplicated lines
class BaseMetricsAnalyzer(ABC):
    """Abstract base class for metrics analyzers.

    Subclasses provide the language-specific parsing (file analysis and
    complexity calculation); this base supplies shared line counting and
    threshold-based issue detection.
    """

    def __init__(self, language: str = "python"):
        """
        Initialize analyzer for specific language

        Args:
            language: Programming language to analyze
        """
        self.language = language
        self.supported_languages = ["python", "javascript", "typescript"]

    @abstractmethod
    def analyze_file(self, file_path: Path, content: str) -> "FileMetrics":
        """
        Analyze a single file and return metrics

        Args:
            file_path: Path to the file
            content: File content as string
        Returns:
            FileMetrics object with calculated metrics
        """
        pass

    @abstractmethod
    def calculate_cyclomatic_complexity(self, code: str) -> int:
        """
        Calculate cyclomatic complexity for code

        Args:
            code: Source code to analyze
        Returns:
            Cyclomatic complexity score
        """
        pass

    @abstractmethod
    def calculate_cognitive_complexity(self, code: str) -> int:
        """
        Calculate cognitive complexity for code

        Args:
            code: Source code to analyze
        Returns:
            Cognitive complexity score
        """
        pass

    def count_lines(self, content: str) -> Dict[str, int]:
        """
        Count different types of lines in code.

        Bug fixes over the previous version:
        - a single-line Python docstring (e.g. ``\"\"\"doc\"\"\"``) no longer
          leaves the counter stuck in multiline mode, and a closing delimiter
          that does not start its line now terminates the block;
        - a one-line JS/TS block comment (``/* x */``) no longer marks all
          subsequent lines as comments.

        Closing delimiter lines are counted as comment lines.

        Args:
            content: File content
        Returns:
            Dictionary with 'total', 'blank', 'comment' and 'code' counts
        """
        lines = content.split('\n')
        total = len(lines)
        blank = sum(1 for line in lines if not line.strip())
        # Language-specific comment detection (heuristic, line-based: does not
        # handle comment markers embedded in string literals).
        comment = 0
        in_multiline = False
        for line in lines:
            stripped = line.strip()
            if self.language == "python":
                if in_multiline:
                    comment += 1
                    if stripped.endswith('"""') or stripped.endswith("'''"):
                        in_multiline = False
                elif stripped.startswith('#'):
                    comment += 1
                elif stripped.startswith('"""') or stripped.startswith("'''"):
                    comment += 1
                    delim = stripped[:3]
                    # A delimiter pair that opens and closes on the same line
                    # (e.g. """doc""") does not start a multiline block.
                    if not (len(stripped) >= 6 and stripped.endswith(delim)):
                        in_multiline = True
            elif self.language in ["javascript", "typescript"]:
                if in_multiline:
                    comment += 1
                    if '*/' in stripped:
                        in_multiline = False
                elif stripped.startswith('//'):
                    comment += 1
                elif stripped.startswith('/*'):
                    comment += 1
                    # Only stay in multiline mode when the comment is unclosed.
                    if '*/' not in stripped[2:]:
                        in_multiline = True
        return {
            'total': total,
            'blank': blank,
            'comment': comment,
            'code': total - blank - comment
        }

    def detect_issues(self, metrics: "FileMetrics") -> List[str]:
        """
        Detect potential issues based on metrics.

        Thresholds are conventional rules of thumb (e.g. cyclomatic
        complexity > 10 per function, > 5 parameters, > 50 lines).

        Args:
            metrics: Calculated file metrics
        Returns:
            List of human-readable issue descriptions
        """
        issues = []
        # File-level complexity bands.
        if metrics.cyclomatic_complexity > 50:
            issues.append(f"Very high file complexity: {metrics.cyclomatic_complexity}")
        elif metrics.cyclomatic_complexity > 20:
            issues.append(f"High file complexity: {metrics.cyclomatic_complexity}")
        # Per-function checks.
        for func in metrics.functions:
            if func.cyclomatic_complexity > 10:
                issues.append(
                    f"Complex function '{func.name}': "
                    f"cyclomatic={func.cyclomatic_complexity}"
                )
            if func.nesting_depth > 4:
                issues.append(
                    f"Deep nesting in '{func.name}': depth={func.nesting_depth}"
                )
            if func.parameters > 5:
                issues.append(
                    f"Too many parameters in '{func.name}': {func.parameters}"
                )
            if func.lines_of_code > 50:
                issues.append(
                    f"Long function '{func.name}': {func.lines_of_code} lines"
                )
        # Maintainability: 0 means "not computed", so only flag positive values.
        if 0 < metrics.maintainability_index < 65:
            issues.append(f"Low maintainability index: {metrics.maintainability_index:.1f}")
        # Coupling / stability.
        if metrics.coupling_efferent > 10:
            issues.append(f"High efferent coupling: {metrics.coupling_efferent}")
        if metrics.instability > 0.8:
            issues.append(f"High instability: {metrics.instability:.2f}")
        # Documentation coverage.
        if metrics.comment_ratio < 0.1:
            issues.append(f"Low comment ratio: {metrics.comment_ratio:.2%}")
        return issues