#!/usr/bin/env python3
"""Quality analysis utilities for session management.
This module provides quality assessment and analysis functionality following
crackerjack architecture patterns with single responsibility principle.
"""
from __future__ import annotations
from contextlib import suppress
from typing import Any
def _extract_score_from_content(content: str) -> float | None:
"""Extract score from reflection content."""
    with suppress(ValueError, TypeError, AttributeError, IndexError):
        # Parse common quality score formats, e.g. "quality score: 85/100"
        if "quality score:" in content:
            # Take the first whitespace-delimited token after the label
            parts = content.split("quality score:", maxsplit=1)
            score_text = parts[1].split()[0]
            return _parse_score_text(score_text)
return None
def _extract_score_from_metadata(reflection: dict[str, Any]) -> float | None:
"""Extract score from reflection metadata."""
with suppress(ValueError, TypeError, AttributeError):
metadata = reflection.get("metadata", {})
if "quality_score" in metadata:
score = float(metadata["quality_score"])
if 0 <= score <= 100:
return score
return None
def _parse_score_text(score_text: str) -> float | None:
"""Parse various score text formats into normalized 0-100 score."""
    with suppress(ValueError, TypeError, IndexError, ZeroDivisionError):
        # Handle formats such as "85/100", "0.85", and "85"
        if "/" in score_text:
            # Ratio form: divide and scale to a 0-100 range
            numerator, denominator = score_text.split("/", maxsplit=1)
            score = (float(numerator) / float(denominator)) * 100
        elif "." in score_text and float(score_text) <= 1.0:
            # Fractional form such as "0.85" is treated as a percentage
            score = float(score_text) * 100
        else:
            score = float(score_text)
        if 0 <= score <= 100:
            return score
return None
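
# Minimal sketch of the expected normalization (illustrative, not executed):
#   _parse_score_text("85/100") -> 85.0
#   _parse_score_text("0.85")   -> 85.0
#   _parse_score_text("85")     -> 85.0
#   _parse_score_text("high")   -> None  (non-numeric text is suppressed)
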
def _extract_quality_scores(reflections: list[dict[str, Any]]) -> list[float]:
"""Extract quality scores from reflection data."""
    scores: list[float] = []
for reflection in reflections:
try:
# Look for quality score in reflection content
content = reflection.get("content", "").lower()
score = _extract_score_from_content(content)
if score is not None:
scores.append(score)
continue
# Check metadata for score
score = _extract_score_from_metadata(reflection)
if score is not None:
scores.append(score)
except (ValueError, TypeError, AttributeError):
# Skip malformed scores
continue
return scores
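
# Expected reflection shape, as implied by the lookups above (illustrative):
#   {"content": "Review notes. Quality score: 85/100",
#    "metadata": {"quality_score": 85}}
# Either the content string or the metadata key may carry the score.
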
def _analyze_quality_trend(quality_scores: list[float]) -> tuple[str, list[str], bool]:
"""Analyze quality trend from historical scores."""
if len(quality_scores) < 2:
return "insufficient_data", ["Not enough data to analyze trend"], False
# Calculate trend
recent_scores = quality_scores[-5:] # Last 5 scores
older_scores = (
quality_scores[-10:-5] if len(quality_scores) >= 10 else quality_scores[:-5]
)
if not older_scores:
return "stable", ["Initial quality baseline established"], True
recent_avg = sum(recent_scores) / len(recent_scores)
older_avg = sum(older_scores) / len(older_scores)
difference = recent_avg - older_avg
insights = []
improving = False
if difference > 5:
trend = "improving"
improving = True
insights.extend(
[
f"๐ Quality improving: +{difference:.1f} points",
"๐ฏ Continue current development practices",
],
)
elif difference < -5:
trend = "declining"
insights.extend(
[
f"๐ Quality declining: {difference:.1f} points",
"โ ๏ธ Review recent changes and processes",
],
)
else:
trend = "stable"
improving = True
insights.extend(
[
f"๐ Quality stable: {difference:+.1f} points variation",
"โ
Maintaining consistent development standards",
],
)
# Add specific recommendations based on score level
current_score = recent_scores[-1] if recent_scores else 0
if current_score < 70:
insights.append("๐ง Focus on code quality improvements")
elif current_score > 90:
insights.append("โญ Excellent quality standards maintained")
return trend, insights, improving
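
# Worked example (illustrative values): for the score history
#   [60, 65, 70, 72, 78, 80, 82, 85, 88, 91]
# the recent window [80, 82, 85, 88, 91] averages 85.2, the older window
# [60, 65, 70, 72, 78] averages 69.0, and the +16.2 difference is reported
# as an "improving" trend.
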
def _generate_quality_trend_recommendations(scores: list[float]) -> list[str]:
"""Generate specific recommendations based on quality trend analysis."""
if not scores:
return ["๐ Start tracking quality metrics for trend analysis"]
recommendations = []
current_score = scores[-1]
# Score-based recommendations
if current_score < 60:
recommendations.extend(
[
"๐จ Critical: Immediate quality improvement needed",
"โข Run comprehensive code review and testing",
"โข Focus on reducing technical debt",
"โข Consider pair programming for complex changes",
],
)
elif current_score < 75:
recommendations.extend(
[
"โ ๏ธ Quality below target: Focus on improvement",
"โข Increase test coverage and documentation",
"โข Review and refactor complex code sections",
],
)
elif current_score < 90:
recommendations.extend(
[
"โ
Good quality: Minor optimizations available",
"โข Fine-tune linting and formatting rules",
"โข Enhance error handling and logging",
],
)
else:
recommendations.extend(
[
"โญ Excellent quality: Maintain current standards",
"โข Share best practices with team",
"โข Document successful patterns for reuse",
],
)
# Trend-based recommendations
if len(scores) >= 3:
recent_trend = scores[-3:]
if all(
recent_trend[i] < recent_trend[i + 1] for i in range(len(recent_trend) - 1)
):
recommendations.append("๐ Positive trend: Continue current practices")
elif all(
recent_trend[i] > recent_trend[i + 1] for i in range(len(recent_trend) - 1)
):
recommendations.append("๐ Declining trend: Review recent changes")
return recommendations
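
# Illustrative trend check: a history ending [70, 75, 82] is strictly
# increasing, so the positive-trend note is appended; one ending [82, 75, 70]
# triggers the declining-trend note instead.
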
def _get_time_based_recommendations(hour: int) -> list[str]:
"""Generate recommendations based on current time of day."""
recommendations = []
if 6 <= hour < 12: # Morning
recommendations.extend(
[
"๐
Morning session: Good time for complex problem-solving",
"โข Focus on architecture and design decisions",
"โข Plan day's development priorities",
],
)
elif 12 <= hour < 17: # Afternoon
recommendations.extend(
[
"โ๏ธ Afternoon session: Peak productivity time",
"โข Implement planned features and fixes",
"โข Conduct code reviews and testing",
],
)
elif 17 <= hour < 21: # Evening
recommendations.extend(
[
"๐ Evening session: Good for documentation and cleanup",
"โข Update documentation and comments",
"โข Refactor and optimize existing code",
],
)
else: # Late night/early morning
recommendations.extend(
[
"๐ Late session: Focus on simple, well-tested changes",
"โข Avoid complex architectural changes",
"โข Consider shorter development sessions",
],
)
return recommendations
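
# Illustrative: _get_time_based_recommendations(14) returns the afternoon
# suggestions, while hours from 21 through 5 fall through to the late-session
# branch.
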
def _ensure_default_recommendations(priority_actions: list[str]) -> list[str]:
"""Ensure there are always some recommendations available."""
if not priority_actions:
return [
"๐ฏ Focus on current development goals",
"๐ Keep documentation updated",
"๐งช Maintain test coverage",
"๐ Regular code quality checks",
]
return priority_actions
def _get_intelligence_error_result(error: Exception) -> dict[str, Any]:
"""Generate error result for intelligence system failures."""
return {
"success": False,
"error": f"Intelligence system error: {error}",
"recommendations": [
"โ ๏ธ Intelligence features temporarily unavailable",
"โข Basic session management tools still functional",
"โข Manual quality assessment recommended",
"โข Check system dependencies and configuration",
],
"fallback_mode": True,
}
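

# Minimal usage sketch (illustrative only, not part of the module's API).
# The sample reflections and score history below are invented for the demo.
if __name__ == "__main__":
    sample_reflections = [
        {"content": "Refactor review. Quality score: 85/100", "metadata": {}},
        {"content": "No inline score recorded", "metadata": {"quality_score": 92}},
    ]
    print("extracted scores:", _extract_quality_scores(sample_reflections))

    history = [60.0, 65.0, 70.0, 72.0, 78.0, 80.0, 82.0, 85.0, 88.0, 91.0]
    trend, insights, improving = _analyze_quality_trend(history)
    print("trend:", trend, "improving:", improving)
    for line in insights + _generate_quality_trend_recommendations(history):
        print(" ", line)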