"""
Reviewer agent for code quality and security analysis
"""
import asyncio
from datetime import datetime
from typing import Any, Dict, List, Optional

import structlog

from src.agents.base_agent import BaseAgent

logger = structlog.get_logger()


class ReviewerAgent(BaseAgent):
    """Specialized agent for code review and quality analysis"""

    def __init__(self, correlation_id: Optional[str] = None):
        super().__init__(agent_type="reviewer", correlation_id=correlation_id)

    def _get_model_preferences(self) -> Dict[str, str]:
        """Reviewer-optimized model preferences"""
        return {
            "code_review": "gpt-4",  # Best for detailed code analysis
            "security_analysis": "claude-3-sonnet",  # Strong security reasoning
            "performance_analysis": "gpt-4",  # Excellent performance insights
            "quality_metrics": "claude-3-sonnet",  # Good for quality assessment
        }

    def _initialize_prompt_templates(self) -> Dict[str, str]:
        """Initialize reviewer-specific prompt templates"""
        return {
            "code_review": """
You are an expert Code Reviewer with deep knowledge of software engineering best practices.
Focus on:
- Code quality and maintainability
- Design patterns and architecture
- Performance optimization opportunities
- Error handling and edge cases
- Test coverage and testability
- Documentation and readability
- Adherence to coding standards
""",
            "security_review": """
You are an expert Security Auditor specializing in application security.
Focus on:
- Common security vulnerabilities (OWASP Top 10)
- Authentication and authorization flaws
- Input validation and sanitization
- Data protection and privacy
- Secure coding practices
- Dependency vulnerabilities
- Security misconfigurations
""",
            "performance_review": """
You are an expert Performance Analyst specializing in application optimization.
Focus on:
- Algorithm efficiency and complexity
- Database query optimization
- Memory usage and leaks
- Caching strategies
- Network optimization
- Resource utilization
- Scalability considerations
""",
            "default": """
You are an expert Code Reviewer with comprehensive knowledge of software quality.
Provide thorough, constructive feedback to improve code quality and security.
""",
        }

    def _get_agent_capabilities(self) -> List[str]:
        """Reviewer agent capabilities"""
        return [
            "Comprehensive code quality analysis",
            "Security vulnerability assessment",
            "Performance bottleneck identification",
            "Best practices compliance checking",
            "Test coverage analysis",
            "Documentation quality review",
            "Architecture and design review",
            "Dependency security scanning",
            "Code metrics calculation",
            "Automated fix recommendations",
        ]

    async def analyze_project_health(
        self,
        project_id: str,
        focus_areas: Optional[List[str]] = None
    ) -> Dict[str, Any]:
        """Analyze project health across multiple dimensions."""
        if focus_areas is None:
            focus_areas = ["functionality", "security", "performance", "code_quality"]

        self.logger.info(
            "starting_project_health_analysis",
            project_id=project_id,
            focus_areas=focus_areas
        )

        # Simulate analysis
        await asyncio.sleep(1.5)

        # Simulate finding some issues
        issues = [
            {
                "type": "security",
                "severity": "medium",
                "description": "Potential SQL injection vulnerability",
                "file": "src/database/queries.py",
                "line": 45,
                "recommendation": "Use parameterized queries"
            },
            {
                "type": "performance",
                "severity": "low",
                "description": "Inefficient loop detected",
                "file": "src/utils/processor.py",
                "line": 23,
                "recommendation": "Consider list comprehension"
            }
        ]

        health_score = 0.85  # Simulated health score

        result = {
            "project_id": project_id,
            "health_score": health_score,
            "issues_found": len(issues) > 0,
            "issues": issues,
            "focus_areas_analyzed": focus_areas,
            "analysis_timestamp": datetime.utcnow().isoformat(),
            "recommendations": [
                "Address security vulnerabilities first",
                "Optimize performance bottlenecks",
                "Improve code documentation"
            ],
            "project_context": {
                "total_files": 25,
                "lines_of_code": 2500,
                "test_coverage": 78.5
            }
        }

        self.logger.info(
            "project_health_analysis_completed",
            project_id=project_id,
            health_score=health_score,
            issues_count=len(issues)
        )
        return result

    async def generate_fix_recommendations(
        self,
        issues: List[Dict[str, Any]],
        project_context: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Generate automated fix recommendations for identified issues."""
        # project_context is accepted for interface consistency but is not
        # used by this simulated implementation.
        self.logger.info(
            "generating_fix_recommendations",
            issues_count=len(issues)
        )

        # Simulate generating fixes
        await asyncio.sleep(1)

        fix_recommendations = []
        for issue in issues:
            fix = {
                # Simulated ID; string hashes are salted per interpreter run,
                # so these IDs are not stable across processes.
                "issue_id": f"fix_{hash(issue['description']) % 10000}",
                "issue_type": issue["type"],
                "severity": issue["severity"],
                "automated_fix": True,
                "fix_description": f"Automated fix for {issue['description']}",
                "fix_commands": [
                    f"# Fix for {issue['file']} line {issue['line']}",
                    "# Apply recommended changes",
                    issue["recommendation"]
                ],
                "estimated_time": "5 minutes",
                "risk_level": "low" if issue["severity"] == "low" else "medium"
            }
            fix_recommendations.append(fix)

        self.logger.info(
            "fix_recommendations_generated",
            recommendations_count=len(fix_recommendations)
        )
        return fix_recommendations

    async def verify_fixes(
        self,
        project_id: str,
        applied_fixes: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Verify that applied fixes actually resolved the issues."""
        self.logger.info(
            "verifying_fixes",
            project_id=project_id,
            fixes_count=len(applied_fixes)
        )

        # Simulate verification
        await asyncio.sleep(1)

        successful_fixes = []
        failed_fixes = []
        for fix in applied_fixes:
            # Simulate low-risk fixes succeeding and higher-risk fixes failing
            if fix.get("risk_level") == "low":
                successful_fixes.append(fix)
            else:
                failed_fixes.append(fix)

        all_fixes_successful = len(failed_fixes) == 0
        result = {
            "project_id": project_id,
            "verification_timestamp": datetime.utcnow().isoformat(),
            "all_fixes_successful": all_fixes_successful,
            "successful_fixes": successful_fixes,
            "failed_fixes": failed_fixes,
            "success_rate": len(successful_fixes) / len(applied_fixes) if applied_fixes else 0,
            "post_fix_health_score": 0.92 if all_fixes_successful else 0.87
        }

        self.logger.info(
            "fix_verification_completed",
            project_id=project_id,
            success_rate=result["success_rate"],
            all_successful=all_fixes_successful
        )
        return result
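

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal driver showing how the three public coroutines above compose:
# health analysis -> fix recommendations -> fix verification. It assumes
# ReviewerAgent can be constructed standalone (i.e. BaseAgent needs nothing
# beyond agent_type/correlation_id); adjust if your BaseAgent differs.
if __name__ == "__main__":
    async def _demo() -> None:
        agent = ReviewerAgent(correlation_id="demo-run")

        # 1. Analyze overall project health (simulated in this module).
        report = await agent.analyze_project_health(
            project_id="demo-project",
            focus_areas=["security", "performance"],
        )
        print(f"Health score: {report['health_score']}")

        # 2. Turn the reported issues into fix recommendations.
        fixes = await agent.generate_fix_recommendations(
            issues=report["issues"],
            project_context=report["project_context"],
        )
        print(f"Recommended fixes: {len(fixes)}")

        # 3. Pretend the fixes were applied, then verify them.
        verification = await agent.verify_fixes(
            project_id="demo-project",
            applied_fixes=fixes,
        )
        print(f"Success rate: {verification['success_rate']:.0%}")

    asyncio.run(_demo())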