Skip to main content
Glama
bee4come

Plan-MCP

by bee4come
code_reviewer.py (3.83 kB)
"""Code review tool using Gemini AI.""" from ..api.gemini_client import GeminiClient from ..models import CodeReview from ..prompts.system_prompts import CODE_REVIEWER_PROMPT from ..utils.logger import logger class CodeReviewer: """Tool for reviewing code using Gemini.""" def __init__(self, gemini_client: GeminiClient | None = None): """Initialize the code reviewer. Args: gemini_client: Optional Gemini client instance """ self.client = gemini_client or GeminiClient() async def review_code( self, code: str, language: str, context: str | None = None, focus_areas: list[str] | None = None, previous_feedback: str | None = None, ) -> CodeReview: """Review code and provide feedback. Args: code: The code to review language: Programming language context: Optional context about the code focus_areas: Specific areas to focus on previous_feedback: Previous review feedback if this is a revision Returns: Code review results """ logger.info(f"Reviewing {language} code ({len(code)} characters)") # Build the prompt prompt_parts = [ f"Please review the following {language} code:", f"```{language}", code, "```", ] if context: prompt_parts.insert(1, f"Context: {context}") if focus_areas: prompt_parts.append("\nFocus Areas:\n" + "\n".join(f"- {area}" for area in focus_areas)) if previous_feedback: prompt_parts.append(f"\nThis is a revision. Previous feedback:\n{previous_feedback}") prompt = "\n".join(prompt_parts) try: # Generate the review review = await self.client.generate_json( prompt=prompt, response_model=CodeReview, system_prompt=CODE_REVIEWER_PROMPT, temperature=0.7, ) logger.info( f"Code review completed: {review.overall_quality}, " f"{len(review.issues)} issues, {len(review.suggestions)} suggestions" ) return review except Exception as e: logger.error(f"Failed to review code: {str(e)}") raise async def compare_implementations( self, code1: str, code2: str, language: str, comparison_criteria: list[str] | None = None, ) -> str: """Compare two code implementations. 
Args: code1: First implementation code2: Second implementation language: Programming language comparison_criteria: Specific criteria to compare Returns: Comparison analysis """ logger.info(f"Comparing two {language} implementations") prompt = f"""Compare these two {language} implementations: Implementation 1: ```{language} {code1} ``` Implementation 2: ```{language} {code2} ``` {f"Comparison Criteria: {', '.join(comparison_criteria)}" if comparison_criteria else ""} Please provide a detailed comparison covering: 1. Functionality differences 2. Performance implications 3. Code quality and readability 4. Best practices adherence 5. Recommendation on which to use and why""" try: comparison = await self.client.generate_content( prompt=prompt, system_prompt=CODE_REVIEWER_PROMPT, temperature=0.7, ) logger.info("Code comparison completed") return comparison except Exception as e: logger.error(f"Failed to compare implementations: {str(e)}") raise

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/bee4come/plan-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.