analyze_peer_review_quality
Analyze peer review comment quality and content in Canvas assignments to assess feedback effectiveness and identify improvement areas.
Instructions
Analyze the quality and content of peer review comments.
Args:
- `course_identifier`: Canvas course code or ID
- `assignment_id`: Canvas assignment ID
- `analysis_criteria`: JSON string of custom analysis criteria (optional)
- `generate_report`: Whether to generate a detailed analysis report
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| analysis_criteria | No | JSON string of custom analysis criteria | None |
| assignment_id | Yes | Canvas assignment ID | |
| course_identifier | Yes | Canvas course code or ID | |
| generate_report | No | Whether to generate a detailed analysis report | True |
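As an illustration of the schema, the arguments below would form a valid payload for this tool. The course code, assignment ID, and the keys inside `analysis_criteria` are hypothetical (the analyzer's accepted criteria are not documented here); note that `analysis_criteria` must be passed as a JSON string, not a nested object.

```python
import json

# Hypothetical example arguments for analyze_peer_review_quality.
# The criteria keys below are illustrative assumptions, not a documented schema.
arguments = {
    "course_identifier": "CS-101-F25",   # hypothetical Canvas course code
    "assignment_id": 12345,              # hypothetical assignment ID
    "analysis_criteria": json.dumps({    # serialized to a JSON string
        "min_word_count": 20,                             # assumed criterion
        "constructive_keywords": ["suggest", "improve"],  # assumed criterion
    }),
    "generate_report": True,
}
```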
Implementation Reference
- `src/canvas_mcp/server.py:52-54` (registration): Calls `register_peer_review_comment_tools(mcp)` during server tool registration setup, which defines and registers the MCP tool.

```python
register_peer_review_tools(mcp)
register_peer_review_comment_tools(mcp)
register_messaging_tools(mcp)
```
- MCP handler function decorated with `@mcp.tool()`. Handles input parameters, resolves the course ID, instantiates the analyzer, delegates to the core implementation, and formats the JSON response.

```python
@mcp.tool()
@validate_params
async def analyze_peer_review_quality(
    course_identifier: str | int,
    assignment_id: str | int,
    analysis_criteria: str | None = None,
    generate_report: bool = True
) -> str:
    """
    Analyze the quality and content of peer review comments.

    Args:
        course_identifier: Canvas course code or ID
        assignment_id: Canvas assignment ID
        analysis_criteria: JSON string of custom analysis criteria (optional)
        generate_report: Whether to generate detailed analysis report
    """
    try:
        course_id = await get_course_id(course_identifier)
        analyzer = PeerReviewCommentAnalyzer()

        # Parse analysis criteria if provided
        criteria = None
        if analysis_criteria:
            try:
                criteria = json.loads(analysis_criteria)
            except json.JSONDecodeError:
                return "Error: analysis_criteria must be valid JSON"

        result = await analyzer.analyze_peer_review_quality(
            course_id=course_id,
            assignment_id=int(assignment_id),
            analysis_criteria=criteria,
            generate_report=generate_report
        )

        if "error" in result:
            return f"Error analyzing peer review quality: {result['error']}"

        return json.dumps(result, indent=2)

    except Exception as e:
        return f"Error in analyze_peer_review_quality: {str(e)}"
```
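As a usage sketch, the registered tool can be reached from any MCP client; the snippet below uses the MCP Python SDK over stdio. The launch command for the server and the argument values are assumptions, and the value returned is the JSON string produced by the handler above.

```python
import asyncio
import json

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# Assumption: the server can be launched as a module; adjust the command to
# however canvas_mcp is started in your environment.
server_params = StdioServerParameters(command="python", args=["-m", "canvas_mcp.server"])

async def main() -> None:
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                "analyze_peer_review_quality",
                {
                    "course_identifier": "CS-101-F25",  # hypothetical course code
                    "assignment_id": 12345,             # hypothetical assignment ID
                    "generate_report": True,
                },
            )
            # The handler returns a JSON string; parse it to inspect the analysis.
            analysis = json.loads(result.content[0].text)
            print(analysis["overall_analysis"]["average_quality_score"])

asyncio.run(main())
```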
- Core implementation of the analysis logic. Fetches peer review comments, computes quality scores using keyword analysis, calculates statistics on word counts, constructiveness, and sentiment, flags low-quality reviews, and generates recommendations.

```python
async def analyze_peer_review_quality(
    self,
    course_id: int,
    assignment_id: int,
    analysis_criteria: dict[str, Any] | None = None,
    generate_report: bool = True
) -> dict[str, Any]:
    """
    Analyze the quality and content of peer review comments.

    Args:
        course_id: Canvas course ID
        assignment_id: Canvas assignment ID
        analysis_criteria: Custom analysis criteria (optional)
        generate_report: Whether to generate detailed analysis report

    Returns:
        Dict containing comprehensive quality analysis
    """
    try:
        # First get all comments
        comments_data = await self.get_peer_review_comments(
            course_id, assignment_id, anonymize_students=True
        )

        if "error" in comments_data:
            return comments_data

        reviews = comments_data.get("peer_reviews", [])

        if not reviews:
            return {"error": "No peer reviews found for analysis"}

        # Extract comment texts for analysis
        comment_texts = []
        word_counts = []
        quality_scores = []
        flagged_reviews = []

        for review in reviews:
            content = review.get("review_content", {})
            comment_text = content.get("comment_text", "")
            word_count = content.get("word_count", 0)

            comment_texts.append(comment_text)
            word_counts.append(word_count)

            # Calculate quality score for this comment
            quality_score = self._calculate_quality_score(comment_text)
            quality_scores.append(quality_score)

            # Flag problematic reviews
            if quality_score < 2.0 or word_count < 5:
                flagged_reviews.append({
                    "review_id": review.get("review_id"),
                    "flag_reason": "low_quality" if quality_score < 2.0 else "extremely_short",
                    "comment": comment_text[:100] + "..." if len(comment_text) > 100 else comment_text,
                    "word_count": word_count,
                    "quality_score": round(quality_score, 1)
                })

        # Calculate statistics
        total_reviews = len(reviews)
        word_count_stats = self._calculate_word_count_stats(word_counts)
        constructiveness_analysis = self._analyze_constructiveness(comment_texts)
        sentiment_analysis = self._analyze_sentiment(comment_texts)

        # Quality distribution
        high_quality = sum(1 for score in quality_scores if score >= 4.0)
        medium_quality = sum(1 for score in quality_scores if 2.0 <= score < 4.0)
        low_quality = sum(1 for score in quality_scores if score < 2.0)

        avg_quality_score = statistics.mean(quality_scores) if quality_scores else 0

        # Generate recommendations
        recommendations = self._generate_recommendations(
            flagged_reviews, word_count_stats, constructiveness_analysis
        )

        result = {
            "overall_analysis": {
                "total_reviews_analyzed": total_reviews,
                "quality_distribution": {
                    "high_quality": high_quality,
                    "medium_quality": medium_quality,
                    "low_quality": low_quality
                },
                "average_quality_score": round(avg_quality_score, 1)
            },
            "detailed_metrics": {
                "word_count_stats": word_count_stats,
                "constructiveness_analysis": constructiveness_analysis,
                "sentiment_analysis": sentiment_analysis
            },
            "flagged_reviews": flagged_reviews[:20],  # Limit to top 20
            "recommendations": recommendations
        }

        return result

    except Exception as e:
        # Exception handling is not shown in the source excerpt; a dict-shaped
        # error is assumed so the handler's `"error" in result` check still works.
        return {"error": str(e)}
```
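The `_calculate_quality_score`, `_analyze_constructiveness`, `_analyze_sentiment`, and `_generate_recommendations` helpers are referenced above but not shown. Since the scoring is described as keyword-based, here is a minimal standalone sketch of what such a scorer could look like; the keyword lists, weights, and 0-5 scale are assumptions, not the project's actual implementation.

```python
import re

# Hypothetical keyword lists; the real _calculate_quality_score may use
# different vocabularies, weights, and thresholds.
CONSTRUCTIVE_TERMS = {"suggest", "consider", "improve", "clarify", "instead", "because"}
SPECIFIC_TERMS = {"paragraph", "section", "sentence", "thesis", "example", "evidence"}

def calculate_quality_score(comment_text: str) -> float:
    """Score a peer review comment on a rough 0-5 scale using keyword heuristics."""
    words = re.findall(r"[a-z']+", comment_text.lower())
    if not words:
        return 0.0

    score = 1.0  # baseline for any non-empty comment

    # Longer comments tend to carry more substance, up to a capped bonus.
    score += min(len(words) / 50.0, 1.5)

    # Reward constructive phrasing and references to specific parts of the work.
    if any(word in CONSTRUCTIVE_TERMS for word in words):
        score += 1.5
    if any(word in SPECIFIC_TERMS for word in words):
        score += 1.0

    return min(score, 5.0)
```

Under these assumed weights, `calculate_quality_score('Consider adding evidence to your second paragraph')` returns about 3.6, landing in the medium band of the quality distribution above.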