get_rubric_details
Retrieve detailed rubric criteria and scoring information from Canvas courses to understand assessment requirements and grading standards.
Instructions
Get detailed rubric criteria and scoring information.
Args:
- course_identifier: The Canvas course code (e.g., badm_554_120251_246794) or ID
- rubric_id: The Canvas rubric ID
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| course_identifier | Yes | The Canvas course code (e.g., badm_554_120251_246794) or ID | |
| rubric_id | Yes | The Canvas rubric ID | |
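For orientation, here is a minimal client-side call sketch. It assumes an already-connected ClientSession from the official MCP Python SDK and a server exposing this tool; only the parameter names come from the schema above, and the argument values are made up.

```python
# Illustrative only: assumes an MCP ClientSession (from the "mcp" Python SDK)
# already connected to the Canvas MCP server that exposes get_rubric_details.
async def fetch_rubric_details(session) -> str:
    # Both arguments are required; values here are made-up examples.
    result = await session.call_tool(
        "get_rubric_details",
        arguments={
            "course_identifier": "badm_554_120251_246794",  # course code or numeric ID
            "rubric_id": "12345",  # hypothetical rubric ID
        },
    )
    # The tool returns its formatted summary as text content
    # (assuming the first content item is text).
    return result.content[0].text
```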
Implementation Reference
- src/canvas_mcp/tools/rubrics.py:473-547 (handler): The core handler function for the 'get_rubric_details' MCP tool. Fetches rubric details via the Canvas API endpoint /courses/{course_id}/rubrics/{rubric_id}, parses the response, and formats a detailed textual summary of the rubric including criteria, ratings, points, and metadata.

```python
@validate_params
async def get_rubric_details(course_identifier: str | int, rubric_id: str | int) -> str:
    """Get detailed rubric criteria and scoring information.

    Args:
        course_identifier: The Canvas course code (e.g., badm_554_120251_246794) or ID
        rubric_id: The Canvas rubric ID
    """
    course_id = await get_course_id(course_identifier)
    rubric_id_str = str(rubric_id)

    # Get detailed rubric information
    response = await make_canvas_request(
        "get",
        f"/courses/{course_id}/rubrics/{rubric_id_str}",
        params={"include[]": ["assessments", "associations"]}
    )

    if "error" in response:
        return f"Error fetching rubric details: {response['error']}"

    # Extract rubric details
    title = response.get("title", "Untitled Rubric")
    context_code = response.get("context_code", "")
    context_type = response.get("context_type", "")
    points_possible = response.get("points_possible", 0)
    reusable = response.get("reusable", False)
    read_only = response.get("read_only", False)
    data = response.get("data", [])

    course_display = await get_course_code(course_id) or course_identifier
    result = f"Detailed Rubric Information for Course {course_display}:\n\n"
    result += f"Title: {title}\n"
    result += f"Rubric ID: {rubric_id}\n"
    result += f"Context: {context_type} ({context_code})\n"
    result += f"Total Points: {points_possible}\n"
    result += f"Reusable: {'Yes' if reusable else 'No'}\n"
    result += f"Read Only: {'Yes' if read_only else 'No'}\n\n"

    # Detailed criteria and ratings
    if data:
        result += "Detailed Criteria and Ratings:\n"
        result += "=" * 50 + "\n"

        for i, criterion in enumerate(data, 1):
            criterion_id = criterion.get("id", "N/A")
            description = criterion.get("description", "No description")
            long_description = criterion.get("long_description", "")
            points = criterion.get("points", 0)
            ratings = criterion.get("ratings", [])

            result += f"\nCriterion #{i}: {description}\n"
            result += f"ID: {criterion_id}\n"
            result += f"Points: {points}\n"

            if long_description:
                result += f"Description: {truncate_text(long_description, 200)}\n"

            if ratings:
                result += f"Rating Levels ({len(ratings)}):\n"
                for j, rating in enumerate(ratings):
                    rating_description = rating.get("description", "No description")
                    rating_points = rating.get("points", 0)
                    rating_id = rating.get("id", "N/A")

                    result += f"  {j+1}. {rating_description} ({rating_points} pts) [ID: {rating_id}]\n"

                    if rating.get("long_description"):
                        result += f"     {truncate_text(rating.get('long_description'), 100)}\n"

            result += "\n"

    return result
```
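To make the parsing above easier to follow, here is a hypothetical sample of the rubric payload shape the handler reads. The keys mirror the response.get(...) calls in the handler; the values are invented.

```python
# Hypothetical sample of the Canvas rubric payload this handler consumes; the keys
# mirror the fields accessed via response.get(...) above, while the values are invented.
sample_rubric_response = {
    "title": "Final Project Rubric",
    "context_type": "Course",
    "context_code": "course_246794",
    "points_possible": 20,
    "reusable": False,
    "read_only": False,
    "data": [
        {
            "id": "_1001",
            "description": "Thesis and Argument",
            "long_description": "States a clear, arguable thesis and supports it throughout.",
            "points": 10,
            "ratings": [
                {"id": "_2001", "description": "Exceeds expectations", "points": 10},
                {"id": "_2002", "description": "Meets expectations", "points": 7},
                {"id": "_2003", "description": "Needs improvement", "points": 3},
            ],
        },
        {
            "id": "_1002",
            "description": "Evidence and Citations",
            "points": 10,
            "ratings": [
                {"id": "_2004", "description": "Well supported", "points": 10},
                {"id": "_2005", "description": "Partially supported", "points": 5},
            ],
        },
    ],
}
```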
- src/canvas_mcp/server.py:51-51 (registration): Top-level registration call in register_all_tools that invokes register_rubric_tools(mcp), which defines and registers the get_rubric_details tool using the @mcp.tool() decorator.

```python
register_rubric_tools(mcp)
```
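For context, a minimal sketch of what a hook of this shape could look like, assuming a FastMCP-style server object; the actual register_rubric_tools in src/canvas_mcp/tools/rubrics.py may register additional rubric tools and differ in detail.

```python
# Sketch only: one common way a register_rubric_tools(mcp) hook attaches the handler.
# Assumes a FastMCP-style server object passed in as `mcp`; the real implementation
# may register several rubric tools alongside get_rubric_details.
def register_rubric_tools(mcp) -> None:
    @mcp.tool()
    @validate_params
    async def get_rubric_details(course_identifier: str | int, rubric_id: str | int) -> str:
        """Get detailed rubric criteria and scoring information."""
        ...  # body as shown in the handler reference above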
- Helper function used by rubric tools to validate and parse the rubric criteria JSON structure.

```python
def validate_rubric_criteria(criteria_json: str) -> dict[str, Any]:
    """Validate and parse rubric criteria JSON structure.

    Args:
        criteria_json: JSON string containing rubric criteria

    Returns:
        Parsed criteria dictionary

    Raises:
        ValueError: If JSON is invalid or structure is incorrect
    """
    # Preprocess the string to handle common issues
    cleaned_json = preprocess_criteria_string(criteria_json)

    try:
        criteria = json.loads(cleaned_json)
    except json.JSONDecodeError as e:
        # Try alternative parsing methods if JSON fails
        try:
            # Maybe it's a Python literal string representation
            import ast
            criteria = ast.literal_eval(cleaned_json)
            if isinstance(criteria, dict):
                # Successfully parsed as Python literal, continue with validation
                pass
            else:
                raise ValueError("Parsed result is not a dictionary")
        except (ValueError, SyntaxError):
            # Both JSON and literal_eval failed, provide detailed error
            error_msg = f"Invalid JSON format: {str(e)}\n"
            error_msg += f"Original string length: {len(criteria_json)}\n"
            error_msg += f"Cleaned string length: {len(cleaned_json)}\n"
            error_msg += f"First 200 characters of original: {repr(criteria_json[:200])}\n"
            error_msg += f"First 200 characters of cleaned: {repr(cleaned_json[:200])}\n"
            if len(cleaned_json) > 200:
                error_msg += f"Last 100 characters of cleaned: {repr(cleaned_json[-100:])}"
            error_msg += "\nAlso failed to parse as Python literal. Please ensure the criteria is valid JSON."
            raise ValueError(error_msg) from e

    if not isinstance(criteria, dict):
        raise ValueError("Criteria must be a JSON object (dictionary)")

    # Validate each criterion
    for criterion_key, criterion_data in criteria.items():
        if not isinstance(criterion_data, dict):
            raise ValueError(f"Criterion {criterion_key} must be an object")
        if "description" not in criterion_data:
            raise ValueError(f"Criterion {criterion_key} must have a 'description' field")
        if "points" not in criterion_data:
            raise ValueError(f"Criterion {criterion_key} must have a 'points' field")

        try:
            points = float(criterion_data["points"])
            if points < 0:
                raise ValueError(f"Criterion {criterion_key} points must be non-negative")
        except (ValueError, TypeError) as err:
            raise ValueError(f"Criterion {criterion_key} points must be a valid number") from err

        # Validate ratings if present - handle both object and array formats
        if "ratings" in criterion_data:
            ratings = criterion_data["ratings"]

            if isinstance(ratings, dict):
                # Object format: {"1": {...}, "2": {...}}
                for rating_key, rating_data in ratings.items():
                    if not isinstance(rating_data, dict):
                        raise ValueError(f"Rating {rating_key} in criterion {criterion_key} must be an object")
                    if "description" not in rating_data:
                        raise ValueError(f"Rating {rating_key} in criterion {criterion_key} must have a 'description' field")
                    if "points" not in rating_data:
                        raise ValueError(f"Rating {rating_key} in criterion {criterion_key} must have a 'points' field")
                    try:
                        rating_points = float(rating_data["points"])
                        if rating_points < 0:
                            raise ValueError(f"Rating {rating_key} points must be non-negative")
                    except (ValueError, TypeError) as err:
                        raise ValueError(f"Rating {rating_key} points must be a valid number") from err
            elif isinstance(ratings, list):
                # Array format: [{"description": ..., "points": ...}, ...]
                for i, rating_data in enumerate(ratings):
                    if not isinstance(rating_data, dict):
                        raise ValueError(f"Rating {i} in criterion {criterion_key} must be an object")
                    if "description" not in rating_data:
                        raise ValueError(f"Rating {i} in criterion {criterion_key} must have a 'description' field")
                    if "points" not in rating_data:
                        raise ValueError(f"Rating {i} in criterion {criterion_key} must have a 'points' field")
                    try:
                        rating_points = float(rating_data["points"])
                        if rating_points < 0:
                            raise ValueError(f"Rating {i} points must be non-negative")
                    except (ValueError, TypeError) as err:
                        raise ValueError(f"Rating {i} points must be a valid number") from err
            else:
                raise ValueError(f"Criterion {criterion_key} ratings must be an object or array")

    return criteria
```
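A short usage sketch for the validator. The import path and the criteria payload are assumptions for illustration; the function also accepts the object form of ratings shown in its comments.

```python
import json

# Assumed import path for illustration; adjust to the actual package layout.
from canvas_mcp.tools.rubrics import validate_rubric_criteria

# Hypothetical criteria payload using the array format of ratings; the object
# format ({"1": {...}, "2": {...}}) is accepted as well.
criteria_json = json.dumps({
    "criterion_1": {
        "description": "Code quality",
        "points": 10,
        "ratings": [
            {"description": "Excellent", "points": 10},
            {"description": "Adequate", "points": 6},
        ],
    }
})

criteria = validate_rubric_criteria(criteria_json)
assert criteria["criterion_1"]["points"] == 10  # parsed back into a plain dict
```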
- src/canvas_mcp/tools/__init__.py:7-7 (registration): Imports the register_rubric_tools function, enabling its use in server.py for tool registration.

```python
from .rubrics import register_rubric_tools
```