related_concepts
Discover semantically related concepts using ConceptNet's similarity algorithms to expand exploration, find related terms, and understand semantic neighborhoods.
Instructions
Find concepts semantically related to a given concept using ConceptNet's embeddings.
This tool uses ConceptNet's semantic similarity algorithms to discover
concepts that are related to the input term. Results are ranked by
similarity score and include comprehensive analysis.
Features:
- Semantic similarity discovery using advanced algorithms
- Ranked results with detailed similarity analysis
- Default English language filtering (can be disabled or changed)
- Statistical analysis and categorization
- Format control: minimal (~96% smaller) vs verbose (full metadata)
Format Options:
- verbose=false (default): Returns minimal format optimized for LLM consumption
- verbose=true: Returns comprehensive format with full ConceptNet metadata
- Backward compatibility maintained with existing tools
Similarity Analysis:
- Similarity scores from 0.0 (unrelated) to 1.0 (very similar)
- Descriptive categories (very strong, strong, moderate, weak, very weak)
- Relationship context and likely connections
- Language distribution and statistical summaries
Use this when you need to:
- Discover semantically similar concepts
- Expand concept exploration and brainstorming
- Find related terms and ideas
- Understand semantic neighborhoods
Input Schema
Table / JSON Schema

| Name | Required | Description | Default |
|---|---|---|---|
| term | Yes | The concept term to find related concepts for (e.g., "dog", "happiness") | |
| language | No | Language code for the input term | en |
| filter_language | No | Language code to filter results to | en |
| limit | No | Maximum number of related concepts to return (1–100) | 100 |
| verbose | No | If true, return the verbose format with full ConceptNet metadata | false |
Implementation Reference
# Core handler function executing the tool logic: validates inputs, queries
# ConceptNet /related endpoint, processes responses into minimal or verbose
# formats with similarity analysis and statistics.
async def related_concepts(
    term: str,
    ctx: Context,
    language: str = "en",
    filter_language: Optional[str] = None,
    limit: int = 100,
    verbose: bool = False
) -> Dict[str, Any]:
    """
    Find concepts semantically related to the given concept.

    This tool uses ConceptNet's semantic similarity algorithms to find
    concepts that are related to the input term. Results are ranked by
    similarity score and can be filtered by language. By default, returns
    a minimal format optimized for LLM consumption.

    Args:
        term: The concept term to find related concepts for (e.g., "dog", "happiness")
        language: Language code for the input term (default: "en" for English)
        filter_language: Language to filter results to (default: None, which
            defaults to "en" for English)
        limit: Maximum number of related concepts to return (default: 100, max: 100)
        verbose: If True, returns detailed format with full metadata (default: False)

    Returns:
        Related concepts with similarity scores (minimal format) or
        comprehensive analysis with statistical metadata (verbose format).
        On failure, a dict with an "error" key instead of raising.

    Examples:
        - related_concepts("dog") -> Minimal format with similarity scores
        - related_concepts("dog", verbose=True) -> Full detailed format with analysis
        - related_concepts("perro", "es") -> Spanish concepts related to "perro"
        - related_concepts("cat", limit=10) -> Top 10 concepts related to "cat"
    """
    start_time = datetime.now(timezone.utc)

    try:
        # Default filter_language to "en" if not provided.
        if filter_language is None:
            filter_language = "en"

        # Log the incoming request.
        await ctx.info(f"Finding related concepts for: '{term}' (language: {language})")

        # 1. Parameter validation (raises MCPValidationError on bad input).
        await _validate_parameters(term, language, filter_language, limit, ctx)

        # 2. Normalize the input term.
        normalized_term = normalize_concept_text(term, language)
        if normalized_term != term:
            await ctx.debug(f"Normalized term: '{term}' -> '{normalized_term}'")

        # 3. Query ConceptNet API for related concepts.
        await ctx.info(f"Querying ConceptNet /related endpoint for '{normalized_term}'...")
        async with ConceptNetClient() as client:
            try:
                response = await client.get_related(
                    term=normalized_term,
                    language=language,
                    filter_language=filter_language,
                    limit=limit
                )
            except ConceptNotFoundError:
                return _create_not_found_response(term, language, normalized_term)
            except ConceptNetAPIError as e:
                return _create_api_error_response(term, language, str(e))

        # 4. Return appropriate format based on verbose parameter.
        if verbose:
            # Detailed format with full metadata (existing behavior).
            enhanced_response = await _create_enhanced_response(
                response, term, normalized_term, language,
                filter_language, limit, start_time, ctx
            )
            total_found = enhanced_response.get("summary", {}).get("total_found", 0)
            await ctx.info(f"Successfully found {total_found} related concepts for '{term}' (verbose format)")
            return enhanced_response

        # Minimal format optimized for LLMs: rebuild a processed-response
        # shape that the minimal formatter understands.
        mock_response = {"related_concepts": []}

        for concept_data in response.get("related", []):
            concept_id = concept_data.get("@id", "")
            weight = concept_data.get("weight", 0.0)

            # Extract the term from the URI; ConceptNet concept URIs look
            # like /c/<lang>/<term>, which splits to ['', 'c', lang, term].
            term_text = ""
            if concept_id:
                parts = concept_id.split('/')
                if len(parts) >= 4 and parts[1] == 'c':
                    raw_term = parts[3].replace('_', ' ')
                    # Strip internal "/Wn/..." and POS annotations.
                    term_text = _clean_concept_term(raw_term)

            if term_text:
                mock_response["related_concepts"].append({
                    "concept": {
                        "term": term_text,
                        "normalized_display": term_text
                    },
                    "similarity": {
                        "score": weight
                    }
                })

        minimal_response = ResponseProcessor().create_minimal_related_response(
            mock_response, term
        )
        total_found = minimal_response.get("summary", {}).get("total_found", 0)
        await ctx.info(f"Successfully found {total_found} related concepts for '{term}' (minimal format)")
        return minimal_response

    except MCPValidationError as e:
        # Handle validation errors specifically.
        return {
            "error": "validation_error",
            "message": f"Validation error for field '{e.field}': {e.value} (expected: {e.expected})",
            "field": e.field,
            "value": e.value,
            "expected": e.expected,
            "term": term,
            "language": language,
            # BUGFIX: start_time is timezone-aware, so isoformat() already
            # ends in "+00:00"; appending "Z" produced the malformed
            # "...+00:00Z". Replace the offset with "Z" instead.
            "query_time": start_time.isoformat().replace("+00:00", "Z")
        }
    except ConceptNotFoundError:
        return _create_not_found_response(term, language, term)
    except ConceptNetAPIError as e:
        return _create_api_error_response(term, language, str(e))
    except Exception as e:
        logger.error(f"Unexpected error in related_concepts: {e}")
        return {
            "error": "unexpected_error",
            "message": f"An unexpected error occurred: {str(e)}",
            "term": term,
            "language": language,
            # Same timestamp fix as above.
            "query_time": start_time.isoformat().replace("+00:00", "Z")
        }
- Pydantic model defining the structure of RelatedConcepts API responses from ConceptNet, used for type-safe parsing and validation.class RelatedConceptsResponse(BaseModel): """ Response model for related concepts queries. Contains a list of concepts related to the query concept, typically with similarity scores. """ related: List[Dict[str, Any]] = Field( default_factory=list, description="List of related concepts with similarity scores" ) @property def related_count(self) -> int: """Return the number of related concepts.""" return len(self.related) def get_top_related(self, n: int = 5) -> List[Dict[str, Any]]: """ Get the top N most related concepts. Args: n: Number of top concepts to return Returns: List of the most related concepts """ # Sort by weight/score if available, otherwise return first n sorted_related = sorted( self.related, key=lambda x: x.get('weight', 0), reverse=True ) return sorted_related[:n] def __str__(self) -> str: """Return a human-readable string representation.""" return f"RelatedConceptsResponse with {self.related_count} related concepts"
# src/conceptnet_mcp/server.py:277-344 (registration)
# FastMCP tool registration decorator and wrapper function that registers the
# 'related_concepts' tool, defines input parameters via type hints, and
# delegates to the core handler.
@mcp.tool(
    name="related_concepts",
    description="""
    Find concepts semantically related to a given concept using ConceptNet's embeddings.

    This tool uses ConceptNet's semantic similarity algorithms to discover
    concepts that are related to the input term. Results are ranked by
    similarity score and include comprehensive analysis.

    Features:
    - Semantic similarity discovery using advanced algorithms
    - Ranked results with detailed similarity analysis
    - Default English language filtering (can be disabled or changed)
    - Statistical analysis and categorization
    - Format control: minimal (~96% smaller) vs verbose (full metadata)

    Format Options:
    - verbose=false (default): Returns minimal format optimized for LLM consumption
    - verbose=true: Returns comprehensive format with full ConceptNet metadata
    - Backward compatibility maintained with existing tools

    Similarity Analysis:
    - Similarity scores from 0.0 (unrelated) to 1.0 (very similar)
    - Descriptive categories (very strong, strong, moderate, weak, very weak)
    - Relationship context and likely connections
    - Language distribution and statistical summaries

    Use this when you need to:
    - Discover semantically similar concepts
    - Expand concept exploration and brainstorming
    - Find related terms and ideas
    - Understand semantic neighborhoods
    """,
    tags={"conceptnet", "semantic", "similarity", "related", "discovery"}
)
async def related_concepts_tool(
    term: str,
    ctx: Context,
    language: str = "en",
    # NOTE(review): the wrapper defaults filter_language to "en" directly,
    # while the core handler defaults to None and then coerces None to "en" —
    # net behavior is the same either way.
    filter_language: Optional[str] = "en",
    limit: int = 100,
    verbose: bool = False
) -> Dict[str, Any]:
    """
    MCP tool wrapper for finding related concepts functionality.

    Args:
        term: The concept term to find related concepts for (e.g., "dog", "happiness")
        language: Language code for the input term (default: "en" for English)
        filter_language: Language to filter results to (default: "en" for English, use None for no filtering)
        limit: Maximum number of related concepts to return (default: 100, max: 100)
        verbose: If True, returns detailed format with full metadata (default: False)

    Returns:
        Related concepts with similarity scores (minimal format) or
        comprehensive analysis with statistical metadata (verbose format)
    """
    try:
        # Delegate to the core handler, passing all parameters through unchanged.
        return await related_concepts(
            term=term,
            ctx=ctx,
            language=language,
            filter_language=filter_language,
            limit=limit,
            verbose=verbose
        )
    except Exception as e:
        # Route any unexpected failure through the shared server error handler.
        return await handle_server_error(e, "related_concepts")
- Helper function to clean concept terms by stripping internal WordNet/POS annotations for user-friendly display.def _clean_concept_term(term: str) -> str: """ Clean concept terms by removing WordNet and POS tag annotations. Removes technical annotations like /Wn/Food, /Wn/Substance, /N, /V, etc. that are used internally by ConceptNet but should not appear in user-facing results. Args: term: Original concept term that may contain POS tags Returns: Cleaned term without POS tag annotations """ if not term or not isinstance(term, str): return term # Remove WordNet-derived tags like /Wn/Food, /Wn/Substance, etc. # Pattern matches: /Wn/ followed by any word characters import re wn_pattern = r'/Wn/[\w]*' cleaned = re.sub(wn_pattern, '', term) # Remove part-of-speech tags like /N, /V, /A, /ADJ, /ADV, etc. # Pattern matches: slash followed by uppercase letters/common POS tags pos_pattern = r'/[A-Z][A-Z]*\b' cleaned = re.sub(pos_pattern, '', cleaned) # Remove trailing slashes (edge case) cleaned = re.sub(r'/$', '', cleaned) # Clean up any remaining whitespace cleaned = cleaned.strip() return cleaned if cleaned else term
- Input parameter validation helper ensuring valid term, languages, and limits before API query.async def _validate_parameters( term: str, language: str, filter_language: Optional[str], limit: int, ctx: Context ) -> None: """Validate all input parameters.""" # Validate term if not term or not term.strip(): raise MCPValidationError("term", term, "Non-empty string") if len(term.strip()) > 200: # Reasonable length limit raise MCPValidationError("term", term, "Term length must be 200 characters or less") # Validate language if not language or not isinstance(language, str): raise MCPValidationError("language", language, "Valid language code (e.g., 'en', 'es')") if not validate_language_code(language): await ctx.warning(f"Language code '{language}' may not be supported by ConceptNet") # Validate filter_language if provided if filter_language: if not isinstance(filter_language, str): raise MCPValidationError("filter_language", filter_language, "Valid language code or None") if not validate_language_code(filter_language): await ctx.warning(f"Filter language code '{filter_language}' may not be supported by ConceptNet") # Validate limit if not isinstance(limit, int) or limit < 1 or limit > 100: raise MCPValidationError("limit", limit, "Integer between 1 and 100")