# error_handling.py
"""
Enhanced Error Handling for MCP Server
Provides descriptive, user-friendly error messages for better debugging
"""
import logging
import traceback
from typing import Dict, Any, Optional
from fastapi import HTTPException
import json
logger = logging.getLogger(__name__)
class MCPError(Exception):
    """Root of the MCP exception hierarchy.

    Carries a human-readable message, a machine-readable error code, and an
    optional details mapping that response builders can serialize as-is.
    """
    def __init__(self, message: str, error_code: str = "MCP_ERROR", details: Optional[Dict[str, Any]] = None):
        super().__init__(message)
        self.message = message
        self.error_code = error_code
        # Falsy details (None or empty) normalize to a fresh empty dict.
        self.details = details if details else {}
class ValidationError(MCPError):
    """Raised when input validation fails.

    Args:
        message: Human-readable description of the validation failure.
        field: Name of the offending request field, if known.
        value: The offending value; stored stringified under "provided_value".
    """
    # PEP 484: implicit Optional (``field: str = None``) is disallowed; the
    # parameters are annotated Optional explicitly. Defaults are unchanged.
    def __init__(self, message: str, field: Optional[str] = None, value: Any = None):
        details: Dict[str, Any] = {}
        if field:
            details["field"] = field
        if value is not None:
            details["provided_value"] = str(value)
        super().__init__(message, "VALIDATION_ERROR", details)
class LLMError(MCPError):
    """Raised when LLM operations fail.

    Args:
        message: Human-readable description of the LLM failure.
        provider: Optional LLM provider name recorded in the details.
        model: Optional model name recorded in the details.
    """
    # PEP 484: explicit Optional annotations replace the implicit
    # ``str = None`` form. Defaults and behavior are unchanged.
    def __init__(self, message: str, provider: Optional[str] = None, model: Optional[str] = None):
        details: Dict[str, Any] = {}
        if provider:
            details["provider"] = provider
        if model:
            details["model"] = model
        super().__init__(message, "LLM_ERROR", details)
class DatabaseError(MCPError):
    """Raised when database operations fail.

    Args:
        message: Human-readable description of the database failure.
        query: Optional SQL text; truncated to 200 chars in the details.
        database: Optional database name recorded in the details.
    """
    # PEP 484: explicit Optional annotations replace the implicit
    # ``str = None`` form. Defaults and behavior are unchanged.
    def __init__(self, message: str, query: Optional[str] = None, database: Optional[str] = None):
        details: Dict[str, Any] = {}
        if query:
            # Parenthesized for readability: long queries are truncated with
            # an ellipsis, short ones stored verbatim (same semantics as before).
            details["query"] = (query[:200] + "...") if len(query) > 200 else query
        if database:
            details["database"] = database
        super().__init__(message, "DATABASE_ERROR", details)
class ServiceError(MCPError):
    """Raised when service operations fail.

    Args:
        message: Human-readable description of the service failure.
        service: Optional service name recorded in the details.
        operation: Optional operation name recorded in the details.
    """
    # PEP 484: explicit Optional annotations replace the implicit
    # ``str = None`` form. Defaults and behavior are unchanged.
    def __init__(self, message: str, service: Optional[str] = None, operation: Optional[str] = None):
        details: Dict[str, Any] = {}
        if service:
            details["service"] = service
        if operation:
            details["operation"] = operation
        super().__init__(message, "SERVICE_ERROR", details)
def create_error_response(error: Exception, include_traceback: bool = False) -> Dict[str, Any]:
    """Create a standardized error response payload.

    Args:
        error: The exception to convert. MCPError and HTTPException get
            structured handling; anything else is reported as unexpected.
        include_traceback: When True, attach the current traceback text
            (only meaningful when called from inside an ``except`` block).

    Returns:
        Dict with a single "error" key holding type, message and details.
    """
    if isinstance(error, MCPError):
        body = {
            "type": error.error_code,
            "message": error.message,
            "details": error.details
        }
    elif isinstance(error, HTTPException):
        body = {
            "type": "HTTP_ERROR",
            "message": str(error.detail),
            "details": {
                "status_code": error.status_code
            }
        }
    else:
        body = {
            "type": "UNEXPECTED_ERROR",
            "message": str(error),
            "details": {}
        }
    response: Dict[str, Any] = {"error": body}
    if include_traceback:
        response["error"]["traceback"] = traceback.format_exc()
    # Lazy %-style args: the string is only built if this log level is enabled
    # (the original eagerly formatted an f-string on every call).
    logger.error("Error response created: %s", response)
    return response
def validate_chat_request(request: Dict[str, Any]) -> None:
    """Validate chat request parameters.

    Checks the required question/message text plus the optional session_id,
    model, temperature and max_tokens fields. Each concern is delegated to a
    private helper; error messages are unchanged from the original.

    Args:
        request: Parsed JSON body of the chat request.

    Raises:
        ValidationError: If any field is missing, mistyped or out of range.
    """
    if not request:
        raise ValidationError("Request body is empty or invalid JSON")
    _validate_question_text(request)
    _validate_optional_string(request, "session_id")
    _validate_optional_string(request, "model")
    _validate_temperature(request.get("temperature"))
    _validate_max_tokens(request.get("max_tokens"))


def _validate_question_text(request: Dict[str, Any]) -> None:
    """Ensure the request carries a usable 'question' or 'message' string."""
    question = request.get("question") or request.get("message")
    if not question:
        raise ValidationError(
            "Either 'question' or 'message' field is required",
            field="question/message",
            value=f"Available fields: {list(request.keys())}"
        )
    if not isinstance(question, str):
        raise ValidationError(
            "Question/message must be a string",
            field="question/message",
            value=type(question).__name__
        )
    if not question.strip():
        raise ValidationError(
            "Question/message cannot be empty or only whitespace",
            field="question/message",
            value=f"Length: {len(question)}"
        )
    # Limit is applied to the stripped text, as in the original.
    if len(question.strip()) > 10000:
        raise ValidationError(
            "Question/message is too long (max 10,000 characters)",
            field="question/message",
            value=f"Length: {len(question)}"
        )


def _validate_optional_string(request: Dict[str, Any], field_name: str) -> None:
    """Reject an optional field that is present but not a string."""
    value = request.get(field_name)
    if value is not None and not isinstance(value, str):
        raise ValidationError(
            f"{field_name} must be a string",
            field=field_name,
            value=type(value).__name__
        )


def _validate_temperature(temperature: Any) -> None:
    """Check temperature is a number in [0.0, 2.0] when provided."""
    if temperature is None:
        return
    if not isinstance(temperature, (int, float)):
        raise ValidationError(
            "temperature must be a number",
            field="temperature",
            value=type(temperature).__name__
        )
    if not 0.0 <= temperature <= 2.0:
        raise ValidationError(
            "temperature must be between 0.0 and 2.0",
            field="temperature",
            value=temperature
        )


def _validate_max_tokens(max_tokens: Any) -> None:
    """Check max_tokens is a positive integer no larger than 100,000."""
    if max_tokens is None:
        return
    if not isinstance(max_tokens, int):
        raise ValidationError(
            "max_tokens must be an integer",
            field="max_tokens",
            value=type(max_tokens).__name__
        )
    if max_tokens <= 0:
        raise ValidationError(
            "max_tokens must be greater than 0",
            field="max_tokens",
            value=max_tokens
        )
    if max_tokens > 100000:
        raise ValidationError(
            "max_tokens cannot exceed 100,000",
            field="max_tokens",
            value=max_tokens
        )
def validate_conversation_request(request: Dict[str, Any]) -> None:
    """Validate conversation chat request parameters.

    Runs the base chat validation first, then checks the
    conversation-specific fields conversation_id, include_context
    and context_limit.

    Args:
        request: Parsed JSON body of the conversation request.

    Raises:
        ValidationError: If any field is missing, mistyped or out of range.
    """
    validate_chat_request(request)
    conversation_id = request.get("conversation_id")
    # Fix: use an explicit None check (not truthiness) so falsy non-strings
    # such as 0 are rejected, matching how session_id is validated.
    if conversation_id is not None and not isinstance(conversation_id, str):
        raise ValidationError(
            "conversation_id must be a string",
            field="conversation_id",
            value=type(conversation_id).__name__
        )
    include_context = request.get("include_context")
    if include_context is not None and not isinstance(include_context, bool):
        raise ValidationError(
            "include_context must be a boolean",
            field="include_context",
            value=type(include_context).__name__
        )
    context_limit = request.get("context_limit")
    if context_limit is not None:
        if not isinstance(context_limit, int):
            raise ValidationError(
                "context_limit must be an integer",
                field="context_limit",
                value=type(context_limit).__name__
            )
        if context_limit <= 0:
            raise ValidationError(
                "context_limit must be greater than 0",
                field="context_limit",
                value=context_limit
            )
        if context_limit > 100:
            raise ValidationError(
                "context_limit cannot exceed 100",
                field="context_limit",
                value=context_limit
            )
def handle_llm_error(error: Exception, provider: Optional[str] = None, model: Optional[str] = None) -> LLMError:
    """Convert various LLM errors to a descriptive LLMError.

    Classifies the failure by matching substrings of the original error text
    (connectivity, auth, rate limiting, unknown model, context overflow) and
    returns an LLMError with an actionable message.

    Args:
        error: The original exception raised by the LLM client.
        provider: Optional provider name recorded in the error details.
        model: Optional model name recorded in the error details.

    Returns:
        LLMError describing the failure; never raises itself.
    """
    # Fixes: placeholder-free f-strings are now plain strings (same bytes),
    # and implicit-Optional annotations are made explicit (PEP 484).
    error_msg = str(error).lower()
    if "connection" in error_msg or "timeout" in error_msg:
        return LLMError(
            "Cannot connect to LLM provider. Please check if the service is running.",
            provider=provider,
            model=model
        )
    elif "unauthorized" in error_msg or "api key" in error_msg:
        return LLMError(
            "Authentication failed. Please check your API key configuration.",
            provider=provider,
            model=model
        )
    elif "rate limit" in error_msg or "quota" in error_msg:
        return LLMError(
            "Rate limit exceeded. Please wait before making more requests.",
            provider=provider,
            model=model
        )
    elif "model" in error_msg and "not found" in error_msg:
        return LLMError(
            f"Model '{model}' not found or not available for provider '{provider}'.",
            provider=provider,
            model=model
        )
    elif "context length" in error_msg or "token limit" in error_msg:
        return LLMError(
            f"Input too long for model '{model}'. Please reduce the message length.",
            provider=provider,
            model=model
        )
    else:
        return LLMError(
            f"LLM request failed: {str(error)}",
            provider=provider,
            model=model
        )
def handle_database_error(error: Exception, query: Optional[str] = None, database: Optional[str] = None) -> DatabaseError:
    """Convert database errors to a descriptive DatabaseError.

    Classifies the failure by matching substrings of the original error text
    (connectivity, missing objects, permissions, syntax) and returns a
    DatabaseError with an actionable message.

    Args:
        error: The original exception raised by the database driver.
        query: Optional SQL text recorded (truncated) in the error details.
        database: Optional database name recorded in the error details.

    Returns:
        DatabaseError describing the failure; never raises itself.
    """
    # Fixes: placeholder-free f-strings are now plain strings (same bytes),
    # and implicit-Optional annotations are made explicit (PEP 484).
    error_msg = str(error).lower()
    if "connection" in error_msg:
        return DatabaseError(
            f"Cannot connect to database '{database}'. Please check the connection settings.",
            query=query,
            database=database
        )
    elif "does not exist" in error_msg:
        return DatabaseError(
            "Database object does not exist. Please check table/column names.",
            query=query,
            database=database
        )
    elif "permission denied" in error_msg or "access denied" in error_msg:
        return DatabaseError(
            "Permission denied. Please check database user permissions.",
            query=query,
            database=database
        )
    elif "syntax error" in error_msg:
        return DatabaseError(
            "SQL syntax error. Please check your query syntax.",
            query=query,
            database=database
        )
    else:
        return DatabaseError(
            f"Database operation failed: {str(error)}",
            query=query,
            database=database
        )
def create_helpful_suggestions(error: MCPError) -> list[str]:
    """Generate helpful suggestions based on error type.

    Selects an ordered rule list for the error code and returns the tips of
    the first keyword found in the (lowercased) error message; unknown codes
    or unmatched messages yield an empty list.
    """
    # Ordered (keyword, tips) rules per error code; the first keyword that
    # appears in the message wins, mirroring the original elif chains.
    playbook = {
        "VALIDATION_ERROR": (
            ("question", [
                "Make sure to include either 'question' or 'message' in your request",
                "Check that the field contains actual text, not just whitespace",
                "Verify the request is valid JSON format"
            ]),
            ("temperature", [
                "Set temperature between 0.0 (focused) and 2.0 (creative)",
                "Use 0.7 for balanced responses"
            ]),
            ("max_tokens", [
                "Set max_tokens between 1 and 100,000",
                "Use 1000-4000 for typical responses"
            ]),
        ),
        "LLM_ERROR": (
            ("connect", [
                "Check if Ollama is running: http://localhost:11434",
                "Verify your API keys are configured correctly",
                "Test connection with: curl http://localhost:11434/api/tags"
            ]),
            ("model", [
                "List available models: curl http://localhost:11434/api/tags",
                "Pull a model: ollama pull mistral",
                "Use 'mistral:latest' as a reliable default"
            ]),
        ),
        "DATABASE_ERROR": (
            ("connect", [
                "Check if PostgreSQL is running",
                "Verify database connection string",
                "Test with: psql -h localhost -U postgres"
            ]),
        ),
    }
    lowered_message = error.message.lower()
    for keyword, tips in playbook.get(error.error_code, ()):
        if keyword in lowered_message:
            return list(tips)
    return []