"""
Logging configuration for GeoSight MCP Server.
Uses structlog for structured, JSON-friendly logging.
"""
import logging
import sys
from typing import Optional
import structlog
def setup_logging(
    level: str = "INFO",
    json_format: bool = False,
    log_file: Optional[str] = None,
) -> None:
    """
    Configure structured logging for the application.

    Safe to call more than once: ``force=True`` clears handlers left by a
    previous call, so repeated setup does not accumulate duplicate file
    handlers.

    Args:
        level: Log level name (DEBUG, INFO, WARNING, ERROR); unknown names
            fall back to INFO.
        json_format: Whether to output JSON format (for production).
        log_file: Optional file path to additionally write logs to.
    """
    # Convert string level to logging constant; unknown names default to INFO.
    numeric_level = getattr(logging, level.upper(), logging.INFO)

    # Configure standard library logging. force=True removes any existing
    # root handlers first — without it, basicConfig is a silent no-op when
    # the root logger is already configured, while the file handler below
    # would still be appended again, duplicating every log line on re-setup.
    logging.basicConfig(
        format="%(message)s",
        stream=sys.stderr,
        level=numeric_level,
        force=True,
    )

    # Processors shared by both output modes.
    shared_processors = [
        structlog.contextvars.merge_contextvars,
        structlog.processors.add_log_level,
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.stdlib.ExtraAdder(),
    ]
    if json_format:
        # Production: machine-readable JSON lines.
        processors = shared_processors + [
            structlog.processors.dict_tracebacks,
            structlog.processors.JSONRenderer(),
        ]
    else:
        # Development: pretty, colorized console output.
        processors = shared_processors + [
            structlog.dev.ConsoleRenderer(
                colors=True,
                exception_formatter=structlog.dev.plain_traceback,
            ),
        ]

    # Configure structlog to route events through stdlib logging.
    structlog.configure(
        processors=processors,
        wrapper_class=structlog.stdlib.BoundLogger,
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=True,
    )

    # Optionally mirror logs to a file via the root logger.
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(numeric_level)
        if json_format:
            # The message is already a complete JSON document rendered by
            # structlog; write it through unchanged.
            file_handler.setFormatter(logging.Formatter("%(message)s"))
        else:
            # NOTE(review): in console mode the rendered message contains
            # ANSI color codes, which end up in the file as well — consider
            # structlog.stdlib.ProcessorFormatter for per-handler rendering.
            file_handler.setFormatter(
                logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
            )
        logging.getLogger().addHandler(file_handler)
def get_logger(name: str) -> structlog.BoundLogger:
    """
    Return a structlog logger for the given module.

    Args:
        name: Logger name (typically ``__name__``).

    Returns:
        A structlog logger configured by :func:`setup_logging`.
    """
    logger = structlog.get_logger(name)
    return logger
class LogContext:
    """
    Context manager for adding temporary key/value context to logs.

    On exit, previously bound values are *restored* (not just removed), so
    nested ``LogContext`` blocks that rebind the same key behave correctly.

    Usage:
        with LogContext(request_id="abc123", user="john"):
            logger.info("Processing request")
    """

    def __init__(self, **kwargs):
        # Key/value pairs to bind for the duration of the block.
        self.context = kwargs
        # Mapping of key -> contextvars Token, filled in by __enter__.
        self._token = None

    def __enter__(self):
        self._token = structlog.contextvars.bind_contextvars(**self.context)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Fix: reset to the tokens captured on enter instead of unbinding the
        # keys outright — unbind_contextvars(*keys) would also drop values an
        # outer LogContext had bound for the same keys.
        structlog.contextvars.reset_contextvars(**self._token)
        # Never suppress exceptions raised inside the block.
        return False
def log_execution_time(logger: "structlog.BoundLogger", operation: str):
    """
    Decorator factory that logs the execution time of a function.

    Supports both sync and async callables. On success the wrapper emits
    ``{operation}_completed`` with ``duration_ms``; on failure it emits
    ``{operation}_failed`` with ``duration_ms`` and ``error``, then
    re-raises the original exception.

    Args:
        logger: Logger instance (anything exposing ``info``/``error``).
        operation: Name of the operation being timed.
    """
    # Hoisted here so they run once per factory call, not per decoration.
    import asyncio
    import functools
    import time

    def _log_success(elapsed: float) -> None:
        # Shared success logging for both wrappers.
        logger.info(
            f"{operation}_completed",
            duration_ms=round(elapsed * 1000, 2),
        )

    def _log_failure(elapsed: float, exc: Exception) -> None:
        # Shared failure logging for both wrappers.
        logger.error(
            f"{operation}_failed",
            duration_ms=round(elapsed * 1000, 2),
            error=str(exc),
        )

    def decorator(func):
        @functools.wraps(func)
        async def async_wrapper(*args, **kwargs):
            start = time.perf_counter()
            try:
                result = await func(*args, **kwargs)
            except Exception as e:
                _log_failure(time.perf_counter() - start, e)
                raise
            _log_success(time.perf_counter() - start)
            return result

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            start = time.perf_counter()
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                _log_failure(time.perf_counter() - start, e)
                raise
            _log_success(time.perf_counter() - start)
            return result

        # Pick the wrapper matching the wrapped function's flavor.
        return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper

    return decorator