# src/fctr_okta_mcp/utils/logger.py
"""
Centralized Logging for Okta MCP Server
Writes to a single rotating log file plus optional console output. Log levels:
- DEBUG: Detailed diagnostic info
- INFO: General operational messages
- WARNING: Rate limits, retries, potential issues
- ERROR: Failures and exceptions
Usage:
from fctr_okta_mcp.utils.logger import get_logger, log_audit_event
logger = get_logger(__name__)
logger.info("Processing request")
logger.warning("Rate limit approaching")
logger.error("Request failed", exc_info=True)
"""
import os
import sys
import json
import time
import logging
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, Optional
from logging.handlers import RotatingFileHandler
# Public API
__all__ = [
"setup_logging",
"get_logger",
"log_audit_event",
"AuditContext",
]
# ============================================================
# Configuration
# ============================================================
def _get_log_dir() -> Path:
"""Get log directory - repo's logs folder or home directory."""
current_file = Path(__file__).resolve()
repo_root = current_file.parent.parent.parent.parent
if (repo_root / "pyproject.toml").exists():
return repo_root / "logs"
return Path.home() / ".fctr-okta-mcp" / "logs"
LOG_DIR = Path(os.getenv("OKTA_MCP_LOG_DIR", str(_get_log_dir())))
LOG_LEVEL = os.getenv("OKTA_MCP_LOG_LEVEL", "INFO").upper()
LOG_FILE = "okta_mcp.log"
MAX_LOG_SIZE = int(os.getenv("OKTA_MCP_MAX_LOG_SIZE", str(10 * 1024 * 1024)))  # 10 MB
BACKUP_COUNT = int(os.getenv("OKTA_MCP_LOG_BACKUP_COUNT", "5"))
CONSOLE_LOGGING = os.getenv("OKTA_MCP_CONSOLE_LOGGING", "true").lower() == "true"
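# Example environment overrides (all optional; values below are illustrative):
#   OKTA_MCP_LOG_DIR=/var/log/okta-mcp
#   OKTA_MCP_LOG_LEVEL=DEBUG
#   OKTA_MCP_MAX_LOG_SIZE=5242880        # bytes per file before rotation
#   OKTA_MCP_LOG_BACKUP_COUNT=3
#   OKTA_MCP_CONSOLE_LOGGING=false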
# ============================================================
# Formatters
# ============================================================
class JSONFormatter(logging.Formatter):
"""JSON formatter for log file - machine readable."""
def format(self, record: logging.LogRecord) -> str:
log_data = {
"timestamp": datetime.now(timezone.utc).isoformat(),
"level": record.levelname,
"logger": record.name,
"message": record.getMessage(),
"module": record.module,
"function": record.funcName,
"line": record.lineno,
}
if record.exc_info:
log_data["exception"] = self.formatException(record.exc_info)
# Include extra fields for audit events
for attr in ["action", "status", "details", "duration_ms", "user"]:
if hasattr(record, attr):
log_data[attr] = getattr(record, attr)
return json.dumps(log_data, default=str)
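# A formatted file entry is one JSON object per line, roughly like this (values illustrative):
#   {"timestamp": "2024-01-01T00:00:00+00:00", "level": "INFO",
#    "logger": "fctr_okta_mcp.utils.logger", "message": "Processing request",
#    "module": "logger", "function": "setup_logging", "line": 42}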
class ConsoleFormatter(logging.Formatter):
"""Human-readable colored console output."""
COLORS = {
"DEBUG": "\033[36m", # Cyan
"INFO": "\033[32m", # Green
"WARNING": "\033[33m", # Yellow
"ERROR": "\033[31m", # Red
"CRITICAL": "\033[35m", # Magenta
"RESET": "\033[0m",
}
def format(self, record: logging.LogRecord) -> str:
use_colors = hasattr(sys.stderr, "isatty") and sys.stderr.isatty()
if use_colors:
color = self.COLORS.get(record.levelname, self.COLORS["RESET"])
reset = self.COLORS["RESET"]
level = f"{color}{record.levelname:8}{reset}"
else:
level = f"{record.levelname:8}"
        timestamp = datetime.fromtimestamp(record.created).strftime("%H:%M:%S")
# Shorten logger name for console
name = record.name.replace("fctr_okta_mcp.", "")
if len(name) > 25:
name = "..." + name[-22:]
return f"{timestamp} | {level} | {name:25} | {record.getMessage()}"
# ============================================================
# Singleton Logger Setup
# ============================================================
_initialized = False
_root_logger: Optional[logging.Logger] = None
def setup_logging() -> None:
"""
Initialize the centralized logging system.
Creates ONE log file and optional console output.
Safe to call multiple times - subsequent calls are no-ops.
"""
global _initialized, _root_logger
if _initialized:
return
# Create log directory
try:
LOG_DIR.mkdir(parents=True, exist_ok=True)
except Exception as e:
print(f"Warning: Could not create log directory {LOG_DIR}: {e}", file=sys.stderr)
# Configure the root logger for our package
_root_logger = logging.getLogger("fctr_okta_mcp")
_root_logger.setLevel(getattr(logging, LOG_LEVEL, logging.INFO))
# Remove any existing handlers
_root_logger.handlers.clear()
# File handler - JSON format, all levels
try:
file_handler = RotatingFileHandler(
LOG_DIR / LOG_FILE,
maxBytes=MAX_LOG_SIZE,
backupCount=BACKUP_COUNT,
encoding="utf-8"
)
file_handler.setFormatter(JSONFormatter())
file_handler.setLevel(logging.DEBUG) # Capture everything in file
_root_logger.addHandler(file_handler)
except Exception as e:
print(f"Warning: Could not create log file: {e}", file=sys.stderr)
# Console handler - colored, configurable level
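    # Console output goes to stderr so stdout stays free for MCP protocol traffic.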
if CONSOLE_LOGGING:
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(ConsoleFormatter())
console_handler.setLevel(getattr(logging, LOG_LEVEL, logging.INFO))
_root_logger.addHandler(console_handler)
# Prevent propagation to root
_root_logger.propagate = False
_initialized = True
def get_logger(name: str) -> logging.Logger:
"""
Get a logger instance. All loggers are children of fctr_okta_mcp.
Args:
name: Logger name (typically __name__)
Returns:
Logger instance that inherits from centralized config
"""
# Ensure logging is initialized
if not _initialized:
setup_logging()
# All loggers inherit from our root
logger = logging.getLogger(name)
# Don't add handlers - inherit from parent
# This ensures all logs go to the same file
return logger
# ============================================================
# Audit Logging Helper
# ============================================================
def log_audit_event(
action: str,
user: str = "mcp_client",
status: str = "success",
details: Optional[Dict[str, Any]] = None,
duration_ms: Optional[float] = None,
error: Optional[Exception] = None
) -> None:
"""
Log an audit event with structured data.
Args:
action: Operation name (e.g., "execute_code", "okta_user_list")
user: User/client identifier
status: "success", "error", "started", "completed"
details: Additional context
duration_ms: Operation duration
error: Exception if failed
"""
logger = get_logger("fctr_okta_mcp.audit")
# Build message
message = f"AUDIT: {action} - {status}"
    if duration_ms is not None:
        message += f" ({duration_ms:.0f}ms)"
# Create record with extra fields
level = logging.ERROR if status == "error" or error else logging.INFO
extra = {
"action": action,
"user": user,
"status": status.upper(),
"details": details or {},
}
    if duration_ms is not None:
        extra["duration_ms"] = round(duration_ms, 2)
# Log with extra data
record = logger.makeRecord(
name=logger.name,
level=level,
fn="",
lno=0,
msg=message,
args=(),
exc_info=(type(error), error, error.__traceback__) if error else None
)
for key, value in extra.items():
setattr(record, key, value)
logger.handle(record)
# ============================================================
# Context Manager for Timed Operations
# ============================================================
class AuditContext:
"""
Context manager for timing operations.
Usage:
with AuditContext("execute_code", details={"is_test": True}) as ctx:
result = do_work()
ctx.add_detail("count", len(result))
"""
def __init__(self, action: str, user: str = "mcp_client", details: Optional[Dict] = None):
self.action = action
self.user = user
self.details = details or {}
self.start_time: Optional[float] = None
def add_detail(self, key: str, value: Any):
self.details[key] = value
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
duration_ms = (time.time() - self.start_time) * 1000 if self.start_time else None
log_audit_event(
action=self.action,
user=self.user,
status="error" if exc_val else "completed",
details=self.details,
duration_ms=duration_ms,
            error=exc_val
)
return False # Don't suppress exceptions
# Auto-initialize on import
setup_logging()
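# Minimal manual smoke test (illustrative only; not part of the server runtime):
# exercises each log level, a direct audit event, and the AuditContext timer
# against the handlers configured above.
if __name__ == "__main__":
    _demo = get_logger("fctr_okta_mcp.demo")
    _demo.debug("debug message")
    _demo.info("info message")
    _demo.warning("warning message")
    try:
        raise ValueError("example failure")
    except ValueError:
        _demo.error("error with traceback", exc_info=True)

    log_audit_event("demo_action", details={"source": "__main__"}, duration_ms=1.2)

    with AuditContext("demo_timed_action", details={"is_test": True}) as ctx:
        ctx.add_detail("count", 3)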