"""Structured logging framework for Oizom IoT edge devices.
Provides a centralized, configurable logging system with context-based filtering,
timezone-aware timestamps, ANSI color output, and safe rotating file handlers.
All modules in the Oizom hardware stack use this logger for consistent,
machine-parseable log output.
The logging pipeline is built from composable components:
- :class:`LoggerConfig` -- immutable dataclass holding all configuration
- :class:`ContextFilter` -- injects a context token (e.g., ``"INIT"``, ``"READ"``)
into every log record for easy grep-based filtering
- :class:`StructuredFormatter` -- pipe-delimited format with optional timezone
conversion and country indicators
- :class:`ColorFormatter` -- ANSI color wrapper respecting ``NO_COLOR`` / TTY detection
- :class:`SafeRotatingFileHandler` -- rotation that tolerates missing backup files
- :class:`OizomLogger` -- public API combining all of the above
Example output formats::
INFO | SENSOR | Temperature reading: 25.3C
2025-12-10 14:30:45.123456 | INFO | SENSOR | Temperature reading: 25.3C
Typical usage::
from utils.oizom_logger import OizomLogger
context_logger = OizomLogger(__name__)
context_logger.info_with_context("INIT", "Sensor setup complete")
basic_logger = OizomLogger(__name__).get()
basic_logger.info("Standard log message")
Note:
This module is imported by virtually every file in the codebase. Keep it
free of circular imports -- it must not import from ``Sensor``, ``Manager``,
or any ``OzWrapper`` module.
"""
import logging
import logging.handlers
import os
import sys
from dataclasses import dataclass
from typing import Any
# Handle timezone support for Python 3.9+
try:
from datetime import datetime
from zoneinfo import ZoneInfo
except ImportError:
ZoneInfo = None # type: ignore
# ===========================================================================
# TIMEZONE TO COUNTRY MAPPING
# ===========================================================================
# Maps timezone names to country codes and emoji flags for global deployments.
# This helps identify where logs are coming from in distributed systems.
# Used by StructuredFormatter; unknown zones fall back to {"🌐", "??"} there.
#
# NOTE(review): several keys are NOT valid IANA zone identifiers --
# "Asia/Mumbai", "Asia/Delhi", and "Asia/Beijing" do not exist in tzdata
# (the canonical zones are "Asia/Kolkata" and "Asia/Shanghai"), and the
# "US/*" names are deprecated aliases. They are harmless as lookup keys,
# but confirm nothing passes them to ZoneInfo() expecting them to resolve.
TIMEZONE_COUNTRY_MAP: dict[str, dict[str, str]] = {
    "UTC": {"emoji": "🌍", "code": "UTC"},  # Global/Universal
    # United States
    "US/Eastern": {"emoji": "🇺🇸", "code": "US"},
    "US/Central": {"emoji": "🇺🇸", "code": "US"},
    "US/Mountain": {"emoji": "🇺🇸", "code": "US"},
    "US/Pacific": {"emoji": "🇺🇸", "code": "US"},
    "America/New_York": {"emoji": "🇺🇸", "code": "US"},
    "America/Chicago": {"emoji": "🇺🇸", "code": "US"},
    "America/Denver": {"emoji": "🇺🇸", "code": "US"},
    "America/Los_Angeles": {"emoji": "🇺🇸", "code": "US"},
    # Europe
    "Europe/London": {"emoji": "🇬🇧", "code": "GB"},
    "Europe/Paris": {"emoji": "🇫🇷", "code": "FR"},
    "Europe/Berlin": {"emoji": "🇩🇪", "code": "DE"},
    "Europe/Rome": {"emoji": "🇮🇹", "code": "IT"},
    "Europe/Madrid": {"emoji": "🇪🇸", "code": "ES"},
    "Europe/Amsterdam": {"emoji": "🇳🇱", "code": "NL"},
    "Europe/Stockholm": {"emoji": "🇸🇪", "code": "SE"},
    "Europe/Oslo": {"emoji": "🇳🇴", "code": "NO"},
    # Asia
    "Asia/Kolkata": {"emoji": "🇮🇳", "code": "IN"},
    "Asia/Mumbai": {"emoji": "🇮🇳", "code": "IN"},
    "Asia/Delhi": {"emoji": "🇮🇳", "code": "IN"},
    "Asia/Tokyo": {"emoji": "🇯🇵", "code": "JP"},
    "Asia/Shanghai": {"emoji": "🇨🇳", "code": "CN"},
    "Asia/Beijing": {"emoji": "🇨🇳", "code": "CN"},
    "Asia/Seoul": {"emoji": "🇰🇷", "code": "KR"},
    "Asia/Singapore": {"emoji": "🇸🇬", "code": "SG"},
    "Asia/Dubai": {"emoji": "🇦🇪", "code": "AE"},
    # Australia
    "Australia/Sydney": {"emoji": "🇦🇺", "code": "AU"},
    "Australia/Melbourne": {"emoji": "🇦🇺", "code": "AU"},
    "Australia/Perth": {"emoji": "🇦🇺", "code": "AU"},
    # Americas (other)
    "America/Sao_Paulo": {"emoji": "🇧🇷", "code": "BR"},
    "America/Mexico_City": {"emoji": "🇲🇽", "code": "MX"},
    "America/Toronto": {"emoji": "🇨🇦", "code": "CA"},
    # Africa
    "Africa/Cairo": {"emoji": "🇪🇬", "code": "EG"},
    "Africa/Lagos": {"emoji": "🇳🇬", "code": "NG"},
    "Africa/Johannesburg": {"emoji": "🇿🇦", "code": "ZA"},
}
# ===========================================================================
# SECTION 1: CONFIGURATION
# ===========================================================================
# Instead of passing 30+ parameters around, we use a single dataclass.
# This makes it easy to see all options, set defaults, and pass config
# to helper functions without repeating parameters everywhere.
#
# EXAMPLE OUTPUT FORMATS:
#
# Basic (minimal):
# INFO | SENSOR | Temperature reading: 25.3°C
#
# With timestamp:
# 2025-12-10 14:30:45.123456 | INFO | SENSOR | Temperature reading: 25.3°C
#
# With timezone + country emoji:
# 2025-12-10 14:30:45.123456 🇮🇳 | INFO | SENSOR | Temperature reading: 25.3°C
#
# With location info (logger, function, line):
# INFO | SENSOR | Sensor.Sensor | read_temp | 142 | Temperature: 25.3°C
#
# With process/thread info:
# INFO | SENSOR | 12345 | MainThread | Temperature: 25.3°C
#
# Full example (all options enabled):
# 2025-12-10 14:30:45.123456 🇮🇳 | INFO | SENSOR | Sensor.Sensor | read_temp | 142 | 12345 | MainThread | Temperature: 25.3°C
@dataclass(frozen=True) # frozen=True makes it immutable (safer)
class LoggerConfig:
"""All configuration for a logger instance in one place.
WHY: The old logger had parameters scattered across __init__, methods,
and formatter constructors. This consolidates everything.
BENEFITS:
- Type hints on every field
- IDE autocomplete
- Easy to create from dict (for runtime config changes)
- Can't be accidentally modified after creation
"""
# Core identity
name: str # Logger name (e.g., "Sensor.Sensor", "Network.Network")
# File configuration
base_log_dir: str = "logs"
filename: str = "ozone_hardware.log"
max_bytes: int = 10 * 1024 * 1024 # 10 MB per file
backup_count: int = 5 # Keep 5 backup files
# Log levels
console_level: int = logging.DEBUG # What goes to stdout
file_level: int = logging.DEBUG # What goes to file (usually more verbose)
# Timestamp formatting
use_timestamp: bool = True
use_12_hour: bool = False # False = 24-hour format
datefmt_24h: str = "%Y-%m-%d %H:%M:%S.%f" # Default 24-hour time format
datefmt_12h: str = "%Y-%m-%d %I:%M:%S.%f %p" # Default 12-hour time format
timezone: str | None = None # e.g., "Asia/Kolkata", "UTC", None = local time
show_country_code: bool = False # Show country code (e.g., "IN", "US") with timezone
show_country_emoji: bool = False # Show emoji flag (e.g., 🇮🇳, 🇺🇸) with timezone
# Visual formatting
color: bool = True # ANSI colors in console
context_width: int = 10 # Width for context column (e.g., "INIT", "SENSOR")
level_width: int = 8 # Width for level column (e.g., "INFO", "ERROR")
# Location information (optional debugging details)
show_function: bool = False # Show function name
show_line: bool = False # Show line number
show_logger_name: bool = False # Show logger name
# Location column widths (only used if corresponding show_* is True)
logger_name_width: int = 20 # Width for logger name column
func_name_width: int = 20 # Width for function name column
lineno_width: int = 5 # Width for line number column
# Process/Thread information (useful for multi-threaded apps)
use_process_info: bool = False # Show process ID (PID)
use_thread_info: bool = False # Show thread name
# NOTE: Country code/emoji relationship:
# - If both show_country_emoji and show_country_code are False: no country indicator
# - If show_country_emoji is True: emoji takes priority (e.g., 🇮🇳)
# - If only show_country_code is True: text code is shown (e.g., "IN")
# - Country indicators only appear when timezone is set
@classmethod
def from_dict(cls, name: str, config_dict: dict[str, Any]) -> "LoggerConfig":
"""Create config from a dictionary (useful for apply_logger_config)."""
# Step 1: Helper function to convert log level strings to int
def parse_level(value: Any) -> int:
"""Convert 'INFO' string or int to logging.INFO constant."""
if isinstance(value, str):
return getattr(logging, value.upper(), logging.INFO)
return value if isinstance(value, int) else logging.INFO
# Step 2: Create a copy to avoid modifying the original
parsed = config_dict.copy()
# Step 3: Parse log levels if present (convert strings like "INFO" to int)
if "console_level" in parsed:
parsed["console_level"] = parse_level(parsed["console_level"])
if "file_level" in parsed:
parsed["file_level"] = parse_level(parsed["file_level"])
# Step 4: Filter to only valid fields (ignore unknown keys)
valid_fields = {k: v for k, v in parsed.items() if k in cls.__annotations__}
# Step 5: Create config with defaults for any missing fields
return cls(name=name, **valid_fields)
# ===========================================================================
# SECTION 2: FILTERS
# ===========================================================================
# Filters run before formatters and can add/modify record attributes.
class ContextFilter(logging.Filter):
    """Guarantees a fixed-width ``context`` attribute on every record.

    WHY: our format strings reference ``%(context)s``, but third-party
    libraries log records without that attribute, which would break
    formatting. Records that already carry a context (set via ``extra=``)
    pass through untouched; all others get the first dotted segment of the
    logger name, fitted to the configured column width.
    """

    def __init__(self, context_width: int) -> None:
        super().__init__()
        self.context_width = context_width

    def filter(self, record: logging.LogRecord) -> bool:  # type: ignore[override]
        """Attach a default context when absent; never drops a record."""
        if not hasattr(record, "context"):
            # e.g. "urllib3.connectionpool" -> "urllib3", then fit to width.
            # Slicing first makes the ljust a no-op for over-long names, so
            # the result is always exactly context_width characters.
            fallback = record.name.split(".")[0]
            record.context = fallback[: self.context_width].ljust(self.context_width)
        return True  # Filtering is additive only; every record is kept.
# ===========================================================================
# SECTION 3: FORMATTERS
# ===========================================================================
# Formatters take a LogRecord and turn it into a string.
class StructuredFormatter(logging.Formatter):
"""Builds the log line with optional timezone conversion.
WHY: The old TimezoneAwareFormatter + DefensiveFormatter did two jobs.
This combines them into one cleaner implementation.
EXAMPLE OUTPUT:
2025-12-10 14:30:45.123456 | INFO | SENSOR | Temperature reading: 25.3°C
2025-12-10 14:30:45.123456 🇮🇳 | INFO | SENSOR | Temperature reading: 25.3°C (with emoji)
2025-12-10 14:30:45.123456 IN | INFO | SENSOR | Temperature reading: 25.3°C (with code)
"""
def __init__(
self,
fmt: str,
datefmt: str | None,
timezone: str | None,
show_country_code: bool = False,
show_country_emoji: bool = False,
) -> None:
super().__init__(fmt=fmt, datefmt=datefmt)
# Parse timezone if available
self._tz = ZoneInfo(timezone) if (timezone and ZoneInfo) else None
self.timezone_name = timezone
self.show_country_code = show_country_code
self.show_country_emoji = show_country_emoji
# Determine country display suffix (emoji or code)
self.country_display = ""
if timezone and (show_country_code or show_country_emoji):
country_info = TIMEZONE_COUNTRY_MAP.get(timezone, {"emoji": "🌐", "code": "??"})
if show_country_emoji:
self.country_display = country_info["emoji"]
elif show_country_code:
self.country_display = country_info["code"]
def formatTime(self, record: logging.LogRecord, datefmt: str | None = None) -> str: # type: ignore[override]
"""Convert timestamp to target timezone and optionally add country indicator."""
# Step 1: Convert timestamp to datetime object with timezone
if self._tz:
dt = datetime.fromtimestamp(record.created, tz=self._tz)
else:
dt = datetime.fromtimestamp(record.created) # Local time
# Step 2: Format the datetime using the date format string
fmt = datefmt or self.datefmt or self.default_time_format
time_str = dt.strftime(fmt)
# Step 3: Add microseconds if not in format string (for precision)
if "%f" not in fmt:
time_str = f"{time_str}.{dt.microsecond:06d}"
# Step 4: Append country indicator (emoji or code) if configured
if self.country_display:
return f"{time_str} {self.country_display}"
return time_str
def format(self, record: logging.LogRecord) -> str: # type: ignore[override]
"""Ensure context exists before formatting (defensive)."""
# Defensive: Add default context if missing
# (ContextFilter should have done this, but be extra safe)
if not hasattr(record, "context"):
record.context = record.name.split(".")[0][:10]
# Call parent's format method to build the log line
return super().format(record)
class ColorFormatter(logging.Formatter):
"""Wraps another formatter and adds ANSI color codes to level names.
WHY: Colors make logs much easier to scan visually, but we only want
them when outputting to a terminal (not log files or CI systems).
HOW: Detects TTY, honors NO_COLOR/FORCE_COLOR env vars, wraps level name.
"""
# ANSI color codes for each log level
_COLORS = {
logging.DEBUG: "\x1b[0;37m", # Gray
logging.INFO: "\x1b[0;32m", # Green
logging.WARNING: "\x1b[1;33m", # Yellow
logging.ERROR: "\x1b[1;31m", # Red
logging.CRITICAL: "\x1b[1;37;41m", # White on red background
}
_RESET = "\x1b[0m"
def __init__(self, base_formatter: logging.Formatter, force: bool | None = None) -> None:
super().__init__()
self.base = base_formatter
self.force = force
self._enabled = self._should_enable()
def format(self, record: logging.LogRecord) -> str: # type: ignore[override]
"""Format with base formatter, then colorize level name."""
# Step 1: Get the formatted message from base formatter
message = self.base.format(record)
# Step 2: If colors are disabled, return as-is
if not self._enabled:
return message
# Step 3: Find the color for this log level
color = self._COLORS.get(record.levelno)
if not color:
return message # No color defined for this level
# Step 4: Wrap the level name with ANSI color codes
# Find and replace the level name (e.g., "INFO", "ERROR") in the message
colored_level = f"{color}{record.levelname}{self._RESET}"
return message.replace(record.levelname, colored_level, 1)
def _should_enable(self) -> bool:
"""Detect if we should use colors (TTY detection)."""
# Step 1: Check force override first (explicit user choice)
if self.force is not None:
return self.force
# Step 2: Respect NO_COLOR environment variable (universal standard)
if os.environ.get("NO_COLOR"):
return False
# Step 3: Respect FORCE_COLOR environment variable
if os.environ.get("FORCE_COLOR"):
return True
# Step 4: Only enable colors if output is a terminal (TTY)
try:
return sys.stdout.isatty()
except AttributeError:
# Some environments don't have isatty() method
return False
# ===========================================================================
# SECTION 4: SAFE ROTATING FILE HANDLER
# ===========================================================================
# Custom handler that handles missing backup files during rotation.
class SafeRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """RotatingFileHandler that gracefully handles missing backup files.
    WHY: The standard RotatingFileHandler can crash with FileNotFoundError
    when backup files are manually deleted or missing due to race conditions.
    HOW: Override doRollover to catch and ignore FileNotFoundError when
    attempting to delete old backup files.
    NOTE(review): failures are reported via print() to stderr rather than
    logging, presumably to avoid re-entrant logging while the log file is
    mid-rotation -- confirm. Rotation is not guarded against concurrent
    rollover from multiple processes writing the same file.
    """
    def doRollover(self) -> None:
        """Perform log rotation with safe file deletion.

        Rotation order: close the stream, shift base.N -> base.N+1 from the
        highest index down, drop the oldest backup, rename the live file to
        base.1, then reopen a fresh stream (unless delay is set).
        """
        # Close the active stream before renaming files out from under it.
        if self.stream:
            self.stream.close()
            self.stream = None  # type: ignore[assignment]
        # Rotate existing backup files -- highest index first, so nothing is
        # overwritten before it has itself been shifted up.
        for i in range(self.backupCount - 1, 0, -1):
            sfn = self.rotation_filename(f"{self.baseFilename}.{i}")
            dfn = self.rotation_filename(f"{self.baseFilename}.{i + 1}")
            if os.path.exists(sfn):
                if os.path.exists(dfn):
                    # FileNotFoundError is an OSError subclass, so the more
                    # specific clause must come first (and it does).
                    try:
                        os.remove(dfn)
                    except FileNotFoundError:
                        # File was already deleted, ignore
                        pass
                    except OSError as e:
                        # Log other OS errors but continue rotation
                        print(f"Warning: Could not remove {dfn}: {e}", file=sys.stderr)
                try:
                    os.rename(sfn, dfn)
                except OSError as e:
                    print(f"Warning: Could not rename {sfn} to {dfn}: {e}", file=sys.stderr)
        # Remove oldest backup if it exists
        dfn = self.rotation_filename(f"{self.baseFilename}.{self.backupCount}")
        if os.path.exists(dfn):
            try:
                os.remove(dfn)
            except FileNotFoundError:
                # File was already deleted, ignore
                pass
            except OSError as e:
                print(f"Warning: Could not remove {dfn}: {e}", file=sys.stderr)
        # Rotate current log file
        if os.path.exists(self.baseFilename):
            dfn = self.rotation_filename(f"{self.baseFilename}.1")
            try:
                os.rename(self.baseFilename, dfn)
            except OSError as e:
                print(f"Warning: Could not rename {self.baseFilename} to {dfn}: {e}", file=sys.stderr)
        # Open new log file
        # NOTE(review): this override renames files directly and bypasses the
        # stdlib rotate()/rotator hooks -- custom rotators set on the handler
        # will not run; confirm that is acceptable.
        if not self.delay:
            self.stream = self._open()
# ===========================================================================
# SECTION 5: HANDLER BUILDERS
# ===========================================================================
# These functions create and configure handlers for console and file output.
def _build_console_handler(config: LoggerConfig, formatter: logging.Formatter) -> logging.Handler:
    """Create the stdout handler, wrapping the formatter in ANSI colors when enabled."""
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(config.console_level)
    if config.color:
        # ColorFormatter decides at construction whether colors actually apply
        # (TTY detection, NO_COLOR / FORCE_COLOR).
        stream_handler.setFormatter(ColorFormatter(formatter))
    else:
        stream_handler.setFormatter(formatter)
    return stream_handler
def _build_file_handler(config: LoggerConfig, formatter: logging.Formatter) -> logging.Handler:
    """Create the safe rotating file handler, creating the log directory if needed."""
    # The log directory may not exist on first boot of a device.
    os.makedirs(config.base_log_dir, exist_ok=True)
    target_path = os.path.join(config.base_log_dir, config.filename)
    rotating_handler = SafeRotatingFileHandler(
        target_path,
        maxBytes=config.max_bytes,
        backupCount=config.backup_count,
        encoding="utf-8",
    )
    rotating_handler.setLevel(config.file_level)
    rotating_handler.setFormatter(formatter)
    return rotating_handler
def _build_formatter(config: LoggerConfig) -> logging.Formatter:
    """Construct the StructuredFormatter described by *config*.

    Columns appear in a fixed order: timestamp, level, context, optional
    location fields (logger name, function, line), optional process/thread
    fields, then the message -- all joined with " | ".
    """
    columns: list[str] = []
    if config.use_timestamp:
        columns.append("%(asctime)s")
    # Level and context are always present.
    columns.append(f"%(levelname)-{config.level_width}s")
    columns.append(f"%(context)-{config.context_width}s")
    # Optional columns, in their fixed display order.
    optional_columns = [
        (config.show_logger_name, f"%(name)-{config.logger_name_width}s"),
        (config.show_function, f"%(funcName)-{config.func_name_width}s"),
        (config.show_line, f"%(lineno)-{config.lineno_width}d"),
        (config.use_process_info, "%(process)d"),
        (config.use_thread_info, "%(threadName)s"),
    ]
    columns.extend(part for enabled, part in optional_columns if enabled)
    columns.append("%(message)s")
    # Pick the date format matching the 12/24-hour preference.
    date_format = config.datefmt_12h if config.use_12_hour else config.datefmt_24h
    return StructuredFormatter(
        fmt=" | ".join(columns),
        datefmt=date_format if config.use_timestamp else None,
        timezone=config.timezone,
        show_country_code=config.show_country_code,
        show_country_emoji=config.show_country_emoji,
    )
# ===========================================================================
# SECTION 6: PUBLIC API
# ===========================================================================
# This is what users interact with - same as old logger.
class OizomLogger:
    """Main logger wrapper - backward compatible with old oizom_logger.

    USAGE:
        logger = OizomLogger(__name__)
        logger.info_with_context("INIT", "Starting sensor setup")

        # Or get the underlying logger for standard logging calls:
        log = logger.get()
        log.info("Standard log message")
    """

    def __init__(self, logger_name: str, **kwargs: Any) -> None:
        """Initialize logger with optional config overrides.

        WHY: The old logger took 30+ kwargs. They are still accepted for
        compatibility, but are converted into an immutable LoggerConfig
        for cleaner internal handling.
        """
        self.config = LoggerConfig.from_dict(logger_name, kwargs)
        self.logger = logging.getLogger(self.config.name)
        self._configure()

    def _configure(self) -> None:
        """Attach handlers, filters, and formatters to the wrapped logger."""
        # Logger level must admit the more verbose sink (lower = more verbose).
        self.logger.setLevel(min(self.config.console_level, self.config.file_level))
        # Close and remove existing handlers so reconfiguration neither
        # duplicates output nor leaks the previous rotating-file descriptor.
        for old_handler in self.logger.handlers[:]:
            self.logger.removeHandler(old_handler)
            old_handler.close()
        formatter = _build_formatter(self.config)
        console_handler = _build_console_handler(self.config, formatter)
        file_handler = _build_file_handler(self.config, formatter)
        # Ensure every record carries a 'context' attribute for the formatter.
        self.logger.filters.clear()
        self.logger.addFilter(ContextFilter(self.config.context_width))
        self.logger.addHandler(console_handler)
        self.logger.addHandler(file_handler)
        # Avoid double-logging through the root logger.
        self.logger.propagate = False

    def install_globally(self, replace_existing: bool = True) -> None:
        """Attach this configuration to the root logger (for 3rd-party libs).

        Args:
            replace_existing: when True, existing root handlers and filters
                are removed first (handlers are closed to release their
                file descriptors).
        """
        root = logging.getLogger()
        if replace_existing:
            for old_handler in root.handlers[:]:
                root.removeHandler(old_handler)
                old_handler.close()
            root.filters.clear()
        # Mirror this logger's level and context filter on the root logger.
        root.setLevel(self.logger.level)
        root.addFilter(ContextFilter(self.config.context_width))
        # Fresh handlers for root: sharing handler objects between loggers
        # would make teardown and reconfiguration order-dependent.
        formatter = _build_formatter(self.config)
        root.addHandler(_build_console_handler(self.config, formatter))
        root.addHandler(_build_file_handler(self.config, formatter))

    # --- Convenience methods for context-based logging ---------------------

    def log_with_context(self, level: str, context: str, message: str, *args: Any, **kwargs: Any) -> None:
        """Log *message* at *level* with a fixed-width context token.

        Args:
            level: level name, case-insensitive; unknown names fall back to INFO.
            context: short token (e.g. "INIT", "READ"), truncated/padded to
                the configured context width so columns stay aligned.
        """
        level_no = getattr(logging, level.upper(), logging.INFO)
        # Enforce the column width: truncate first, then pad.
        ctx = context[: self.config.context_width].ljust(self.config.context_width)
        # stacklevel=3 attributes funcName/lineno to the caller of the
        # *_with_context wrappers below. NOTE(review): callers invoking
        # log_with_context directly get attributed one frame too high.
        self.logger.log(level_no, message, *args, extra={"context": ctx}, stacklevel=3, **kwargs)

    def debug_with_context(self, context: str, message: str, *args: Any, **kwargs: Any) -> None:
        self.log_with_context("DEBUG", context, message, *args, **kwargs)

    def info_with_context(self, context: str, message: str, *args: Any, **kwargs: Any) -> None:
        self.log_with_context("INFO", context, message, *args, **kwargs)

    def warning_with_context(self, context: str, message: str, *args: Any, **kwargs: Any) -> None:
        self.log_with_context("WARNING", context, message, *args, **kwargs)

    def error_with_context(self, context: str, message: str, *args: Any, **kwargs: Any) -> None:
        self.log_with_context("ERROR", context, message, *args, **kwargs)

    def critical_with_context(self, context: str, message: str, *args: Any, **kwargs: Any) -> None:
        self.log_with_context("CRITICAL", context, message, *args, **kwargs)

    def get(self) -> logging.Logger:
        """Return the underlying stdlib logger for standard logging calls."""
        return self.logger
def apply_logger_config(config_dict: dict[str, Any]) -> None:
    """Reconfigure every already-configured logger at runtime.

    Useful when a config file changes on a running device. Only loggers
    that already have handlers (i.e. were set up via OizomLogger) are
    touched; bare placeholder loggers from plain getLogger() calls are
    left alone.

    Args:
        config_dict: flat dict of LoggerConfig field overrides; unknown
            keys are ignored and level values may be strings ("INFO").
    """
    # Empty name: this config applies to every logger, not one instance.
    config = LoggerConfig.from_dict("", config_dict)
    formatter = _build_formatter(config)
    # Snapshot the names first; getLogger() can mutate loggerDict.
    for logger_name in list(logging.Logger.manager.loggerDict.keys()):
        logger = logging.getLogger(logger_name)
        if not logger.handlers:
            continue  # never configured -- skip
        logger.setLevel(min(config.console_level, config.file_level))
        # Close old handlers before dropping them so the previous rotating
        # log files are not leaked as open descriptors.
        for old_handler in logger.handlers[:]:
            logger.removeHandler(old_handler)
            old_handler.close()
        logger.filters.clear()
        logger.addFilter(ContextFilter(config.context_width))
        logger.addHandler(_build_console_handler(config, formatter))
        logger.addHandler(_build_file_handler(config, formatter))
        # Keep records out of the root logger to avoid double output.
        logger.propagate = False
# ===========================================================================
# MODULE EXPORTS
# ===========================================================================
# Public API: most callers only need OizomLogger; apply_logger_config is for
# runtime reconfiguration. The other classes remain importable by name but
# are intentionally not advertised via star-imports.
__all__ = ["OizomLogger", "apply_logger_config"]