# S1005403_RisCC/target_simulator/utils/logger.py
"""Centralized logging helpers used by the GUI and core.
This module implements a queue-based logging system that safely forwards
LogRecord objects from background threads into GUI-updated handlers. It
provides a small Tkinter-friendly handler, queue integration helpers and
convenience utilities for applying saved logger levels.
Public API highlights:
- :func:`setup_basic_logging` — configure the global queue-based system.
- :func:`add_tkinter_handler` — attach a Tkinter-based log view.
- :func:`get_logger` — convenience wrapper around :mod:`logging`.
"""
import logging
import logging.handlers # For RotatingFileHandler
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from queue import Queue, Empty as QueueEmpty
from typing import Optional, Dict, Any
from contextlib import contextmanager
from logging import Logger
from target_simulator.utils.config_manager import ConfigManager
import os
import json
# Module-level logger for utils.logging helpers
logger = logging.getLogger(__name__)
# --- Module-level globals for the centralized logging queue system ---
# Shared queue filled by QueuePuttingHandler and drained on the GUI thread.
_global_log_queue: Optional[Queue[logging.LogRecord]] = None
# Real output handlers; records reach them only via the queue processor.
_actual_console_handler: Optional[logging.StreamHandler] = None
_actual_file_handler: Optional[logging.handlers.RotatingFileHandler] = None
_actual_tkinter_handler: Optional["TkinterTextHandler"] = None
# Tk `after` id of the next scheduled processing cycle (for cancellation).
_log_processor_after_id: Optional[str] = None
_logging_system_active: bool = False
_tk_root_instance_for_processing: Optional[tk.Tk] = None
_base_formatter: Optional[logging.Formatter] = None
# Optimizations: adaptive polling and batching
GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS = 200  # Reduced to 200 ms (5 Hz instead of 10 Hz)
LOG_BATCH_SIZE = 50  # Process at most 50 records per cycle to avoid GUI stalls
_last_log_time = 0.0  # Timestamp of last processed record, for adaptive polling
class TkinterTextHandler(logging.Handler):
    """
    A logging handler that directs log messages to a Tkinter Text widget.
    This handler is called directly from the GUI thread's processing loop.
    Optimizations:
    - Batches multiple log entries to reduce Tkinter widget operations
    - Limits total widget size to prevent memory bloat
    - Only scrolls to end if user hasn't scrolled up manually
    """
    def __init__(
        self, text_widget: tk.Text, level_colors: Dict[int, str], max_lines: int = 1000
    ):
        """Create the handler.

        Args:
            text_widget: Target Tk ``Text`` widget that receives log lines.
            level_colors: Mapping of logging level number -> Tk color string;
                entries with a falsy color are skipped.
            max_lines: Maximum line count kept in the widget; older lines are
                trimmed from the top once exceeded.
        """
        super().__init__()
        self.text_widget = text_widget
        self.level_colors = level_colors
        self.max_lines = max_lines
        self._pending_records = []  # Buffer for batched inserts, drained by flush_pending()
        self._last_yview = None  # Track user scroll position (currently unused)
        self._configure_tags()
    def _configure_tags(self):
        # Register one color tag per configured level; inserts are tagged with
        # the level name (e.g. "ERROR") so each level renders in its color.
        for level, color_value in self.level_colors.items():
            level_name = logging.getLevelName(level)
            if color_value:
                try:
                    self.text_widget.tag_config(level_name, foreground=color_value)
                except tk.TclError:
                    pass  # Widget might not be ready
    def emit(self, record: logging.LogRecord):
        """Buffer the record for batch processing."""
        try:
            if not self.text_widget.winfo_exists():
                # Widget destroyed: silently drop the record.
                return
            self._pending_records.append(record)
        except Exception as e:
            # A logging handler must never raise into the caller.
            print(f"Error in TkinterTextHandler.emit: {e}", flush=True)
    def flush_pending(self):
        """Flush all pending log records to the widget in a single operation."""
        if not self._pending_records:
            return
        try:
            if not self.text_widget.winfo_exists():
                # Widget is gone; discard the buffer instead of leaking it.
                self._pending_records.clear()
                return
            # Check if user has scrolled away from bottom
            yview = self.text_widget.yview()
            user_at_bottom = yview[1] >= 0.98  # Within 2% of bottom
            # Single state change for all inserts
            self.text_widget.configure(state=tk.NORMAL)
            # Batch insert all pending records
            for record in self._pending_records:
                msg = self.format(record)
                level_name = record.levelname
                self.text_widget.insert(tk.END, msg + "\n", (level_name,))
            # Trim old lines if exceeded max
            line_count = int(self.text_widget.index("end-1c").split(".")[0])
            if line_count > self.max_lines:
                excess = line_count - self.max_lines
                self.text_widget.delete("1.0", f"{excess}.0")
            self.text_widget.configure(state=tk.DISABLED)
            # Only auto-scroll if user was at bottom
            if user_at_bottom:
                self.text_widget.see(tk.END)
            self._pending_records.clear()
        except Exception as e:
            # Clear the buffer on failure too, so a broken widget cannot
            # cause unbounded memory growth.
            print(f"Error in TkinterTextHandler.flush_pending: {e}", flush=True)
            self._pending_records.clear()
class QueuePuttingHandler(logging.Handler):
    """Handler that forwards every received LogRecord into a shared queue.

    Attached as the sole handler on the root logger so all records are
    funneled to the GUI-thread queue processor instead of being emitted
    directly by the logging caller's thread.
    """

    def __init__(self, handler_queue: Queue[logging.LogRecord]):
        """Remember *handler_queue* as the destination for emitted records."""
        super().__init__()
        self.handler_queue = handler_queue

    def emit(self, record: logging.LogRecord):
        """Enqueue *record* without ever blocking the logging caller."""
        self.handler_queue.put_nowait(record)
def _process_global_log_queue():
    """
    GUI Thread: Periodically processes LogRecords from the _global_log_queue
    and dispatches them to the actual configured handlers.
    Optimizations:
    - Processes logs in batches (max LOG_BATCH_SIZE per cycle)
    - Adaptive polling: faster when logs are active, slower when idle
    - Single flush operation for Tkinter handler (batched writes)
    """
    global _logging_system_active, _log_processor_after_id, _last_log_time
    import time
    # Stop silently (and stop rescheduling) once the system is shut down or
    # the Tk root no longer exists.
    if (
        not _logging_system_active
        or not _tk_root_instance_for_processing
        or not _tk_root_instance_for_processing.winfo_exists()
    ):
        return
    processed_count = 0
    try:
        # Process up to LOG_BATCH_SIZE records per cycle to avoid GUI freezes
        while (
            _global_log_queue
            and not _global_log_queue.empty()
            and processed_count < LOG_BATCH_SIZE
        ):
            record = _global_log_queue.get_nowait()
            # Console and file handlers write immediately (fast, non-blocking)
            if _actual_console_handler:
                _actual_console_handler.handle(record)
            if _actual_file_handler:
                _actual_file_handler.handle(record)
            # Tkinter handler buffers the record (no widget operations yet)
            if _actual_tkinter_handler:
                _actual_tkinter_handler.handle(record)
            _global_log_queue.task_done()
            processed_count += 1
            _last_log_time = time.time()
    except QueueEmpty:
        pass  # Raced with empty(); nothing left this cycle.
    except Exception as e:
        # Never let a logging failure propagate into the Tk event loop.
        print(f"Error in log processing queue: {e}", flush=True)
    # Flush all pending Tkinter records in a single batch operation
    try:
        if _actual_tkinter_handler and hasattr(
            _actual_tkinter_handler, "flush_pending"
        ):
            _actual_tkinter_handler.flush_pending()
    except Exception as e:
        print(f"Error flushing Tkinter logs: {e}", flush=True)
    # Adaptive polling: faster interval if logs are recent, slower when idle
    try:
        time_since_last_log = time.time() - _last_log_time
        if time_since_last_log < 2.0 or processed_count >= LOG_BATCH_SIZE:
            # Recent activity or queue backlog: poll faster
            next_interval = GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS
        elif time_since_last_log < 10.0:
            # Moderate activity: normal polling
            next_interval = GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS * 2
        else:
            # Idle: slow polling to reduce CPU
            next_interval = GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS * 5
    except Exception:
        next_interval = GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS
    # Schedule next processing cycle
    if _logging_system_active:
        _log_processor_after_id = _tk_root_instance_for_processing.after(
            int(next_interval), _process_global_log_queue
        )
def setup_basic_logging(
    root_tk_instance_for_processor: tk.Tk,
    logging_config_dict: Optional[Dict[str, Any]] = None,
):
    """Configure the global, queue-based logging system.

    This sets up a small logging queue and a background processor that is
    polled from the provided Tk root. All records emitted anywhere in the
    process are funneled through a :class:`QueuePuttingHandler` attached to
    the root logger and dispatched to the real handlers on the GUI thread.

    Args:
        root_tk_instance_for_processor: Tk root used to schedule the queue
            processing callback via :meth:`tkinter.Tk.after`.
        logging_config_dict: Optional mapping controlling format, levels and
            enabled handlers. Recognized keys: ``format``, ``date_format``,
            ``default_root_level`` and ``enable_console``.
    """
    global _global_log_queue, _actual_console_handler, _actual_file_handler, _logging_system_active
    global _tk_root_instance_for_processing, _log_processor_after_id, _base_formatter
    if _logging_system_active:
        return  # Already configured; repeated calls are no-ops.
    if logging_config_dict is None:
        logging_config_dict = {}
    # BUGFIX: the default format previously read "% (asctime)s" (stray space
    # after '%'), which raises "unsupported format character '('" every time
    # a record is rendered. The correct %-style token is "%(asctime)s".
    log_format_str = logging_config_dict.get(
        "format", "%(asctime)s [%(levelname)-8s] %(name)-25s : %(message)s"
    )
    log_date_format_str = logging_config_dict.get("date_format", "%Y-%m-%d %H:%M:%S")
    _base_formatter = logging.Formatter(log_format_str, datefmt=log_date_format_str)
    _global_log_queue = Queue()
    _tk_root_instance_for_processing = root_tk_instance_for_processor
    root_logger = logging.getLogger()
    # Remove any pre-existing handlers so all output flows through the queue.
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)
    root_logger.setLevel(logging_config_dict.get("default_root_level", logging.INFO))
    if logging_config_dict.get("enable_console", True):
        _actual_console_handler = logging.StreamHandler()
        _actual_console_handler.setFormatter(_base_formatter)
        _actual_console_handler.setLevel(logging.DEBUG)
        # DO NOT attach console handler directly to root logger - it will be
        # processed through the queue system to avoid duplicate output
    queue_putter = QueuePuttingHandler(handler_queue=_global_log_queue)
    queue_putter.setLevel(logging.DEBUG)
    root_logger.addHandler(queue_putter)
    # Emit a small startup message so users running from console see logging is active
    try:
        root_logger.debug("Logging system initialized (queue-based).")
    except Exception:
        pass
    _logging_system_active = True
    _log_processor_after_id = _tk_root_instance_for_processing.after(
        GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS, _process_global_log_queue
    )
def add_tkinter_handler(gui_log_widget: tk.Text, logging_config_dict: Dict[str, Any]):
    """Attach (or replace) the Tkinter log handler of the queue system.

    No-op unless :func:`setup_basic_logging` has already run. Any previously
    installed Tkinter handler is closed first. The widget must be a live
    ``tk.Text``/``ScrolledText``; otherwise an error is printed and nothing
    is installed.
    """
    global _actual_tkinter_handler, _base_formatter
    # The queue system must be up and a formatter available.
    if not _logging_system_active or not _base_formatter:
        return
    # Replace any handler installed by a previous call.
    if _actual_tkinter_handler:
        _actual_tkinter_handler.close()
    widget_is_valid = (
        isinstance(gui_log_widget, (tk.Text, ScrolledText))
        and gui_log_widget.winfo_exists()
    )
    if not widget_is_valid:
        print(
            "ERROR: GUI log widget invalid, cannot add TkinterTextHandler.", flush=True
        )
        return
    handler = TkinterTextHandler(
        text_widget=gui_log_widget,
        level_colors=logging_config_dict.get("colors", {}),
    )
    handler.setFormatter(_base_formatter)
    handler.setLevel(logging.DEBUG)
    _actual_tkinter_handler = handler
    logger.info("Tkinter log handler added successfully.")
def get_logger(name: str) -> logging.Logger:
    """Return a :class:`logging.Logger` instance for the given name.

    This is a thin wrapper over :func:`logging.getLogger` kept for callers in
    the project for clarity and possible future extension.

    Note: this function was previously defined twice in this module (the
    undocumented duplicate shadowed this one); the copies have been merged.
    """
    return logging.getLogger(name)
@contextmanager
def temporary_log_level(logger: Logger, level: int):
    """Temporarily override *logger*'s level inside a ``with`` block.

    The previous level is restored on exit, even if the body raises.

    Usage:
        with temporary_log_level(logging.getLogger('some.name'), logging.DEBUG):
            # inside this block the logger will be DEBUG
            ...
    """
    previous = logger.level
    logger.setLevel(level)
    try:
        yield
    finally:
        # Always restore, regardless of how the block exited.
        logger.setLevel(previous)
def shutdown_logging_system():
    """Stop the queue-based logging system and flush any remaining records.

    Cancels the scheduled Tk ``after`` callback (if the root still exists),
    drains the queue one final time so buffered records reach the real
    handlers, then calls :func:`logging.shutdown`. Safe to call repeatedly;
    subsequent calls are no-ops.
    """
    global _logging_system_active, _log_processor_after_id
    if not _logging_system_active:
        return
    _logging_system_active = False
    if (
        _log_processor_after_id
        and _tk_root_instance_for_processing
        and _tk_root_instance_for_processing.winfo_exists()
    ):
        _tk_root_instance_for_processing.after_cancel(_log_processor_after_id)
        _log_processor_after_id = None
    # BUGFIX: the previous "final flush" called _process_global_log_queue(),
    # which returns immediately once _logging_system_active is False, so
    # queued records were silently dropped. Drain the queue directly instead.
    if _global_log_queue is not None:
        while True:
            try:
                record = _global_log_queue.get_nowait()
            except QueueEmpty:
                break
            if _actual_console_handler:
                _actual_console_handler.handle(record)
            if _actual_file_handler:
                _actual_file_handler.handle(record)
            if _actual_tkinter_handler:
                _actual_tkinter_handler.handle(record)
            _global_log_queue.task_done()
        # Push any Tkinter-buffered records into the widget as a last step.
        if _actual_tkinter_handler and hasattr(_actual_tkinter_handler, "flush_pending"):
            try:
                _actual_tkinter_handler.flush_pending()
            except Exception:
                pass  # Best-effort: widget may already be destroyed.
    logging.shutdown()
def apply_saved_logger_levels():
    """Apply saved per-logger levels read from configuration at startup.

    Preferences are loaded from a ``logger_prefs.json`` located next to the
    main settings file when present; otherwise the function falls back to
    ``general.logger_panel.saved_levels`` inside the main settings.

    Each entry maps a logger name to a level *name* (e.g. ``"DEBUG"``); only
    names that resolve to a valid numeric level are applied. The whole
    routine is best-effort: any configuration error is swallowed so a broken
    prefs file can never prevent application startup.

    Note: this function was previously defined twice in this module (the
    second copy shadowed the first); the duplicates have been merged.
    """
    try:
        cfg = ConfigManager()
    except Exception:
        # Configuration subsystem unavailable; nothing to apply.
        return
    try:
        # Prefer a dedicated logger_prefs.json next to the main settings file
        prefs_path = None
        cfg_path = getattr(cfg, "filepath", None)
        if cfg_path:
            prefs_path = os.path.join(os.path.dirname(cfg_path), "logger_prefs.json")
        saved = {}
        if prefs_path and os.path.exists(prefs_path):
            try:
                with open(prefs_path, "r", encoding="utf-8") as f:
                    jp = json.load(f)
                    if isinstance(jp, dict):
                        saved = jp.get("saved_levels", {}) or {}
            except Exception:
                saved = {}
        else:
            # Fallback to settings.json general.logger_panel
            try:
                gen = cfg.get_general_settings()
                lp = gen.get("logger_panel", {}) if isinstance(gen, dict) else {}
                saved = lp.get("saved_levels", {}) if isinstance(lp, dict) else {}
            except Exception:
                saved = {}
        for name, lvl_name in (saved or {}).items():
            try:
                # getLevelName returns an int for a known level name and a
                # "Level %s" string for unknown names; apply only valid ints.
                lvl_val = logging.getLevelName(lvl_name)
                if isinstance(lvl_val, int):
                    logging.getLogger(name).setLevel(lvl_val)
            except Exception:
                pass
    except Exception:
        pass