aggiunta funzione di debug per il protocollo SFP funzionante

This commit is contained in:
VALLONGOL 2025-10-16 11:14:38 +02:00
parent 075a4fda1c
commit 3bee128952
3 changed files with 474 additions and 64 deletions

View File

@ -1,4 +1,4 @@
# core/sfp_transport.py # core/transport/sfp_transport.py
""" """
Provides a reusable transport layer for the Simple Fragmentation Protocol (SFP). Provides a reusable transport layer for the Simple Fragmentation Protocol (SFP).
@ -14,17 +14,36 @@ import threading
import time import time
from typing import Dict, Callable, Optional from typing import Dict, Callable, Optional
from controlpanel import config # Rimosso l'import da config, ora la classe è indipendente
from controlpanel.utils.network import create_udp_socket, close_udp_socket from target_simulator.utils.network import create_udp_socket, close_udp_socket
from controlpanel.core.sfp_structures import SFPHeader from target_simulator.core.sfp_structures import SFPHeader
# Define a type hint for payload handlers # Define a type hint for payload handlers
PayloadHandler = Callable[[bytearray], None] PayloadHandler = Callable[[bytearray], None]
# Module-level logging control -------------------------------------------------
# You can override the logging level for this module by setting LOG_LEVEL at
# the top of this file (e.g. LOG_LEVEL = logging.DEBUG) or by configuring the
# returned `logger` object from outside the module:
#
# import logging
# import target_simulator.core.sfp_transport as st
# st.LOG_LEVEL = logging.DEBUG
# st.logger.setLevel(st.LOG_LEVEL)
#
# If left as None the logger will inherit configuration from the root logger.
LOG_LEVEL: Optional[int] = logging.INFO
# Create a module logger. Use module name for clear hierarchical logging.
logger = logging.getLogger(__name__)
if LOG_LEVEL is not None:
logger.setLevel(LOG_LEVEL)
class SfpTransport: class SfpTransport:
"""Manages SFP communication and payload reassembly.""" """Manages SFP communication and payload reassembly."""
def __init__(self, host: str, port: int, payload_handlers: Dict[int, PayloadHandler]): # --- INIZIO MODIFICHE ---
def __init__(self, host: str, port: int, payload_handlers: Dict[int, PayloadHandler], ack_config: Optional[Dict[int, int]] = None):
""" """
Initializes the SFP Transport layer. Initializes the SFP Transport layer.
@ -32,42 +51,43 @@ class SfpTransport:
host (str): The local IP address to bind the UDP socket to. host (str): The local IP address to bind the UDP socket to.
port (int): The local port to listen on. port (int): The local port to listen on.
payload_handlers (Dict[int, PayloadHandler]): A dictionary mapping payload_handlers (Dict[int, PayloadHandler]): A dictionary mapping
SFP_FLOW IDs (as integers) to their corresponding handler functions. SFP_FLOW IDs to their corresponding handler functions.
Each handler will be called with the complete payload (bytearray). ack_config (Optional[Dict[int, int]]): A dictionary mapping SFP_FLOW IDs
to the SFP_WIN value to use in the ACK packet. If None, defaults will be used.
""" """
# --- FINE MODIFICHE ---
self._log_prefix = "[SfpTransport]" self._log_prefix = "[SfpTransport]"
logging.info(f"{self._log_prefix} Initializing for {host}:{port}...") logger.info(f"{self._log_prefix} Initializing for {host}:{port}...")
self._host = host self._host = host
self._port = port self._port = port
self._payload_handlers = payload_handlers self._payload_handlers = payload_handlers
# --- INIZIO MODIFICHE ---
self._ack_config = ack_config if ack_config is not None else {}
# --- FINE MODIFICHE ---
self._socket: Optional[socket.socket] = None self._socket: Optional[socket.socket] = None
self._receiver_thread: Optional[threading.Thread] = None self._receiver_thread: Optional[threading.Thread] = None
self._stop_event = threading.Event() self._stop_event = threading.Event()
# Reassembly state dictionaries, managed by this transport layer
self._fragments: Dict[tuple, Dict[int, int]] = {} self._fragments: Dict[tuple, Dict[int, int]] = {}
self._buffers: Dict[tuple, bytearray] = {} self._buffers: Dict[tuple, bytearray] = {}
logging.debug( logger.debug(
f"{self._log_prefix} Registered handlers for flows: " f"{self._log_prefix} Registered handlers for flows: "
f"{[chr(k) if 32 <= k <= 126 else k for k in self._payload_handlers.keys()]}" f"{[chr(k) if 32 <= k <= 126 else k for k in self._payload_handlers.keys()]}"
) )
logger.debug(f"{self._log_prefix} ACK window config: {self._ack_config}")
def start(self) -> bool: def start(self) -> bool:
""" """Starts the transport layer... (nessuna modifica qui)"""
Starts the transport layer by creating the socket and launching the receiver thread.
Returns:
bool: True if started successfully, False otherwise.
"""
if self._receiver_thread is not None and self._receiver_thread.is_alive(): if self._receiver_thread is not None and self._receiver_thread.is_alive():
logging.warning(f"{self._log_prefix} Start called, but receiver is already running.") logger.warning(f"{self._log_prefix} Start called, but receiver is already running.")
return True return True
self._socket = create_udp_socket(self._host, self._port) self._socket = create_udp_socket(self._host, self._port)
if not self._socket: if not self._socket:
logging.critical(f"{self._log_prefix} Failed to create and bind socket. Cannot start.") logger.critical(f"{self._log_prefix} Failed to create and bind socket. Cannot start.")
return False return False
self._stop_event.clear() self._stop_event.clear()
@ -75,35 +95,32 @@ class SfpTransport:
target=self._receive_loop, name="SfpTransportThread", daemon=True target=self._receive_loop, name="SfpTransportThread", daemon=True
) )
self._receiver_thread.start() self._receiver_thread.start()
logging.info(f"{self._log_prefix} Receiver thread started.")
return True return True
def shutdown(self): def shutdown(self):
"""Stops the receiver thread and closes the socket.""" """Stops the receiver thread... (nessuna modifica qui)"""
logging.info(f"{self._log_prefix} Shutdown initiated.")
self._stop_event.set() self._stop_event.set()
# The socket is closed here to interrupt the blocking recvfrom call
if self._socket: if self._socket:
close_udp_socket(self._socket) close_udp_socket(self._socket)
self._socket = None self._socket = None
if self._receiver_thread and self._receiver_thread.is_alive(): if self._receiver_thread and self._receiver_thread.is_alive():
logging.debug(f"{self._log_prefix} Waiting for receiver thread to join...") logger.debug(f"{self._log_prefix} Waiting for receiver thread to join...")
self._receiver_thread.join(timeout=2.0) self._receiver_thread.join(timeout=2.0)
if self._receiver_thread.is_alive(): if self._receiver_thread.is_alive():
logging.warning(f"{self._log_prefix} Receiver thread did not join cleanly.") logger.warning(f"{self._log_prefix} Receiver thread did not join cleanly.")
logging.info(f"{self._log_prefix} Shutdown complete.") logger.info(f"{self._log_prefix} Shutdown complete.")
def _receive_loop(self): def _receive_loop(self):
"""The main loop that listens for UDP packets and processes them.""" """The main loop... (nessuna modifica qui)"""
log_prefix = f"{self._log_prefix} Loop" log_prefix = f"{self._log_prefix} Loop"
logging.info(f"{log_prefix} Starting receive loop.") logger.info(f"{log_prefix} Starting receive loop.")
while not self._stop_event.is_set(): while not self._stop_event.is_set():
if not self._socket: if not self._socket:
logging.error(f"{log_prefix} Socket is not available. Stopping loop.") logger.error(f"{log_prefix} Socket is not available. Stopping loop.")
break break
try: try:
@ -113,30 +130,29 @@ class SfpTransport:
except socket.timeout: except socket.timeout:
continue continue
except OSError: except OSError:
# This is expected when the socket is closed during shutdown
if not self._stop_event.is_set(): if not self._stop_event.is_set():
logging.error(f"{log_prefix} Socket error.", exc_info=True) logger.error(f"{log_prefix} Socket error.", exc_info=True)
break break
except Exception: except Exception:
logging.exception(f"{log_prefix} Unexpected error in recvfrom.") logger.exception(f"{log_prefix} Unexpected error in recvfrom.")
time.sleep(0.01) time.sleep(0.01)
continue continue
self._process_packet(data, addr) self._process_packet(data, addr)
logging.info(f"{log_prefix} Receive loop terminated.") logger.info(f"{log_prefix} Receive loop terminated.")
def _process_packet(self, data: bytes, addr: tuple): def _process_packet(self, data: bytes, addr: tuple):
"""Parses an SFP packet and handles fragment reassembly.""" """Parses an SFP packet... (nessuna modifica qui)"""
header_size = SFPHeader.size() header_size = SFPHeader.size()
if len(data) < header_size: if len(data) < header_size:
logging.warning(f"Packet from {addr} is too small for SFP header. Ignoring.") logger.warning(f"Packet from {addr} is too small for SFP header. Ignoring.")
return return
try: try:
header = SFPHeader.from_buffer_copy(data) header = SFPHeader.from_buffer_copy(data)
except (ValueError, TypeError): except (ValueError, TypeError):
logging.error(f"Failed to parse SFP header from {addr}. Ignoring.") logger.error(f"Failed to parse SFP header from {addr}. Ignoring.")
return return
flow, tid = header.SFP_FLOW, header.SFP_TID flow, tid = header.SFP_FLOW, header.SFP_TID
@ -145,60 +161,52 @@ class SfpTransport:
total_size = header.SFP_TOTSIZE total_size = header.SFP_TOTSIZE
key = (flow, tid) key = (flow, tid)
# Handle ACK Request
if header.SFP_FLAGS & 0x01: if header.SFP_FLAGS & 0x01:
self._send_ack(addr, data[:header_size]) self._send_ack(addr, data[:header_size])
# Validate packet metadata
if total_frags == 0 or total_frags > 60000 or total_size <= 0: if total_frags == 0 or total_frags > 60000 or total_size <= 0:
logging.warning(f"Invalid metadata for {key}: total_frags={total_frags}, total_size={total_size}. Ignoring.") logger.warning(f"Invalid metadata for {key}: total_frags={total_frags}, total_size={total_size}. Ignoring.")
return return
# Start of a new transaction
if frag == 0: if frag == 0:
self._cleanup_lingering_transactions(flow, tid) self._cleanup_lingering_transactions(flow, tid)
logging.debug(f"New transaction started for key={key}. Total size: {total_size} bytes.") logger.debug(f"New transaction started for key={key}. Total size: {total_size} bytes.")
self._fragments[key] = {} self._fragments[key] = {}
try: try:
self._buffers[key] = bytearray(total_size) self._buffers[key] = bytearray(total_size)
except (MemoryError, ValueError): except (MemoryError, ValueError):
logging.error(f"Failed to allocate {total_size} bytes for key={key}. Ignoring transaction.") logger.error(f"Failed to allocate {total_size} bytes for key={key}. Ignoring transaction.")
self._fragments.pop(key, None) self._fragments.pop(key, None)
return return
# Check if we are tracking this transaction
if key not in self._buffers or key not in self._fragments: if key not in self._buffers or key not in self._fragments:
logging.debug(f"Ignoring fragment {frag} for untracked transaction key={key}.") logger.debug(f"Ignoring fragment {frag} for untracked transaction key={key}.")
return return
# Store fragment info and copy payload
self._fragments[key][frag] = total_frags self._fragments[key][frag] = total_frags
payload = data[header_size:] payload = data[header_size:]
bytes_to_copy = min(pl_size, len(payload)) bytes_to_copy = min(pl_size, len(payload))
if (pl_offset + bytes_to_copy) > len(self._buffers[key]): if (pl_offset + bytes_to_copy) > len(self._buffers[key]):
logging.error(f"Payload for key={key}, frag={frag} would overflow buffer. Ignoring.") logger.error(f"Payload for key={key}, frag={frag} would overflow buffer. Ignoring.")
return return
self._buffers[key][pl_offset : pl_offset + bytes_to_copy] = payload[:bytes_to_copy] self._buffers[key][pl_offset : pl_offset + bytes_to_copy] = payload[:bytes_to_copy]
# Check for completion
if len(self._fragments[key]) == total_frags: if len(self._fragments[key]) == total_frags:
#logging.info(f"Transaction complete for key={key}. Handing off to application layer.") logger.debug(f"Transaction complete for key={key}. Handing off to application layer.")
# Retrieve completed buffer and clean up state for this key
completed_payload = self._buffers.pop(key) completed_payload = self._buffers.pop(key)
self._fragments.pop(key) self._fragments.pop(key)
# Find and call the appropriate handler
handler = self._payload_handlers.get(flow) handler = self._payload_handlers.get(flow)
if handler: if handler:
try: try:
handler(completed_payload) handler(completed_payload)
except Exception: except Exception:
logging.exception(f"Error executing payload handler for flow {flow}.") logger.exception(f"Error executing payload handler for flow {flow}.")
else: else:
logging.warning(f"No payload handler registered for flow ID {flow}.") logger.warning(f"No payload handler registered for flow ID {flow}.")
def _send_ack(self, dest_addr: tuple, original_header_bytes: bytes): def _send_ack(self, dest_addr: tuple, original_header_bytes: bytes):
"""Sends an SFP ACK packet back to the sender.""" """Sends an SFP ACK packet back to the sender."""
@ -209,32 +217,29 @@ class SfpTransport:
ack_header = bytearray(original_header_bytes) ack_header = bytearray(original_header_bytes)
flow = ack_header[SFPHeader.get_field_offset("SFP_FLOW")] flow = ack_header[SFPHeader.get_field_offset("SFP_FLOW")]
# Determine window size based on flow # --- INIZIO MODIFICHE ---
window_size = 0 # Usa la configurazione passata al costruttore. Se non c'è una voce
if flow == ord('M'): # MFD # per questo flow, usa 0 come default sicuro.
window_size = config.ACK_WINDOW_SIZE_MFD window_size = self._ack_config.get(flow, 0)
elif flow == ord('S'): # SAR # --- FINE MODIFICHE ---
window_size = config.ACK_WINDOW_SIZE_SAR
# Modify header for ACK response
ack_header[SFPHeader.get_field_offset("SFP_DIRECTION")] = 0x3C # '<' ack_header[SFPHeader.get_field_offset("SFP_DIRECTION")] = 0x3C # '<'
ack_header[SFPHeader.get_field_offset("SFP_WIN")] = window_size ack_header[SFPHeader.get_field_offset("SFP_WIN")] = window_size
original_flags = ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] original_flags = ack_header[SFPHeader.get_field_offset("SFP_FLAGS")]
ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] = (original_flags | 0x01) & ~0x02 ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] = (original_flags | 0x01) & ~0x02
self._socket.sendto(ack_header, dest_addr) self._socket.sendto(ack_header, dest_addr)
logging.debug(f"{log_prefix} Sent to {dest_addr} for flow {chr(flow) if 32<=flow<=126 else flow}.") logger.debug(f"{log_prefix} Sent to {dest_addr} for flow {chr(flow) if 32<=flow<=126 else flow} with WIN={window_size}.")
except Exception: except Exception:
logging.exception(f"{log_prefix} Failed to send to {dest_addr}.") logger.exception(f"{log_prefix} Failed to send to {dest_addr}.")
def _cleanup_lingering_transactions(self, current_flow: int, current_tid: int): def _cleanup_lingering_transactions(self, current_flow: int, current_tid: int):
"""Removes old, incomplete transactions for the same flow.""" """Removes old, incomplete transactions... (nessuna modifica qui)"""
# This is a simplified cleanup. The original was more complex for stats.
keys_to_remove = [ keys_to_remove = [
key for key in self._fragments key for key in self._fragments
if key[0] == current_flow and key[1] != current_tid if key[0] == current_flow and key[1] != current_tid
] ]
for key in keys_to_remove: for key in keys_to_remove:
logging.warning(f"Cleaning up lingering/incomplete transaction for key={key}.") logger.warning(f"Cleaning up lingering/incomplete transaction for key={key}.")
self._fragments.pop(key, None) self._fragments.pop(key, None)
self._buffers.pop(key, None) self._buffers.pop(key, None)

View File

@ -23,6 +23,7 @@ from target_simulator.core.models import Scenario, Target
from target_simulator.utils.logger import get_logger, shutdown_logging_system from target_simulator.utils.logger import get_logger, shutdown_logging_system
from target_simulator.utils.config_manager import ConfigManager from target_simulator.utils.config_manager import ConfigManager
from target_simulator.gui.sfp_debug_window import SfpDebugWindow
GUI_QUEUE_POLL_INTERVAL_MS = 100 GUI_QUEUE_POLL_INTERVAL_MS = 100
@ -47,6 +48,7 @@ class MainView(tk.Tk):
self.lru_communicator: Optional[CommunicatorInterface] = None self.lru_communicator: Optional[CommunicatorInterface] = None
self.scenario = Scenario() self.scenario = Scenario()
self.current_scenario_name: Optional[str] = None self.current_scenario_name: Optional[str] = None
self.sfp_debug_window: Optional[SfpDebugWindow] = None
# --- Simulation Engine --- # --- Simulation Engine ---
self.simulation_engine: Optional[SimulationEngine] = None self.simulation_engine: Optional[SimulationEngine] = None
@ -239,6 +241,10 @@ class MainView(tk.Tk):
settings_menu.add_command( settings_menu.add_command(
label="Radar Config...", command=self._open_radar_config label="Radar Config...", command=self._open_radar_config
) )
debug_menu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label="Debug", menu=debug_menu)
debug_menu.add_command(label="SFP Packet Inspector...", command=self._open_sfp_debug_window)
def _create_statusbar(self): def _create_statusbar(self):
status_bar = ttk.Frame(self, relief=tk.SUNKEN) status_bar = ttk.Frame(self, relief=tk.SUNKEN)
@ -669,3 +675,14 @@ class MainView(tk.Tk):
shutdown_logging_system() shutdown_logging_system()
self.destroy() self.destroy()
def _open_sfp_debug_window(self):
"""Opens the SFP debug window, ensuring only one instance exists."""
if self.sfp_debug_window and self.sfp_debug_window.winfo_exists():
self.sfp_debug_window.lift()
self.sfp_debug_window.focus_force()
self.logger.info("SFP Packet Inspector window is already open.")
return
self.logger.info("Opening SFP Packet Inspector window...")
self.sfp_debug_window = SfpDebugWindow(self)

View File

@ -0,0 +1,388 @@
# target_simulator/gui/sfp_debug_window.py
"""
Provides a Toplevel window for debugging the SFP transport layer.
This version uses a sampling approach to handle high-frequency data streams
without overwhelming the GUI thread.
"""
import tkinter as tk
from tkinter import ttk, scrolledtext
import logging
import threading
import ctypes
import time
from typing import Dict, Callable, Optional, Any
# Third-party imports for image display
try:
from PIL import Image, ImageTk
import numpy as np
import cv2
_IMAGE_LIBS_AVAILABLE = True
except ImportError:
_IMAGE_LIBS_AVAILABLE = False
# Imports from the project structure
from target_simulator.core.sfp_transport import SfpTransport, PayloadHandler
from target_simulator.core.sfp_structures import ImageLeaderData, SFPHeader
# --- Helper Class for Routing and Buffering Payloads ---
class DebugPayloadRouter:
    """
    Thread-safe router that keeps only the most recent payload per flow,
    letting the GUI sample ("poll") high-frequency data at its own pace.
    """

    def __init__(self):
        self._log_prefix = "[DebugPayloadRouter]"
        self._lock = threading.Lock()
        # Maps a human-readable flow name to the newest payload seen for it.
        self._latest_payloads: Dict[str, bytearray] = {}
        logging.info(f"{self._log_prefix} Initialized.")

    def get_handlers(self) -> "Dict[int, PayloadHandler]":
        """Return per-flow callbacks that refresh the last-payload buffer."""
        flow_names = {ord('M'): 'MFD', ord('S'): 'SAR',
                      ord('B'): 'BIN', ord('J'): 'JSON'}
        # Bind each flow name as a default argument to avoid the
        # late-binding-closure pitfall.
        return {
            flow_id: (lambda payload, _name=name: self._update_last_payload(_name, payload))
            for flow_id, name in flow_names.items()
        }

    def _update_last_payload(self, flow_id: str, payload: bytearray):
        """Record *payload* as the newest data for *flow_id* (thread-safe)."""
        with self._lock:
            self._latest_payloads[flow_id] = payload

    def get_and_clear_latest_payloads(self) -> Dict[str, bytearray]:
        """
        Atomically hand back every payload received since the previous call
        and reset the internal buffer.

        Returns:
            Dict[str, bytearray]: the latest payload for each flow name.
        """
        with self._lock:
            drained, self._latest_payloads = self._latest_payloads, {}
        return drained
# --- Main Debug Window Class ---
class SfpDebugWindow(tk.Toplevel):
    """A self-contained SFP debugging and packet inspection window."""

    # Sampling period (ms) for the GUI update loop that drains the router.
    GUI_POLL_INTERVAL_MS = 250  # Poll for new data 4 times per second
    def __init__(self, master):
        """Create the inspector window as a transient child of *master*.

        Args:
            master: Parent Tk widget. If it exposes a ``config_manager``
                attribute, it is used to read the saved image display size.
        """
        super().__init__(master)
        self.title("SFP Packet Inspector")
        self.geometry("900x700")
        self.transient(master)
        self.logger = logging.getLogger(__name__)
        # Transport is created on demand in _on_connect, torn down in _on_disconnect.
        self.sfp_transport: Optional[SfpTransport] = None
        self.payload_router = DebugPayloadRouter()
        # Keep PhotoImage references alive: Tk only shows images that are
        # still referenced from Python.
        self.mfd_photo: Optional[ImageTk.PhotoImage] = None
        self.sar_photo: Optional[ImageTk.PhotoImage] = None
        # Read image display size from settings (general.image_display.size)
        try:
            gm = getattr(master, "config_manager", None)
            general = gm.get_general_settings() if gm else {}
            img_conf = general.get("image_display", {})
            self.image_area_size = int(img_conf.get("size", 150))
        except Exception:
            # Fall back to a sane default when settings are missing/corrupt.
            self.image_area_size = 150
        self._create_widgets()
        self.protocol("WM_DELETE_WINDOW", self._on_close)
        # Kick off the periodic sampling loop on the Tk event loop.
        self.after(self.GUI_POLL_INTERVAL_MS, self._process_latest_payloads)
    def _create_widgets(self):
        """Build the connection toolbar and the tabbed data-display notebook."""
        # --- Connection Controls ---
        conn_frame = ttk.LabelFrame(self, text="Connection", padding=5)
        conn_frame.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
        ttk.Label(conn_frame, text="IP:").pack(side=tk.LEFT, padx=(5, 2))
        self.ip_var = tk.StringVar(value="127.0.0.1")
        ttk.Entry(conn_frame, textvariable=self.ip_var, width=15).pack(side=tk.LEFT)
        ttk.Label(conn_frame, text="Port:").pack(side=tk.LEFT, padx=(10, 2))
        self.port_var = tk.StringVar(value="55556")
        ttk.Entry(conn_frame, textvariable=self.port_var, width=7).pack(side=tk.LEFT)
        self.connect_btn = ttk.Button(conn_frame, text="Connect", command=self._on_connect)
        self.connect_btn.pack(side=tk.LEFT, padx=(10, 5))
        self.disconnect_btn = ttk.Button(conn_frame, text="Disconnect", command=self._on_disconnect, state=tk.DISABLED)
        self.disconnect_btn.pack(side=tk.LEFT, padx=5)
        # Button to configure image display size
        self.image_size_btn = ttk.Button(conn_frame, text="Image size...", command=self._open_image_size_dialog)
        self.image_size_btn.pack(side=tk.LEFT, padx=5)
        # --- Data Display Notebook ---
        self.notebook = ttk.Notebook(self)
        self.notebook.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)
        self.log_tab = scrolledtext.ScrolledText(self.notebook, state=tk.DISABLED, wrap=tk.WORD, font=("Consolas", 9))
        self.notebook.add(self.log_tab, text="Raw Log")
        # Image tabs only make sense when PIL/numpy/cv2 imported successfully.
        if _IMAGE_LIBS_AVAILABLE:
            self.mfd_tab = self._create_image_tab("MFD Image")
            self.notebook.add(self.mfd_tab["frame"], text="MFD Image")
            self.sar_tab = self._create_image_tab("SAR Image")
            self.notebook.add(self.sar_tab["frame"], text="SAR Image")
        self.bin_tab = scrolledtext.ScrolledText(self.notebook, state=tk.DISABLED, wrap=tk.NONE, font=("Consolas", 10))
        self.notebook.add(self.bin_tab, text="Binary (Hex)")
        self.json_tab = scrolledtext.ScrolledText(self.notebook, state=tk.DISABLED, wrap=tk.WORD, font=("Consolas", 10))
        self.notebook.add(self.json_tab, text="JSON")
def _create_image_tab(self, title: str) -> Dict:
frame = ttk.Frame(self.notebook)
# Fixed-size container to keep UI tidy. Image area will be size x size px.
image_container = ttk.Frame(frame, width=self.image_area_size, height=self.image_area_size, relief=tk.SUNKEN)
image_container.pack(pady=5, padx=5)
image_container.pack_propagate(False)
image_label = ttk.Label(image_container, text=f"Waiting for {title}...", anchor=tk.CENTER)
image_label.pack(fill=tk.BOTH, expand=True)
hex_view = scrolledtext.ScrolledText(frame, height=8, state=tk.DISABLED, wrap=tk.NONE, font=("Consolas", 9))
hex_view.pack(fill=tk.BOTH, expand=True, pady=5, padx=5)
return {"frame": frame, "image_label": image_label, "hex_view": hex_view, "image_container": image_container}
def _open_image_size_dialog(self):
"""Open a small dialog to change the image display size and persist it to settings."""
dlg = tk.Toplevel(self)
dlg.title("Image Size")
dlg.transient(self)
dlg.grab_set()
ttk.Label(dlg, text="Image area size (px):").pack(padx=10, pady=(10, 2))
size_var = tk.StringVar(value=str(self.image_area_size))
entry = ttk.Entry(dlg, textvariable=size_var, width=8)
entry.pack(padx=10, pady=(0, 10))
btn_frame = ttk.Frame(dlg)
btn_frame.pack(padx=10, pady=(0, 10))
def on_save():
try:
v = int(size_var.get())
if v <= 0:
raise ValueError()
except Exception:
message = "Please enter a positive integer for image size."
try:
tk.messagebox.showerror("Invalid value", message, parent=dlg)
except Exception:
pass
return
# Apply to current window
self.image_area_size = v
# Update existing containers if present
for tab in (getattr(self, 'mfd_tab', None), getattr(self, 'sar_tab', None)):
if tab and 'image_container' in tab:
tab['image_container'].config(width=v, height=v)
# Persist to settings via ConfigManager on master (if available)
gm = getattr(self.master, 'config_manager', None)
if gm:
general = gm.get_general_settings() or {}
image_display = general.get('image_display', {})
image_display['size'] = v
general['image_display'] = image_display
gm.save_general_settings(general)
dlg.destroy()
def on_cancel():
dlg.destroy()
ttk.Button(btn_frame, text="Cancel", command=on_cancel).pack(side=tk.RIGHT, padx=(0, 5))
ttk.Button(btn_frame, text="Save", command=on_save).pack(side=tk.RIGHT)
def _on_connect(self):
ip = self.ip_var.get()
try:
port = int(self.port_var.get())
except ValueError:
self._log_to_widget("ERROR: Invalid port number.", "ERROR")
return
self._log_to_widget(f"Attempting to connect to {ip}:{port}...")
ack_config = {ord('M'): 32, ord('S'): 16}
self.sfp_transport = SfpTransport(
host=ip, port=port,
payload_handlers=self.payload_router.get_handlers(),
ack_config=ack_config
)
if self.sfp_transport.start():
self._log_to_widget("Connection successful. Listening for packets...", "INFO")
self.connect_btn.config(state=tk.DISABLED)
self.disconnect_btn.config(state=tk.NORMAL)
else:
self._log_to_widget("Connection failed. Check IP/Port and logs.", "ERROR")
self.sfp_transport = None
def _on_disconnect(self):
if self.sfp_transport:
self._log_to_widget("Disconnecting...", "INFO")
self.sfp_transport.shutdown()
self.sfp_transport = None
self.connect_btn.config(state=tk.NORMAL)
self.disconnect_btn.config(state=tk.DISABLED)
self._log_to_widget("Disconnected.", "INFO")
    def _on_close(self):
        """Handle WM_DELETE_WINDOW: tear down the transport, then destroy."""
        self.logger.info("SFP Debug Window closing.")
        # Disconnect first so the receiver thread and socket are released.
        self._on_disconnect()
        self.destroy()
def _process_latest_payloads(self):
"""GUI-thread loop to sample and display the latest payloads."""
# Get all new payloads that have arrived since the last check
new_payloads = self.payload_router.get_and_clear_latest_payloads()
# If there are new payloads, process them
if new_payloads:
self._log_to_widget(f"Processing {len(new_payloads)} new payload(s) for flows: {list(new_payloads.keys())}")
for flow_id, payload in new_payloads.items():
if flow_id == 'MFD' and _IMAGE_LIBS_AVAILABLE:
self._display_image_data(payload, self.mfd_tab, 'mfd_photo')
#self.notebook.select(self.mfd_tab["frame"])
elif flow_id == 'SAR' and _IMAGE_LIBS_AVAILABLE:
self._display_image_data(payload, self.sar_tab, 'sar_photo')
#self.notebook.select(self.sar_tab["frame"])
elif flow_id == 'BIN':
self._display_hex_data(payload, self.bin_tab)
#self.notebook.select(self.bin_tab)
elif flow_id == 'JSON':
self._display_json_data(payload, self.json_tab)
#self.notebook.select(self.json_tab)
# Reschedule the next check
self.after(self.GUI_POLL_INTERVAL_MS, self._process_latest_payloads)
    def _display_image_data(self, payload: bytearray, tab_widgets: Dict[str, Any], photo_attr: str):
        """Parses an image payload and displays it. Now handles simplified structure.

        Args:
            payload: Reassembled SFP payload starting with an ImageLeaderData
                header followed by raw pixel rows.
            tab_widgets: Dict built by _create_image_tab ("image_label",
                "hex_view", "image_container", ...).
            photo_attr: Attribute name on self where the PhotoImage reference
                is stored, keeping it alive for Tk.
        """
        try:
            if len(payload) < ctypes.sizeof(ImageLeaderData):
                raise ValueError("Payload smaller than ImageLeaderData header.")
            leader = ImageLeaderData.from_buffer(payload)
            # NOTE(review): assumes DY = height, DX = width and STRIDE is in
            # pixels (not bytes) — confirm against sfp_structures.
            h, w, bpp = leader.HEADER_DATA.DY, leader.HEADER_DATA.DX, leader.HEADER_DATA.BPP
            stride = leader.HEADER_DATA.STRIDE
            offset = ctypes.sizeof(ImageLeaderData)
            if not (h > 0 and w > 0 and bpp in [1, 2] and stride >= w):
                raise ValueError(f"Invalid image dimensions in header: {w}x{h}, bpp={bpp}, stride={stride}")
            # 1 byte/pixel -> uint8, 2 bytes/pixel -> uint16.
            if bpp == 1: dtype = np.uint8
            else: dtype = np.uint16
            expected_size = stride * h * bpp
            if (offset + expected_size) > len(payload):
                # Fallback for old format where PIXEL_TAG was at the end of leader
                offset_fallback = ctypes.sizeof(SFPHeader) + ctypes.sizeof(ImageLeaderData) - ctypes.sizeof(leader.PIXEL_TAG)
                if (offset_fallback + expected_size) <= len(payload):
                    offset = offset_fallback
                else:
                    raise ValueError(f"Incomplete image data. Expected {expected_size} bytes, got {len(payload) - offset}")
            # Zero-copy view over the pixel bytes inside the payload buffer.
            pixel_data_view = np.ndarray(
                shape=(h, stride),
                dtype=dtype,
                buffer=payload,
                offset=offset
            )
            # Crop to actual width if stride is larger
            image_data = pixel_data_view[:, :w]
            # Normalize to 8-bit for display, then grayscale -> RGB for PIL.
            display_img_8bit = cv2.normalize(image_data, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
            img_pil = Image.fromarray(cv2.cvtColor(display_img_8bit, cv2.COLOR_GRAY2RGB))
            # Resize image to fit the label area while preserving aspect ratio
            try:
                # Use the fixed-size container as the resizing target.
                resized = self._resize_pil_to_label(img_pil, tab_widgets.get("image_container", tab_widgets["image_label"]))
            except Exception:
                # Fallback to original if anything goes wrong
                resized = img_pil
            photo = ImageTk.PhotoImage(image=resized)
            tab_widgets["image_label"].config(image=photo, text="")
            # Keep a reference on self so Tk does not discard the image.
            setattr(self, photo_attr, photo)
        except Exception as e:
            self.logger.error(f"Error parsing image payload: {e}")
            tab_widgets["image_label"].config(image=None, text=f"Error parsing image:\n{e}")
            setattr(self, photo_attr, None)
        # Always refresh the hex view, even when image parsing failed.
        self._display_hex_data(payload, tab_widgets["hex_view"])
    def _resize_pil_to_label(self, img: 'Image.Image', label_widget: ttk.Label) -> 'Image.Image':
        """Resize a PIL Image to fit within the current label widget size.

        If the label widget has not been mapped yet (width/height == 1), this
        will fallback to the image's original size. Images are only ever
        scaled down, never up.
        """
        try:
            # Get current allocated size for label (in pixels)
            width = label_widget.winfo_width()
            height = label_widget.winfo_height()
            # If the widget isn't yet laid out, width/height may be 1 -> use geometry
            if width <= 1 or height <= 1:
                geom = self.geometry()  # format: WxH+X+Y
                if 'x' in geom:
                    parts = geom.split('+', 1)[0].split('x')
                    win_w, win_h = int(parts[0]), int(parts[1])
                    # Use a fraction of window size for image area
                    width = max(1, int(win_w * 0.9))
                    height = max(1, int(win_h * 0.6))
            if width <= 1 or height <= 1:
                return img
            img_w, img_h = img.size
            # Compute scale preserving aspect ratio
            scale = min(width / img_w, height / img_h)
            # scale >= 1 would upscale; return the original instead.
            if scale >= 1.0:
                return img
            new_w = max(1, int(img_w * scale))
            new_h = max(1, int(img_h * scale))
            return img.resize((new_w, new_h), Image.LANCZOS)
        except Exception:
            # Best-effort: any Tk/geometry failure falls back to the original image.
            return img
def _display_hex_data(self, payload: bytearray, widget: scrolledtext.ScrolledText):
hex_dump = self._format_hex_dump(payload)
widget.config(state=tk.NORMAL)
widget.delete("1.0", tk.END)
widget.insert("1.0", hex_dump)
widget.config(state=tk.DISABLED)
def _display_json_data(self, payload: bytearray, widget: scrolledtext.ScrolledText):
try:
import json
text = json.dumps(json.loads(payload.decode('utf-8')), indent=2)
except Exception as e:
text = f"--- FAILED TO PARSE JSON ---\n{e}\n\n--- RAW HEX DUMP ---\n"
text += self._format_hex_dump(payload)
widget.config(state=tk.NORMAL)
widget.delete("1.0", tk.END)
widget.insert("1.0", text)
widget.config(state=tk.DISABLED)
def _log_to_widget(self, message: str, level: str = "DEBUG"):
self.logger.info(message)
self.log_tab.config(state=tk.NORMAL)
self.log_tab.insert(tk.END, f"[{level}] {message}\n")
self.log_tab.config(state=tk.DISABLED)
self.log_tab.see(tk.END)
def _format_hex_dump(self, data: bytes, length=16) -> str:
lines = []
for i in range(0, len(data), length):
chunk = data[i:i+length]
hex_part = ' '.join(f'{b:02X}' for b in chunk)
ascii_part = ''.join(chr(b) if 32 <= b < 127 else '.' for b in chunk)
lines.append(f"{i:08X} {hex_part:<{length*3}} |{ascii_part}|")
return '\n'.join(lines)