add new sfp_transport
parent 754d67b914
commit 075a4fda1c
@@ -3,7 +3,7 @@
     "scan_limit": 60,
     "max_range": 100,
     "geometry": "1200x1024+463+195",
-    "last_selected_scenario": "scenario2",
+    "last_selected_scenario": "scenario_9g",
     "connection": {
         "target": {
             "type": "tftp",
@@ -316,6 +316,63 @@
                     "use_spline": true
                 }
             ]
+        },
+        "scenario_9g": {
+            "name": "scenario2",
+            "targets": [
+                {
+                    "target_id": 2,
+                    "active": true,
+                    "traceable": true,
+                    "trajectory": [
+                        {
+                            "maneuver_type": "Fly to Point",
+                            "duration_s": 1.0,
+                            "target_range_nm": 28.0,
+                            "target_azimuth_deg": 0.0,
+                            "target_altitude_ft": 10000.0,
+                            "target_velocity_fps": 506.343,
+                            "target_heading_deg": 180.0,
+                            "longitudinal_acceleration_g": 0.0,
+                            "lateral_acceleration_g": 0.0,
+                            "vertical_acceleration_g": 0.0,
+                            "turn_direction": "Right"
+                        },
+                        {
+                            "maneuver_type": "Fly for Duration",
+                            "duration_s": 300.0,
+                            "target_altitude_ft": 10000.0,
+                            "target_velocity_fps": 506.343,
+                            "target_heading_deg": 180.0,
+                            "longitudinal_acceleration_g": 0.0,
+                            "lateral_acceleration_g": 0.0,
+                            "vertical_acceleration_g": 0.0,
+                            "turn_direction": "Right"
+                        },
+                        {
+                            "maneuver_type": "Dynamic Maneuver",
+                            "duration_s": 9.0,
+                            "maneuver_speed_fps": 1519.0289999999995,
+                            "longitudinal_acceleration_g": 0.0,
+                            "lateral_acceleration_g": 9.0,
+                            "vertical_acceleration_g": 0.0,
+                            "turn_direction": "Right"
+                        },
+                        {
+                            "maneuver_type": "Fly for Duration",
+                            "duration_s": 100.0,
+                            "target_altitude_ft": 10000.0,
+                            "target_velocity_fps": 1012.686,
+                            "target_heading_deg": -90.0,
+                            "longitudinal_acceleration_g": 0.0,
+                            "lateral_acceleration_g": 0.0,
+                            "vertical_acceleration_g": 0.0,
+                            "turn_direction": "Right"
+                        }
+                    ],
+                    "use_spline": false
+                }
+            ]
         }
     }
 }
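The added block defines a second scenario, "scenario_9g", with one target flying four consecutive maneuvers: a short fly-to-point leg, a long straight leg, a 9 g dynamic turn, and a final straight leg. As a hedged sketch that is not part of this commit, this is how such an entry could be read back; the configuration file name and the key holding the scenario dictionary are not shown in this diff and are assumptions here.

import json

CONFIG_PATH = "settings.json"   # assumption: the actual file name is not shown in this diff
SCENARIOS_KEY = "scenarios"     # assumption: the enclosing key is not shown in this diff

with open(CONFIG_PATH, "r", encoding="utf-8") as f:
    cfg = json.load(f)

scenario = cfg[SCENARIOS_KEY]["scenario_9g"]
for target in scenario["targets"]:
    print(f"target {target['target_id']}: {len(target['trajectory'])} maneuvers")
    for leg in target["trajectory"]:
        print(f"  {leg['maneuver_type']}: {leg['duration_s']} s, "
              f"lateral {leg['lateral_acceleration_g']} g")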
target_simulator/core/payload_router.py (new file, 90 lines)
@@ -0,0 +1,90 @@
# core/payload_router.py
"""
Defines the PayloadRouter class, responsible for directing incoming,
reassembled SFP payloads to the correct application-level handler.
"""

import logging
import json
from typing import Dict, Callable

# Type alias for a payload handler, kept explicit for readability.
PayloadHandler = Callable[[bytearray], None]


class PayloadRouter:
    """
    Inspects SFP flow IDs and routes payloads to registered handlers.
    Also contains the implementations for each specific payload handler.
    """

    def __init__(self):
        self._log_prefix = "[PayloadRouter]"
        logging.info(f"{self._log_prefix} Initializing...")

    def get_handlers(self) -> Dict[int, PayloadHandler]:
        """
        Returns a dictionary mapping SFP_FLOW IDs to their handler methods.
        This is the primary method used to connect this router to the transport layer.

        Returns:
            Dict[int, PayloadHandler]: The dictionary of flow IDs and handlers.
        """
        handlers = {
            ord('M'): self.handle_mfd_image_payload,
            ord('S'): self.handle_sar_image_payload,
            ord('B'): self.handle_binary_data_payload,
            ord('J'): self.handle_json_payload,
            # Add new flows and their handlers here.
        }
        logging.debug(f"{self._log_prefix} Providing handlers for flows: {[chr(k) if 32 <= k <= 126 else k for k in handlers.keys()]}")
        return handlers

    # --- Handler Implementations ---

    def handle_mfd_image_payload(self, payload: bytearray):
        """Handler for MFD image payloads."""
        log_prefix = f"{self._log_prefix} MFD"
        logging.info(f"{log_prefix} Received payload. Size: {len(payload)} bytes. Processing...")
        # The logic for interpreting the MFD image payload goes here.
        # For example, ctypes could be used to map the payload onto a structure:
        # from .sfp_structures import ImageLeaderData
        # image_leader = ImageLeaderData.from_buffer(payload)
        # logging.debug(f"{log_prefix} Frame counter: {image_leader.HEADER_DATA.FCOUNTER}")
        pass

    def handle_sar_image_payload(self, payload: bytearray):
        """Handler for SAR image payloads."""
        log_prefix = f"{self._log_prefix} SAR"
        logging.info(f"{log_prefix} Received payload. Size: {len(payload)} bytes. Processing...")
        # The logic for interpreting the SAR image payload goes here.
        pass

    def handle_binary_data_payload(self, payload: bytearray):
        """Handler for generic binary data payloads."""
        log_prefix = f"{self._log_prefix} BIN"
        logging.info(f"{log_prefix} Received payload. Size: {len(payload)} bytes.")
        try:
            # Example layout: a 2-byte ID, a 4-byte length, then the data itself.
            if len(payload) >= 6:
                message_id = int.from_bytes(payload[0:2], 'little')
                data_length = int.from_bytes(payload[2:6], 'little')
                actual_data = payload[6:6 + data_length]
                logging.info(f"{log_prefix} Parsed binary message: ID={message_id}, Length={data_length}, Data size={len(actual_data)}")
            else:
                logging.warning(f"{log_prefix} Payload too small for expected binary format.")
        except Exception as e:
            logging.error(f"{log_prefix} Error parsing binary payload: {e}")

    def handle_json_payload(self, payload: bytearray):
        """Handler for JSON payloads."""
        log_prefix = f"{self._log_prefix} JSON"
        logging.info(f"{log_prefix} Received payload. Size: {len(payload)} bytes.")
        try:
            json_string = payload.decode('utf-8')
            data = json.loads(json_string)
            message_type = data.get('type', 'Unknown')
            logging.info(f"{log_prefix} Decoded JSON message of type '{message_type}'.")
            # The logic for processing the JSON object goes here, e.g.:
            # if message_type == 'status_update':
            #     handle_status_update(data)
        except (UnicodeDecodeError, json.JSONDecodeError) as e:
            logging.error(f"{log_prefix} Error decoding JSON payload: {e}")
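As a usage sketch, not part of this commit, the router can be exercised on its own: get_handlers() returns the flow-ID-to-handler map that the transport layer consumes, and the binary frame below follows the 2-byte ID / 4-byte length layout that handle_binary_data_payload parses. The import path is assumed from the file location.

from target_simulator.core.payload_router import PayloadRouter

router = PayloadRouter()
handlers = router.get_handlers()   # keys are ord('M'), ord('S'), ord('B'), ord('J')

# Binary flow 'B': 2-byte message ID and 4-byte length, both little-endian, then data.
body = b"hello"
frame = bytearray((42).to_bytes(2, "little") + len(body).to_bytes(4, "little") + body)
handlers[ord("B")](frame)          # logs: Parsed binary message: ID=42, Length=5, ...

# JSON flow 'J': UTF-8 JSON with an optional 'type' field.
handlers[ord("J")](bytearray(b'{"type": "status_update"}'))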
target_simulator/core/sfp_structures.py (new file, 162 lines)
@@ -0,0 +1,162 @@
# core/sfp_structures.py
"""
Defines the ctypes Structures for the Simple Fragmentation Protocol (SFP)
and the application-specific image data format.

This centralizes data structure definitions, allowing them to be shared
between the transport layer (which uses SFPHeader) and the application
layer (which interprets the image-specific structures).
"""

import ctypes

# --- SFP Transport Layer Structures ---


class SFPHeader(ctypes.Structure):
    """Structure representing the SFP header (32 bytes)."""
    _pack_ = 1
    _fields_ = [
        ("SFP_MARKER", ctypes.c_uint8),
        ("SFP_DIRECTION", ctypes.c_uint8),
        ("SFP_PROT_VER", ctypes.c_uint8),
        ("SFP_PT_SPARE", ctypes.c_uint8),
        ("SFP_TAG", ctypes.c_uint8),
        ("SFP_SRC", ctypes.c_uint8),
        ("SFP_FLOW", ctypes.c_uint8),
        ("SFP_TID", ctypes.c_uint8),
        ("SFP_FLAGS", ctypes.c_uint8),
        ("SFP_WIN", ctypes.c_uint8),
        ("SFP_ERR", ctypes.c_uint8),
        ("SFP_ERR_INFO", ctypes.c_uint8),
        ("SFP_TOTFRGAS", ctypes.c_uint16),
        ("SFP_FRAG", ctypes.c_uint16),
        ("SFP_RECTYPE", ctypes.c_uint8),
        ("SFP_RECSPARE", ctypes.c_uint8),
        ("SFP_PLDAP", ctypes.c_uint8),
        ("SFP_PLEXT", ctypes.c_uint8),
        ("SFP_RECCOUNTER", ctypes.c_uint16),
        ("SFP_PLSIZE", ctypes.c_uint16),
        ("SFP_TOTSIZE", ctypes.c_uint32),
        ("SFP_PLOFFSET", ctypes.c_uint32),
    ]

    @classmethod
    def size(cls):
        """Returns the size of the structure in bytes."""
        return ctypes.sizeof(cls)

    @staticmethod
    def get_field_offset(field_name: str) -> int:
        """Returns the byte offset of a specific field within the structure."""
        try:
            return getattr(SFPHeader, field_name).offset
        except AttributeError:
            return -1


# --- Application Layer (Image Format) Structures ---


class DataTag(ctypes.Structure):
    """Structure representing a generic data tag (8 bytes)."""
    _pack_ = 1
    _fields_ = [
        ("ID", ctypes.c_uint8 * 2),
        ("VALID", ctypes.c_uint8),
        ("VERSION", ctypes.c_uint8),
        ("SIZE", ctypes.c_uint32),
    ]

    @classmethod
    def size(cls):
        """Returns the size of the structure in bytes."""
        return ctypes.sizeof(cls)


class HeaderData(ctypes.Structure):
    """Structure representing the HEADER_DATA block (~48 bytes)."""
    _pack_ = 1
    _fields_ = [
        ("TYPE", ctypes.c_uint8),
        ("SUBTYPE", ctypes.c_uint8),
        ("LOGCOLORS", ctypes.c_uint8),
        ("IMG_RESERVED", ctypes.c_uint8),
        ("PRODINFO", ctypes.c_uint32 * 2),
        ("TOD", ctypes.c_uint16 * 2),
        ("RESERVED", ctypes.c_uint32),
        ("FCOUNTER", ctypes.c_uint32),
        ("TIMETAG", ctypes.c_uint32),
        ("NOMINALFRATE", ctypes.c_uint16),
        ("FRAMETAG", ctypes.c_uint16),
        ("OX", ctypes.c_uint16),
        ("OY", ctypes.c_uint16),
        ("DX", ctypes.c_uint16),
        ("DY", ctypes.c_uint16),
        ("STRIDE", ctypes.c_uint16),
        ("BPP", ctypes.c_uint8),
        ("COMP", ctypes.c_uint8),
        ("SPARE", ctypes.c_uint16),
        ("PALTYPE", ctypes.c_uint8),
        ("GAP", ctypes.c_uint8),
    ]

    @classmethod
    def size(cls):
        """Returns the size of the structure in bytes."""
        return ctypes.sizeof(cls)


class GeoData(ctypes.Structure):
    """Structure representing the GEO_DATA block (~80 bytes)."""
    _pack_ = 1
    _fields_ = [
        ("INVMASK", ctypes.c_uint32),
        ("ORIENTATION", ctypes.c_float),
        ("LATITUDE", ctypes.c_float),
        ("LONGITUDE", ctypes.c_float),
        ("REF_X", ctypes.c_uint16),
        ("REF_Y", ctypes.c_uint16),
        ("SCALE_X", ctypes.c_float),
        ("SCALE_Y", ctypes.c_float),
        ("POI_ORIENTATION", ctypes.c_float),
        ("POI_LATITUDE", ctypes.c_float),
        ("POI_LONGITUDE", ctypes.c_float),
        ("POI_ALTITUDE", ctypes.c_float),
        ("POI_X", ctypes.c_uint16),
        ("POI_Y", ctypes.c_uint16),
        ("SPARE", ctypes.c_uint32 * 7),
    ]

    @classmethod
    def size(cls):
        """Returns the size of the structure in bytes."""
        return ctypes.sizeof(cls)


class ImageLeaderData(ctypes.Structure):
    """Represents the complete Image Leader data structure (~1320 bytes)."""
    _pack_ = 1
    _fields_ = [
        ("HEADER_TAG", DataTag),
        ("HEADER_DATA", HeaderData),
        ("GEO_TAG", DataTag),
        ("GEO_DATA", GeoData),
        ("RESERVED_TAG", DataTag),
        ("RESERVED_DATA", ctypes.c_uint8 * 128),
        ("CM_TAG", DataTag),
        ("COLOUR_MAP", ctypes.c_uint32 * 256),
        ("PIXEL_TAG", DataTag),
    ]

    @classmethod
    def size(cls):
        """Returns the size of the structure in bytes."""
        return ctypes.sizeof(cls)

    @staticmethod
    def get_reserved_data_size() -> int:
        """Returns the static size of the RESERVED_DATA field."""
        return 128

    @staticmethod
    def get_colour_map_size() -> int:
        """Returns the static size of the COLOUR_MAP field in bytes."""
        return 1024
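A short sketch, not part of this commit, of how these structures are typically consumed: the transport copies the first 32 bytes of each datagram into an SFPHeader, and an MFD handler can map a reassembled payload onto ImageLeaderData the same way. The import path is assumed from the file location, and the datagram bytes are fabricated for illustration.

from target_simulator.core.sfp_structures import SFPHeader, ImageLeaderData

assert SFPHeader.size() == 32                    # packed fields add up to the documented 32 bytes

datagram = bytes(32) + b"fragment-payload"       # fabricated example datagram
header = SFPHeader.from_buffer_copy(datagram)    # parse the fixed-size header
print(header.SFP_FLOW, header.SFP_FRAG, header.SFP_PLSIZE, header.SFP_TOTSIZE)

# Field offsets support in-place patching of raw header bytes (used when building ACKs).
print(SFPHeader.get_field_offset("SFP_DIRECTION"))   # -> 1

# Mapping an image leader over a (sufficiently large) reassembled buffer:
leader = ImageLeaderData.from_buffer_copy(bytearray(ImageLeaderData.size()))
print(leader.HEADER_DATA.FCOUNTER, leader.GEO_DATA.LATITUDE)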
target_simulator/core/sfp_transport.py (new file, 240 lines)
@@ -0,0 +1,240 @@
# core/sfp_transport.py
"""
Provides a reusable transport layer for the Simple Fragmentation Protocol (SFP).

This module handles UDP socket communication, SFP header parsing, fragment
reassembly, and ACK generation. It is application-agnostic and uses a
callback/handler system to pass fully reassembled payloads to the
application layer based on the SFP_FLOW identifier.
"""

import socket
import logging
import threading
import time
from typing import Dict, Callable, Optional

from controlpanel import config
from controlpanel.utils.network import create_udp_socket, close_udp_socket
from controlpanel.core.sfp_structures import SFPHeader

# Define a type hint for payload handlers
PayloadHandler = Callable[[bytearray], None]


class SfpTransport:
    """Manages SFP communication and payload reassembly."""

    def __init__(self, host: str, port: int, payload_handlers: Dict[int, PayloadHandler]):
        """
        Initializes the SFP Transport layer.

        Args:
            host (str): The local IP address to bind the UDP socket to.
            port (int): The local port to listen on.
            payload_handlers (Dict[int, PayloadHandler]): A dictionary mapping
                SFP_FLOW IDs (as integers) to their corresponding handler functions.
                Each handler will be called with the complete payload (bytearray).
        """
        self._log_prefix = "[SfpTransport]"
        logging.info(f"{self._log_prefix} Initializing for {host}:{port}...")

        self._host = host
        self._port = port
        self._payload_handlers = payload_handlers
        self._socket: Optional[socket.socket] = None
        self._receiver_thread: Optional[threading.Thread] = None
        self._stop_event = threading.Event()

        # Reassembly state dictionaries, managed by this transport layer
        self._fragments: Dict[tuple, Dict[int, int]] = {}
        self._buffers: Dict[tuple, bytearray] = {}

        logging.debug(
            f"{self._log_prefix} Registered handlers for flows: "
            f"{[chr(k) if 32 <= k <= 126 else k for k in self._payload_handlers.keys()]}"
        )

    def start(self) -> bool:
        """
        Starts the transport layer by creating the socket and launching the receiver thread.

        Returns:
            bool: True if started successfully, False otherwise.
        """
        if self._receiver_thread is not None and self._receiver_thread.is_alive():
            logging.warning(f"{self._log_prefix} Start called, but receiver is already running.")
            return True

        self._socket = create_udp_socket(self._host, self._port)
        if not self._socket:
            logging.critical(f"{self._log_prefix} Failed to create and bind socket. Cannot start.")
            return False

        self._stop_event.clear()
        self._receiver_thread = threading.Thread(
            target=self._receive_loop, name="SfpTransportThread", daemon=True
        )
        self._receiver_thread.start()
        logging.info(f"{self._log_prefix} Receiver thread started.")
        return True

    def shutdown(self):
        """Stops the receiver thread and closes the socket."""
        logging.info(f"{self._log_prefix} Shutdown initiated.")
        self._stop_event.set()

        # The socket is closed here to interrupt the blocking recvfrom call
        if self._socket:
            close_udp_socket(self._socket)
            self._socket = None

        if self._receiver_thread and self._receiver_thread.is_alive():
            logging.debug(f"{self._log_prefix} Waiting for receiver thread to join...")
            self._receiver_thread.join(timeout=2.0)
            if self._receiver_thread.is_alive():
                logging.warning(f"{self._log_prefix} Receiver thread did not join cleanly.")

        logging.info(f"{self._log_prefix} Shutdown complete.")

    def _receive_loop(self):
        """The main loop that listens for UDP packets and processes them."""
        log_prefix = f"{self._log_prefix} Loop"
        logging.info(f"{log_prefix} Starting receive loop.")

        while not self._stop_event.is_set():
            if not self._socket:
                logging.error(f"{log_prefix} Socket is not available. Stopping loop.")
                break

            try:
                data, addr = self._socket.recvfrom(65535)
                if not data:
                    continue
            except socket.timeout:
                continue
            except OSError:
                # This is expected when the socket is closed during shutdown
                if not self._stop_event.is_set():
                    logging.error(f"{log_prefix} Socket error.", exc_info=True)
                break
            except Exception:
                logging.exception(f"{log_prefix} Unexpected error in recvfrom.")
                time.sleep(0.01)
                continue

            self._process_packet(data, addr)

        logging.info(f"{log_prefix} Receive loop terminated.")

    def _process_packet(self, data: bytes, addr: tuple):
        """Parses an SFP packet and handles fragment reassembly."""
        header_size = SFPHeader.size()
        if len(data) < header_size:
            logging.warning(f"Packet from {addr} is too small for SFP header. Ignoring.")
            return

        try:
            header = SFPHeader.from_buffer_copy(data)
        except (ValueError, TypeError):
            logging.error(f"Failed to parse SFP header from {addr}. Ignoring.")
            return

        flow, tid = header.SFP_FLOW, header.SFP_TID
        frag, total_frags = header.SFP_FRAG, header.SFP_TOTFRGAS
        pl_size, pl_offset = header.SFP_PLSIZE, header.SFP_PLOFFSET
        total_size = header.SFP_TOTSIZE
        key = (flow, tid)

        # Handle ACK Request
        if header.SFP_FLAGS & 0x01:
            self._send_ack(addr, data[:header_size])

        # Validate packet metadata
        if total_frags == 0 or total_frags > 60000 or total_size <= 0:
            logging.warning(f"Invalid metadata for {key}: total_frags={total_frags}, total_size={total_size}. Ignoring.")
            return

        # Start of a new transaction
        if frag == 0:
            self._cleanup_lingering_transactions(flow, tid)
            logging.debug(f"New transaction started for key={key}. Total size: {total_size} bytes.")
            self._fragments[key] = {}
            try:
                self._buffers[key] = bytearray(total_size)
            except (MemoryError, ValueError):
                logging.error(f"Failed to allocate {total_size} bytes for key={key}. Ignoring transaction.")
                self._fragments.pop(key, None)
                return

        # Check if we are tracking this transaction
        if key not in self._buffers or key not in self._fragments:
            logging.debug(f"Ignoring fragment {frag} for untracked transaction key={key}.")
            return

        # Store fragment info and copy payload
        self._fragments[key][frag] = total_frags
        payload = data[header_size:]
        bytes_to_copy = min(pl_size, len(payload))

        if (pl_offset + bytes_to_copy) > len(self._buffers[key]):
            logging.error(f"Payload for key={key}, frag={frag} would overflow buffer. Ignoring.")
            return

        self._buffers[key][pl_offset : pl_offset + bytes_to_copy] = payload[:bytes_to_copy]

        # Check for completion
        if len(self._fragments[key]) == total_frags:
            # logging.info(f"Transaction complete for key={key}. Handing off to application layer.")

            # Retrieve completed buffer and clean up state for this key
            completed_payload = self._buffers.pop(key)
            self._fragments.pop(key)

            # Find and call the appropriate handler
            handler = self._payload_handlers.get(flow)
            if handler:
                try:
                    handler(completed_payload)
                except Exception:
                    logging.exception(f"Error executing payload handler for flow {flow}.")
            else:
                logging.warning(f"No payload handler registered for flow ID {flow}.")

    def _send_ack(self, dest_addr: tuple, original_header_bytes: bytes):
        """Sends an SFP ACK packet back to the sender."""
        log_prefix = f"{self._log_prefix} ACK"
        if not self._socket:
            return

        try:
            ack_header = bytearray(original_header_bytes)
            flow = ack_header[SFPHeader.get_field_offset("SFP_FLOW")]

            # Determine window size based on flow
            window_size = 0
            if flow == ord('M'):  # MFD
                window_size = config.ACK_WINDOW_SIZE_MFD
            elif flow == ord('S'):  # SAR
                window_size = config.ACK_WINDOW_SIZE_SAR

            # Modify header for ACK response
            ack_header[SFPHeader.get_field_offset("SFP_DIRECTION")] = 0x3C  # '<'
            ack_header[SFPHeader.get_field_offset("SFP_WIN")] = window_size
            original_flags = ack_header[SFPHeader.get_field_offset("SFP_FLAGS")]
            ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] = (original_flags | 0x01) & ~0x02

            self._socket.sendto(ack_header, dest_addr)
            logging.debug(f"{log_prefix} Sent to {dest_addr} for flow {chr(flow) if 32 <= flow <= 126 else flow}.")
        except Exception:
            logging.exception(f"{log_prefix} Failed to send to {dest_addr}.")

    def _cleanup_lingering_transactions(self, current_flow: int, current_tid: int):
        """Removes old, incomplete transactions for the same flow."""
        # This is a simplified cleanup. The original was more complex for stats.
        keys_to_remove = [
            key for key in self._fragments
            if key[0] == current_flow and key[1] != current_tid
        ]
        for key in keys_to_remove:
            logging.warning(f"Cleaning up lingering/incomplete transaction for key={key}.")
            self._fragments.pop(key, None)
            self._buffers.pop(key, None)
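As a wiring sketch, not part of this commit: SfpTransport takes the router's handler map, binds a UDP socket via the network helpers, and dispatches each fully reassembled payload by SFP_FLOW. Host, port, and the sleep-based lifetime below are placeholders; note that the module as committed imports config and helpers from the controlpanel package.

import time
from target_simulator.core.payload_router import PayloadRouter
from target_simulator.core.sfp_transport import SfpTransport

router = PayloadRouter()
transport = SfpTransport(host="0.0.0.0", port=5000,
                         payload_handlers=router.get_handlers())

if transport.start():          # binds the socket and starts the receiver thread
    try:
        time.sleep(60)         # placeholder for the application's real run loop
    finally:
        transport.shutdown()   # closes the socket and joins the receiver thread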
target_simulator/utils/network.py (new file, 158 lines)
@@ -0,0 +1,158 @@
# network.py
"""
THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.

Handles basic UDP socket operations like creation, configuration (buffer size),
and closing for the application. Uses standardized logging prefixes.
"""

# Standard library imports
import socket
import logging
import sys  # Kept in case platform-specific checks are needed (not currently used)

# No local application imports are needed in this module.


def create_udp_socket(local_ip, local_port):
    """
    Creates a UDP socket, attempts to increase its receive buffer size,
    and binds it to the specified local IP address and port.

    Args:
        local_ip (str): The local IP address to bind to.
        local_port (int): The local port to bind to.

    Returns:
        socket.socket: The created and bound UDP socket, or None on error.
    """
    log_prefix = "[Network]"  # Standard prefix for this module
    logging.debug(
        f"{log_prefix} Attempting to create UDP socket for {local_ip}:{local_port}"
    )

    try:
        # Create the UDP socket (IPv4)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        logging.debug(f"{log_prefix} Socket object created.")

        # --- Receive Buffer Size Adjustment ---
        try:
            # Get the default buffer size (INFO level is okay for this setup detail)
            default_rcvbuf = sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
            logging.info(
                f"{log_prefix} Default socket receive buffer size: {default_rcvbuf} bytes"
            )

            # Set a larger buffer size (adjust value as needed)
            desired_rcvbuf = 8 * 1024 * 1024  # Request 8 MB
            logging.debug(
                f"{log_prefix} Requesting receive buffer size: {desired_rcvbuf} bytes"
            )
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, desired_rcvbuf)

            # Verify the actual size set by the OS (INFO level okay)
            actual_rcvbuf = sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
            logging.info(
                f"{log_prefix} Actual receive buffer size set: {actual_rcvbuf} bytes"
            )
            if actual_rcvbuf < desired_rcvbuf:
                # WARNING if the OS limited the size
                logging.warning(
                    f"{log_prefix} OS capped the receive buffer size lower than requested."
                )
        except OSError as buf_e:
            # ERROR if setting the buffer fails (might indicate a permissions or OS limits issue)
            logging.error(
                f"{log_prefix} Failed to set receive buffer size: {buf_e}. Check OS limits (e.g., sysctl net.core.rmem_max)."
            )
        except Exception as buf_e:
            # Keep EXCEPTION for unexpected errors during buffer setting
            logging.exception(
                f"{log_prefix} Unexpected error setting receive buffer: {buf_e}"
            )

        # --- Bind the Socket ---
        logging.debug(f"{log_prefix} Binding socket to ('{local_ip}', {local_port})...")
        sock.bind((local_ip, local_port))
        # INFO for successful bind confirmation
        logging.info(
            f"{log_prefix} UDP socket created and bound successfully to {local_ip}:{local_port}"
        )
        return sock

    except socket.error as e:
        # ERROR for critical socket creation/binding failure
        logging.error(
            f"{log_prefix} Error creating/binding UDP socket for {local_ip}:{local_port}: {e}"
        )
        return None
    except Exception as e:
        # Keep EXCEPTION for any other unexpected errors
        logging.exception(
            f"{log_prefix} Unexpected error during UDP socket creation for {local_ip}:{local_port}: {e}"
        )
        return None


def receive_udp_packet(sock, buffer_size=65535):
    """
    Receives a single UDP packet from the specified socket.
    Note: Currently unused by the UdpReceiver class, which calls sock.recvfrom directly.

    Args:
        sock (socket.socket): The UDP socket to receive from.
        buffer_size (int): The maximum buffer size for receiving data.

    Returns:
        tuple: A tuple containing (data, sender_address), or (None, None) on error.
    """
    log_prefix = "[Network]"  # Standard prefix
    # DEBUG for function call (if used)
    logging.debug(
        f"{log_prefix} Attempting to receive UDP packet (buffer size: {buffer_size})."
    )
    try:
        data, addr = sock.recvfrom(buffer_size)
        logging.debug(f"{log_prefix} Received {len(data)} bytes from {addr}.")
        return data, addr
    except socket.timeout:
        # DEBUG for expected timeout (if a timeout is set on the socket)
        logging.debug(f"{log_prefix} Socket recvfrom timeout.")
        return None, None
    except socket.error as e:
        # ERROR for socket errors during receive
        logging.error(f"{log_prefix} Error receiving UDP packet: {e}")
        return None, None
    except Exception as e:
        # Keep EXCEPTION for unexpected errors
        logging.exception(f"{log_prefix} Unexpected error receiving UDP packet: {e}")
        return None, None


def close_udp_socket(sock):
    """
    Closes the specified UDP socket gracefully.

    Args:
        sock (socket.socket): The UDP socket object to close.
    """
    log_prefix = "[Network]"  # Standard prefix
    # Check that the socket is valid before attempting to close it
    if sock and hasattr(sock, "fileno") and sock.fileno() != -1:
        try:
            # INFO for significant action: closing the main socket
            logging.info(f"{log_prefix} Closing UDP socket (fd={sock.fileno()})...")
            sock.close()
            logging.info(f"{log_prefix} UDP socket closed successfully.")
        except socket.error as e:
            # ERROR if closing fails
            logging.error(f"{log_prefix} Error closing UDP socket: {e}")
        except Exception as e:
            # Keep EXCEPTION for unexpected errors
            logging.exception(f"{log_prefix} Unexpected error closing UDP socket: {e}")
    else:
        # DEBUG if the socket is already closed or invalid
        logging.debug(
            f"{log_prefix} Socket close requested, but socket was already closed or invalid."
        )
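Finally, a small sketch, not part of this commit, of the network helpers used on their own; the address and port are placeholders, and the import path is assumed from the file location.

from target_simulator.utils.network import (
    create_udp_socket,
    receive_udp_packet,
    close_udp_socket,
)

sock = create_udp_socket("127.0.0.1", 6000)    # returns None if binding fails
if sock:
    sock.settimeout(1.0)                       # lets receive_udp_packet time out cleanly
    data, sender = receive_udp_packet(sock)    # (None, None) on timeout or error
    if data:
        print(f"received {len(data)} bytes from {sender}")
    close_udp_socket(sock)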