aggiunto l'invio del comando tgtinit con i parametri corretti.

This commit is contained in:
VALLONGOL 2025-10-20 14:09:01 +02:00
parent 9c89bf92a8
commit f23c522f60
8 changed files with 706 additions and 927 deletions

BIN
SUM7056227 Rev. A.pdf Normal file

Binary file not shown.

View File

@ -27,6 +27,8 @@ def build_tgtinit(target: Target) -> str:
f"{target.current_altitude_ft:.2f}", f"{target.current_altitude_ft:.2f}",
] ]
qualifiers = ["/s" if target.active else "/-s", "/t" if target.traceable else "/-t"] qualifiers = ["/s" if target.active else "/-s", "/t" if target.traceable else "/-t"]
if hasattr(target, "restart") and getattr(target, "restart", False):
qualifiers.append("/r")
command_parts = ["tgtinit"] + [str(p) for p in params] + qualifiers command_parts = ["tgtinit"] + [str(p) for p in params] + qualifiers
full_command = " ".join(command_parts) full_command = " ".join(command_parts)

View File

@ -62,6 +62,7 @@ class Target:
trajectory: List[Waypoint] = field(default_factory=list) trajectory: List[Waypoint] = field(default_factory=list)
active: bool = True active: bool = True
traceable: bool = True traceable: bool = True
restart: bool = False
use_spline: bool = False use_spline: bool = False
current_range_nm: float = field(init=False, default=0.0) current_range_nm: float = field(init=False, default=0.0)
current_azimuth_deg: float = field(init=False, default=0.0) current_azimuth_deg: float = field(init=False, default=0.0)

View File

@ -1,13 +1,19 @@
"""Clean SFP transport implementation with detailed logging. # target_simulator/core/sfp_transport.py
Handles UDP receive loop, SFP header parsing, fragment reassembly, ACK """
generation, and hands completed payloads to registered handlers. Provides a reusable transport layer for the Simple Fragmentation Protocol (SFP).
This module handles UDP socket communication, SFP header parsing, fragment
reassembly, ACK generation, and payload sending. It is application-agnostic
and uses a callback/handler system to pass fully reassembled payloads to the
application layer based on the SFP_FLOW identifier.
""" """
import socket import socket
import logging import logging
import threading import threading
import time import time
import ctypes
from typing import Dict, Callable, Optional from typing import Dict, Callable, Optional
from target_simulator.utils.network import create_udp_socket, close_udp_socket from target_simulator.utils.network import create_udp_socket, close_udp_socket
@ -15,14 +21,16 @@ from target_simulator.core.sfp_structures import SFPHeader
PayloadHandler = Callable[[bytearray], None] PayloadHandler = Callable[[bytearray], None]
LOG_LEVEL: Optional[int] = logging.INFO
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
if LOG_LEVEL is not None:
logger.setLevel(LOG_LEVEL)
class SfpTransport: class SfpTransport:
"""Manages SFP communication and payload reassembly.""" """Manages SFP communication, payload reassembly, and sending."""
# Max size for a script payload, conservative to fit in server buffers.
MAX_SCRIPT_PAYLOAD_SIZE = 1020
# Size of the fixed part of the script message (the DataTag).
SCRIPT_TAG_SIZE = 8
def __init__( def __init__(
self, self,
@ -44,6 +52,8 @@ class SfpTransport:
self._socket: Optional[socket.socket] = None self._socket: Optional[socket.socket] = None
self._receiver_thread: Optional[threading.Thread] = None self._receiver_thread: Optional[threading.Thread] = None
self._stop_event = threading.Event() self._stop_event = threading.Event()
self._tid_counter = 0 # Simple transaction ID counter for sending
self._send_lock = threading.Lock() # Protects TID counter and socket sending
# transaction state: key=(flow, tid) -> {frag_index: total_frags} # transaction state: key=(flow, tid) -> {frag_index: total_frags}
self._fragments: Dict[tuple, Dict[int, int]] = {} self._fragments: Dict[tuple, Dict[int, int]] = {}
@ -57,6 +67,7 @@ class SfpTransport:
logger.debug(f"{self._log_prefix} ACK window config: {self._ack_config}") logger.debug(f"{self._log_prefix} ACK window config: {self._ack_config}")
def start(self) -> bool: def start(self) -> bool:
"""Starts the receiving thread."""
if self._receiver_thread is not None and self._receiver_thread.is_alive(): if self._receiver_thread is not None and self._receiver_thread.is_alive():
logger.warning( logger.warning(
f"{self._log_prefix} Start called, but receiver is already running." f"{self._log_prefix} Start called, but receiver is already running."
@ -78,6 +89,7 @@ class SfpTransport:
return True return True
def shutdown(self): def shutdown(self):
"""Stops the receiving thread and closes the socket."""
self._stop_event.set() self._stop_event.set()
if self._socket: if self._socket:
@ -94,6 +106,120 @@ class SfpTransport:
logger.info(f"{self._log_prefix} Shutdown complete.") logger.info(f"{self._log_prefix} Shutdown complete.")
def send_script_command(
    self, command_string: str, destination: tuple, flow_id: int = ord("R")
) -> bool:
    """
    Encapsulate and send a text command as a single-fragment SFP packet.

    Builds a payload mirroring the server's ``script_payload_t`` layout
    (ctrl DataTag + 32-word ctrl block + script DataTag + script text),
    prepends an SFP header, and sends the datagram to *destination*.

    Args:
        command_string: The text command to send (e.g., "tgtinit ...").
        destination: A ``(ip, port)`` tuple for the destination.
        flow_id: The SFP flow ID to use for this message.

    Returns:
        True if the packet was sent successfully, False otherwise.
    """
    log_prefix = f"{self._log_prefix} Send"
    if not self._socket:
        logger.error(f"{log_prefix} Cannot send: socket is not open.")
        return False

    # ctypes replicas of the server-side structures. The server expects a
    # script_payload_t that carries a ctrl_tag and ctrl[32] *before* the
    # script_tag, so the same layout is replicated here.
    class LocalDataTag(ctypes.Structure):
        _pack_ = 1
        _fields_ = [
            ("ID", ctypes.c_uint8 * 2),
            ("VALID", ctypes.c_uint8),
            ("VERSION", ctypes.c_uint8),
            ("SIZE", ctypes.c_uint32),
        ]

    class ScriptPayload(ctypes.Structure):
        _pack_ = 1
        _fields_ = [
            ("ctrl_tag", LocalDataTag),
            ("ctrl", ctypes.c_uint32 * 32),
            ("script_tag", LocalDataTag),
            ("script", ctypes.c_uint8 * self.MAX_SCRIPT_PAYLOAD_SIZE),
        ]

    try:
        # Normalize the command: strip surrounding whitespace and make sure
        # it is newline-terminated (the server parses line-oriented input).
        cs = (command_string or "").strip()
        if cs and not cs.endswith("\n"):
            cs += "\n"

        command_bytes = cs.encode("utf-8")
        if len(command_bytes) > self.MAX_SCRIPT_PAYLOAD_SIZE:
            logger.error(
                f"{log_prefix} Command is too long ({len(command_bytes)} bytes). "
                f"Max is {self.MAX_SCRIPT_PAYLOAD_SIZE}."
            )
            return False

        # Populate the script DataTag ('CS' identifier) and copy the command
        # text into the fixed-size script buffer.
        payload_struct = ScriptPayload()
        payload_struct.script_tag.ID[0] = ord("C")
        payload_struct.script_tag.ID[1] = ord("S")
        payload_struct.script_tag.VALID = 1
        payload_struct.script_tag.VERSION = 1
        payload_struct.script_tag.SIZE = len(command_bytes)
        ctypes.memmove(payload_struct.script, command_bytes, len(command_bytes))

        # Truncate the serialized struct right after the command text so the
        # unused tail of the fixed-size script buffer is not transmitted.
        # ctypes field descriptors expose the packed offset directly.
        script_offset = ScriptPayload.script.offset
        payload_bytes = bytes(payload_struct)[: script_offset + len(command_bytes)]

        # Build the SFP header for a single-fragment message. TID increment
        # and the sendto() call stay under the lock so concurrent senders
        # cannot interleave TIDs or datagrams.
        header = SFPHeader()
        with self._send_lock:
            self._tid_counter = (self._tid_counter + 1) % 256
            header.SFP_TID = self._tid_counter
            header.SFP_DIRECTION = ord(">")
            header.SFP_FLOW = flow_id
            header.SFP_TOTFRGAS = 1  # Single fragment message
            header.SFP_FRAG = 0
            header.SFP_PLSIZE = len(payload_bytes)
            header.SFP_TOTSIZE = len(payload_bytes)
            header.SFP_PLOFFSET = 0

            self._socket.sendto(bytes(header) + payload_bytes, destination)

        # Log the actual normalized command that was placed into the payload.
        logger.info(
            f"{log_prefix} Sent command to {destination} "
            f"(TID: {header.SFP_TID}): {cs!r}"
        )
        return True
    except Exception:
        logger.exception(f"{log_prefix} Failed to send script command.")
        return False
def _receive_loop(self): def _receive_loop(self):
log_prefix = f"{self._log_prefix} Loop" log_prefix = f"{self._log_prefix} Loop"
logger.info(f"{log_prefix} Starting receive loop.") logger.info(f"{log_prefix} Starting receive loop.")
@ -107,18 +233,13 @@ class SfpTransport:
data, addr = self._socket.recvfrom(65535) data, addr = self._socket.recvfrom(65535)
if not data: if not data:
continue continue
try: if self._raw_packet_callback:
if self._raw_packet_callback: self._raw_packet_callback(data, addr)
self._raw_packet_callback(data, addr)
except Exception:
logger.exception(
f"{log_prefix} raw_packet_callback raised an exception"
)
except socket.timeout: except socket.timeout:
continue continue
except OSError: except OSError as e:
if not self._stop_event.is_set(): if not self._stop_event.is_set():
logger.error(f"{log_prefix} Socket error.", exc_info=True) logger.error(f"{log_prefix} Socket error: {e}")
break break
except Exception: except Exception:
logger.exception(f"{log_prefix} Unexpected error in recvfrom.") logger.exception(f"{log_prefix} Unexpected error in recvfrom.")
@ -130,15 +251,11 @@ class SfpTransport:
logger.info(f"{log_prefix} Receive loop terminated.") logger.info(f"{log_prefix} Receive loop terminated.")
def _process_packet(self, data: bytes, addr: tuple): def _process_packet(self, data: bytes, addr: tuple):
"""Parse SFP packet, log details, and reassemble fragments.
Logging includes parsed header fields and a small payload preview.
The preview attempts JSON decode to detect text-based payloads; if
that fails the first bytes are logged in hex.
"""
header_size = SFPHeader.size() header_size = SFPHeader.size()
if len(data) < header_size: if len(data) < header_size:
logger.warning(f"Packet from {addr} is too small for SFP header. Ignoring.") logger.warning(
f"Packet from {addr} is too small for SFP header. Ignoring."
)
return return
try: try:
@ -147,7 +264,6 @@ class SfpTransport:
logger.error(f"Failed to parse SFP header from {addr}. Ignoring.") logger.error(f"Failed to parse SFP header from {addr}. Ignoring.")
return return
# Extract header fields
flow = header.SFP_FLOW flow = header.SFP_FLOW
tid = header.SFP_TID tid = header.SFP_TID
frag = header.SFP_FRAG frag = header.SFP_FRAG
@ -157,41 +273,18 @@ class SfpTransport:
total_size = header.SFP_TOTSIZE total_size = header.SFP_TOTSIZE
flags = header.SFP_FLAGS flags = header.SFP_FLAGS
try:
flow_name = chr(flow) if 32 <= flow < 127 else str(flow)
except Exception:
flow_name = str(flow)
# Payload preview for logging
payload_preview = data[header_size : header_size + 256]
try:
import json
json.loads(payload_preview.decode("utf-8", errors="strict"))
payload_preview_text = "JSON (preview)"
except Exception:
payload_preview_text = "Hex preview: " + " ".join(
f"{b:02X}" for b in payload_preview[:64]
)
logger.info(
f"{self._log_prefix} Packet from {addr} - flow={flow_name} ({flow}), tid={tid}, flags=0x{flags:X}, frag={frag}/{total_frags}, pl_size={pl_size}, pl_offset={pl_offset}, total_size={total_size}. {payload_preview_text}"
)
key = (flow, tid) key = (flow, tid)
# If sender requested an ACK bit in flags, reply
if header.SFP_FLAGS & 0x01: if header.SFP_FLAGS & 0x01:
self._send_ack(addr, data[:header_size]) self._send_ack(addr, data[:header_size])
# Basic validation
if total_frags == 0 or total_frags > 60000 or total_size <= 0: if total_frags == 0 or total_frags > 60000 or total_size <= 0:
logger.warning( logger.warning(
f"Invalid metadata for {key}: total_frags={total_frags}, total_size={total_size}. Ignoring." f"Invalid metadata for {key}: total_frags={total_frags}, "
f"total_size={total_size}. Ignoring."
) )
return return
# Start a new transaction when frag==0
if frag == 0: if frag == 0:
self._cleanup_lingering_transactions(flow, tid) self._cleanup_lingering_transactions(flow, tid)
logger.debug( logger.debug(
@ -223,288 +316,9 @@ class SfpTransport:
) )
return return
# Copy into buffer self._buffers[key][pl_offset : pl_offset + bytes_to_copy] = payload[
self._buffers[key][pl_offset : pl_offset + bytes_to_copy] = payload[:bytes_to_copy] :bytes_to_copy
# If all fragments received, hand off to handler
if len(self._fragments[key]) == total_frags:
logger.debug(
f"Transaction complete for key={key}. Handing off to application layer."
)
completed_payload = self._buffers.pop(key)
self._fragments.pop(key)
handler = self._payload_handlers.get(flow)
if handler:
try:
handler(completed_payload)
except Exception:
logger.exception(
f"Error executing payload handler for flow {flow}."
)
else:
logger.warning(f"No payload handler registered for flow ID {flow}.")
def _send_ack(self, dest_addr: tuple, original_header_bytes: bytes):
log_prefix = f"{self._log_prefix} ACK"
if not self._socket:
return
try:
ack_header = bytearray(original_header_bytes)
flow = ack_header[SFPHeader.get_field_offset("SFP_FLOW")]
window_size = self._ack_config.get(flow, 0)
ack_header[SFPHeader.get_field_offset("SFP_DIRECTION")] = 0x3C # '<'
ack_header[SFPHeader.get_field_offset("SFP_WIN")] = window_size
original_flags = ack_header[SFPHeader.get_field_offset("SFP_FLAGS")]
ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] = (
original_flags | 0x01
) & ~0x02
self._socket.sendto(ack_header, dest_addr)
logger.debug(
f"{log_prefix} Sent to {dest_addr} for flow {chr(flow) if 32<=flow<=126 else flow} with WIN={window_size}."
)
except Exception:
logger.exception(f"{log_prefix} Failed to send to {dest_addr}.")
def _cleanup_lingering_transactions(self, current_flow: int, current_tid: int):
keys_to_remove = [
key
for key in self._fragments
if key[0] == current_flow and key[1] != current_tid
] ]
for key in keys_to_remove:
logger.warning(
f"Cleaning up lingering/incomplete transaction for key={key}."
)
self._fragments.pop(key, None)
self._buffers.pop(key, None)
"""
Provides a reusable transport layer for the Simple Fragmentation Protocol (SFP).
This module handles UDP socket communication, SFP header parsing, fragment
reassembly, and ACK generation. It is application-agnostic and uses a
callback/handler system to pass fully reassembled payloads to the
application layer based on the SFP_FLOW identifier.
"""
"""
SFP transport layer (clean, single definition).
"""
import socket
import logging
import threading
import time
from typing import Dict, Callable, Optional
from target_simulator.utils.network import create_udp_socket, close_udp_socket
from target_simulator.core.sfp_structures import SFPHeader
PayloadHandler = Callable[[bytearray], None]
LOG_LEVEL: Optional[int] = logging.INFO
logger = logging.getLogger(__name__)
if LOG_LEVEL is not None:
logger.setLevel(LOG_LEVEL)
class SfpTransport:
"""Manages SFP communication and payload reassembly."""
def __init__(
    self,
    host: str,
    port: int,
    payload_handlers: Dict[int, PayloadHandler],
    ack_config: Optional[Dict[int, int]] = None,
    raw_packet_callback: Optional[Callable[[bytes, tuple], None]] = None,
):
    """Store configuration and prepare (but do not start) the transport.

    Args:
        host: Local IP address to bind the UDP socket to.
        port: Local UDP port to bind to.
        payload_handlers: Map of SFP_FLOW id -> callback for completed payloads.
        ack_config: Optional map of SFP_FLOW id -> ACK window size.
        raw_packet_callback: Optional hook invoked with every raw datagram.
    """
    self._log_prefix = "[SfpTransport]"
    logger.info(f"{self._log_prefix} Initializing for {host}:{port}...")
    self._host = host
    self._port = port
    self._payload_handlers = payload_handlers
    self._ack_config = {} if ack_config is None else ack_config
    self._raw_packet_callback = raw_packet_callback

    # Runtime state: created in start(), torn down in shutdown().
    self._socket: Optional[socket.socket] = None
    self._receiver_thread: Optional[threading.Thread] = None
    self._stop_event = threading.Event()

    # Fragment-reassembly state, keyed by (flow, tid).
    self._fragments: Dict[tuple, Dict[int, int]] = {}
    self._buffers: Dict[tuple, bytearray] = {}

    flow_labels = [chr(k) if 32 <= k <= 126 else k for k in self._payload_handlers]
    logger.debug(
        f"{self._log_prefix} Registered handlers for flows: {flow_labels}"
    )
    logger.debug(f"{self._log_prefix} ACK window config: {self._ack_config}")
def start(self) -> bool:
    """Bind the UDP socket and launch the receiver thread.

    Returns:
        True when the receiver is running (including when it was already
        running), False if the socket could not be created/bound.
    """
    thread = self._receiver_thread
    if thread is not None and thread.is_alive():
        logger.warning(
            f"{self._log_prefix} Start called, but receiver is already running."
        )
        return True

    self._socket = create_udp_socket(self._host, self._port)
    if not self._socket:
        logger.critical(
            f"{self._log_prefix} Failed to create and bind socket. Cannot start."
        )
        return False

    self._stop_event.clear()
    thread = threading.Thread(
        target=self._receive_loop, name="SfpTransportThread", daemon=True
    )
    self._receiver_thread = thread
    thread.start()
    return True
def shutdown(self):
    """Stop the receiver thread and release the socket."""
    self._stop_event.set()

    sock = self._socket
    if sock:
        close_udp_socket(sock)
        self._socket = None

    thread = self._receiver_thread
    if thread and thread.is_alive():
        logger.debug(f"{self._log_prefix} Waiting for receiver thread to join...")
        thread.join(timeout=2.0)
        if thread.is_alive():
            logger.warning(
                f"{self._log_prefix} Receiver thread did not join cleanly."
            )

    logger.info(f"{self._log_prefix} Shutdown complete.")
def _receive_loop(self):
    """Blocking UDP receive loop; runs until the stop event is set."""
    log_prefix = f"{self._log_prefix} Loop"
    logger.info(f"{log_prefix} Starting receive loop.")
    while not self._stop_event.is_set():
        if not self._socket:
            logger.error(f"{log_prefix} Socket is not available. Stopping loop.")
            break
        try:
            datagram, sender = self._socket.recvfrom(65535)
            if not datagram:
                continue
            # Raw-packet hook failures are isolated so they cannot kill
            # the loop or prevent normal packet processing below.
            if self._raw_packet_callback:
                try:
                    self._raw_packet_callback(datagram, sender)
                except Exception:
                    logger.exception(
                        f"{log_prefix} raw_packet_callback raised an exception"
                    )
        except socket.timeout:
            continue
        except OSError:
            # A real socket error ends the loop unless we are shutting down.
            if not self._stop_event.is_set():
                logger.error(f"{log_prefix} Socket error.", exc_info=True)
            break
        except Exception:
            logger.exception(f"{log_prefix} Unexpected error in recvfrom.")
            time.sleep(0.01)
            continue
        self._process_packet(datagram, sender)
    logger.info(f"{log_prefix} Receive loop terminated.")
def _process_packet(self, data: bytes, addr: tuple):
header_size = SFPHeader.size()
if len(data) < header_size:
logger.warning(f"Packet from {addr} is too small for SFP header. Ignoring.")
return
try:
header = SFPHeader.from_buffer_copy(data)
except (ValueError, TypeError):
logger.error(f"Failed to parse SFP header from {addr}. Ignoring.")
return
# Extract fields
flow = header.SFP_FLOW
tid = header.SFP_TID
frag = header.SFP_FRAG
total_frags = header.SFP_TOTFRGAS
pl_size = header.SFP_PLSIZE
pl_offset = header.SFP_PLOFFSET
total_size = header.SFP_TOTSIZE
flags = header.SFP_FLAGS
try:
flow_name = chr(flow) if 32 <= flow < 127 else str(flow)
except Exception:
flow_name = str(flow)
# Preview payload for logging
payload_preview = data[header_size : header_size + 256]
try:
import json
json.loads(payload_preview.decode("utf-8", errors="strict"))
payload_preview_text = "JSON (preview)"
except Exception:
payload_preview_text = "Hex preview: " + " ".join(
f"{b:02X}" for b in payload_preview[:64]
)
logger.debug(
f"{self._log_prefix} Packet from {addr} - flow={flow_name} ({flow}), tid={tid}, flags=0x{flags:X}, frag={frag}/{total_frags}, pl_size={pl_size}, pl_offset={pl_offset}, total_size={total_size}. {payload_preview_text}"
)
key = (flow, tid)
if header.SFP_FLAGS & 0x01:
self._send_ack(addr, data[:header_size])
if total_frags == 0 or total_frags > 60000 or total_size <= 0:
logger.warning(
f"Invalid metadata for {key}: total_frags={total_frags}, total_size={total_size}. Ignoring."
)
return
if frag == 0:
self._cleanup_lingering_transactions(flow, tid)
logger.debug(
f"New transaction started for key={key}. Total size: {total_size} bytes."
)
self._fragments[key] = {}
try:
self._buffers[key] = bytearray(total_size)
except (MemoryError, ValueError):
logger.error(
f"Failed to allocate {total_size} bytes for key={key}. Ignoring transaction."
)
self._fragments.pop(key, None)
return
if key not in self._buffers or key not in self._fragments:
logger.debug(
f"Ignoring fragment {frag} for untracked transaction key={key}."
)
return
self._fragments[key][frag] = total_frags
payload = data[header_size:]
bytes_to_copy = min(pl_size, len(payload))
if (pl_offset + bytes_to_copy) > len(self._buffers[key]):
logger.error(
f"Payload for key={key}, frag={frag} would overflow buffer. Ignoring."
)
return
self._buffers[key][pl_offset : pl_offset + bytes_to_copy] = payload[:bytes_to_copy]
if len(self._fragments[key]) == total_frags: if len(self._fragments[key]) == total_frags:
logger.debug( logger.debug(
@ -533,10 +347,9 @@ class SfpTransport:
try: try:
ack_header = bytearray(original_header_bytes) ack_header = bytearray(original_header_bytes)
flow = ack_header[SFPHeader.get_field_offset("SFP_FLOW")] flow = ack_header[SFPHeader.get_field_offset("SFP_FLOW")]
window_size = self._ack_config.get(flow, 0) window_size = self._ack_config.get(flow, 0)
ack_header[SFPHeader.get_field_offset("SFP_DIRECTION")] = 0x3C # '<' ack_header[SFPHeader.get_field_offset("SFP_DIRECTION")] = ord("<")
ack_header[SFPHeader.get_field_offset("SFP_WIN")] = window_size ack_header[SFPHeader.get_field_offset("SFP_WIN")] = window_size
original_flags = ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] original_flags = ack_header[SFPHeader.get_field_offset("SFP_FLAGS")]
ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] = ( ack_header[SFPHeader.get_field_offset("SFP_FLAGS")] = (
@ -544,18 +357,15 @@ class SfpTransport:
) & ~0x02 ) & ~0x02
self._socket.sendto(ack_header, dest_addr) self._socket.sendto(ack_header, dest_addr)
flow_char = chr(flow) if 32 <= flow <= 126 else flow
logger.debug( logger.debug(
f"{log_prefix} Sent to {dest_addr} for flow {chr(flow) if 32<=flow<=126 else flow} with WIN={window_size}." f"{log_prefix} Sent to {dest_addr} for flow {flow_char} with WIN={window_size}."
) )
except Exception: except Exception:
logger.exception(f"{log_prefix} Failed to send to {dest_addr}.") logger.exception(f"{log_prefix} Failed to send to {dest_addr}.")
def _cleanup_lingering_transactions(self, current_flow: int, current_tid: int): def _cleanup_lingering_transactions(self, current_flow: int, current_tid: int):
"""Remove old, incomplete transactions for the same flow but different TID. """Remove old, incomplete transactions for the same flow."""
This prevents buffers from previous transactions (same flow) from
interfering with a newly started transaction.
"""
keys_to_remove = [ keys_to_remove = [
key key
for key in list(self._fragments.keys()) for key in list(self._fragments.keys())
@ -566,4 +376,4 @@ class SfpTransport:
f"Cleaning up lingering/incomplete transaction for key={key}." f"Cleaning up lingering/incomplete transaction for key={key}."
) )
self._fragments.pop(key, None) self._fragments.pop(key, None)
self._buffers.pop(key, None) self._buffers.pop(key, None)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,57 @@
"""Extract text from 'SUM7056227 Rev. A.pdf' and search for command keywords.

Tries pypdf first, then PyPDF2. Prints context around each keyword hit; if
nothing matches, dumps the first 3000 characters for manual inspection.

Exit codes: 0 success, 2 PDF missing, 3 no PDF reader available.
"""
import sys
from pathlib import Path

p = Path('SUM7056227 Rev. A.pdf')
if not p.exists():
    print('PDF not found at', p)
    sys.exit(2)

# Try multiple PDF libraries: prefer the maintained pypdf, fall back to PyPDF2.
reader = None
try:
    from pypdf import PdfReader
    reader = PdfReader(str(p))
except Exception:
    try:
        import PyPDF2
        reader = PyPDF2.PdfReader(str(p))
    except Exception as e:
        print('No suitable PDF reader installed:', e)
        sys.exit(3)

# Extract page texts, tolerating per-page extraction failures.
text = []
for pg in reader.pages:
    try:
        t = pg.extract_text() or ''
    except Exception:
        t = ''
    text.append(t)
full = '\n'.join(text)

# Search for relevant keywords; lowercase once instead of per keyword.
keywords = ['tgtinit', 'tgtset', 'tgtreset', 'command', 'parameters', 'format']
lowered = full.lower()
found = False
for kw in keywords:
    idx = lowered.find(kw)
    if idx != -1:
        found = True
        start = max(0, idx - 200)
        end = min(len(full), idx + 400)
        print('\n--- context around "{}" ---\n'.format(kw))
        print(full[start:end])

if not found:
    # Fallback: print first 3000 chars for manual inspection.
    print('\n--- No keywords found; printing first 3000 chars of PDF text ---\n')
    print(full[:3000])
else:
    # Also print the specific pages around TOC entries (38-41) for clarity.
    print('\n--- Explicitly printing pages 38-41 ---\n')
    for pi in range(max(0, 38 - 1), min(len(reader.pages), 41)):
        print(f'--- PAGE {pi+1} ---\n')
        try:
            print(reader.pages[pi].extract_text() or '')
        except Exception as e:
            print('ERROR extracting page', pi + 1, e)
sys.exit(0)

64
tools/udp_test_send.py Normal file
View File

@ -0,0 +1,64 @@
"""Loopback smoke test: send one SFP script command and verify reception.

Starts a plain UDP listener on LISTEN_PORT, then uses SfpTransport (bound to
CLIENT_BIND_PORT) to send a 'tgtreset -1' script command to it.

Exit codes: 0 packet received, 1 transport failed to start, 2 nothing received.
"""
import threading
import socket
import time
import sys
import os

# Ensure project root on PYTHONPATH when running from terminal if needed
# This script assumes you run it with PYTHONPATH set to the project root.
from target_simulator.core.sfp_transport import SfpTransport

LISTEN_IP = '127.0.0.1'
LISTEN_PORT = 60001
CLIENT_BIND_PORT = 60002

received = []


def listener():
    """Wait (up to 5 s) for one datagram on the test port and record it."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((LISTEN_IP, LISTEN_PORT))
    print(f"Listener bound on {LISTEN_IP}:{LISTEN_PORT}")
    s.settimeout(5.0)
    try:
        data, addr = s.recvfrom(65535)
        print(f"Listener received {len(data)} bytes from {addr}")
        print(data[:200])
        received.append((data, addr))
    except socket.timeout:
        print("Listener timed out waiting for data")
    finally:
        s.close()


def main() -> int:
    """Run the send/receive round trip; return the process exit code."""
    lt = threading.Thread(target=listener, daemon=True)
    lt.start()
    # Give the listener a moment to bind before sending.
    time.sleep(0.2)

    # Create transport bound to CLIENT_BIND_PORT and send a command.
    transport = SfpTransport(
        host=LISTEN_IP,
        port=CLIENT_BIND_PORT,
        payload_handlers={},
        raw_packet_callback=None,
    )
    if not transport.start():
        print("Failed to start SfpTransport (bind failed?)")
        return 1

    cmd = 'tgtreset -1'
    print(f"Sending command: {cmd}")
    sent = transport.send_script_command(cmd, (LISTEN_IP, LISTEN_PORT))
    print(f"send_script_command returned: {sent}")

    # Wait for the listener to either receive the packet or time out.
    lt.join(timeout=6.0)
    transport.shutdown()

    if received:
        print('SUCCESS: packet received by listener')
        data, addr = received[0]
        # Show a small header slice for manual inspection.
        print('raw bytes (first 80):', data[:80])
        return 0
    print('FAIL: no packet received')
    return 2


if __name__ == '__main__':
    sys.exit(main())

60
tools/udp_test_send2.py Normal file
View File

@ -0,0 +1,60 @@
"""Loopback test for SfpTransport.send_script_command using an ephemeral bind port."""
import threading
import socket
import time
import sys

from target_simulator.core.sfp_transport import SfpTransport

LISTEN_IP = '127.0.0.1'
LISTEN_PORT = 60110
# Use port 0 to let the OS pick a free ephemeral port to avoid bind conflicts during tests
CLIENT_BIND_PORT = 0

received = []


def listener(listen_ip, listen_port):
    """Wait (up to 5 s) for one datagram on the given address and record it."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.bind((listen_ip, listen_port))
    except Exception as e:
        print(f"Listener bind failed for {listen_ip}:{listen_port}: {e}")
        return
    print(f"Listener bound on {listen_ip}:{listen_port}")
    sock.settimeout(5.0)
    try:
        data, addr = sock.recvfrom(65535)
        print(f"Listener received {len(data)} bytes from {addr}")
        print(data[:256])
        received.append((data, addr))
    except socket.timeout:
        print("Listener timed out waiting for data")
    finally:
        sock.close()


if __name__ == '__main__':
    listener_thread = threading.Thread(
        target=listener, args=(LISTEN_IP, LISTEN_PORT), daemon=True
    )
    listener_thread.start()
    time.sleep(0.1)

    transport = SfpTransport(
        host=LISTEN_IP,
        port=CLIENT_BIND_PORT,
        payload_handlers={},
        raw_packet_callback=None,
    )
    ok = transport.start()
    print(f"Transport started: {ok}")
    time.sleep(0.05)

    cmd = 'tgtreset -1'
    print(f"Sending command: {cmd} to {(LISTEN_IP, LISTEN_PORT)}")
    sent = transport.send_script_command(cmd, (LISTEN_IP, LISTEN_PORT))
    print(f"send_script_command returned: {sent}")

    listener_thread.join(timeout=6.0)
    transport.shutdown()

    if received:
        print('SUCCESS: packet received by listener')
        data, addr = received[0]
        print('raw bytes (first 80):', data[:80])
        sys.exit(0)
    else:
        print('FAIL: no packet received')
        sys.exit(2)