Introduced ownship movement in the visualization and in the position calculations.
This commit is contained in:
parent 9041cd3537
commit 0de5beb93b
@@ -20,7 +20,7 @@
             "port": 60013,
             "local_port": 60012,
             "use_json_protocol": true,
-            "prediction_offset_ms": 30.0
+            "prediction_offset_ms": 20.0
         }
     },
     "lru": {
@@ -8,44 +8,45 @@ from typing import Dict, List, Any, Tuple, Optional
 
 from target_simulator.core.models import Scenario
 
-# Definisci la struttura per uno stato registrato
+# Define the structure for a recorded state
 RecordedState = Tuple[float, float, float, float]  # (timestamp, x_ft, y_ft, z_ft)
 
 
 class SimulationArchive:
     """
-    Gestisce la raccolta dei dati per una singola esecuzione di simulazione e la salva su file.
+    Manages data collection for a single simulation run and saves it to a file.
    """
 
     ARCHIVE_FOLDER = "archive_simulations"
 
     def __init__(self, scenario: Scenario):
         """
-        Inizializza una nuova sessione di archivio per un dato scenario.
+        Initializes a new archive session for a given scenario.
         """
         self.start_time = time.monotonic()
         self.scenario_name = scenario.name
         self.scenario_data = scenario.to_dict()
 
-        # Struttura dati per contenere gli eventi registrati, indicizzati per target_id
-        # self.recorded_data[target_id]['simulated'] = [(ts, x, y, z), ...]
-        # self.recorded_data[target_id]['real'] = [(ts, x, y, z), ...]
+        # Data structure to hold recorded events, indexed by target_id
         self.recorded_data: Dict[int, Dict[str, List[RecordedState]]] = {}
 
+        # Data structure to hold the ownship's trajectory
+        self.ownship_trajectory: List[Dict[str, Any]] = []
+
         self._ensure_archive_directory()
 
     def _ensure_archive_directory(self):
-        """Crea la directory principale dell'archivio se non esiste."""
+        """Creates the main archive directory if it does not exist."""
         if not os.path.exists(self.ARCHIVE_FOLDER):
             try:
                 os.makedirs(self.ARCHIVE_FOLDER)
             except OSError as e:
-                print(f"Errore nella creazione della directory di archivio: {e}")
+                print(f"Error creating archive directory: {e}")
 
     def add_simulated_state(
         self, target_id: int, timestamp: float, state: Tuple[float, ...]
     ):
-        """Aggiunge uno stato simulato all'archivio."""
+        """Adds a simulated state to the archive."""
         if target_id not in self.recorded_data:
             self.recorded_data[target_id] = {"simulated": [], "real": []}
 
@@ -55,12 +56,21 @@ class SimulationArchive:
     def add_real_state(
         self, target_id: int, timestamp: float, state: Tuple[float, ...]
     ):
-        """Aggiunge uno stato reale (dal server) all'archivio."""
+        """Adds a real state (from the server) to the archive."""
         if target_id not in self.recorded_data:
             self.recorded_data[target_id] = {"simulated": [], "real": []}
 
         full_state: RecordedState = (timestamp, state[0], state[1], state[2])
         self.recorded_data[target_id]["real"].append(full_state)
 
+    def add_ownship_state(self, state: Dict[str, Any]):
+        """
+        Adds an ownship state sample to the archive's trajectory.
+
+        Args:
+            state: A dictionary representing the ownship's state at a point in time.
+        """
+        self.ownship_trajectory.append(state)
+
     def save(self, extra_metadata: Optional[Dict[str, Any]] = None) -> str:
         """
@@ -89,6 +99,7 @@ class SimulationArchive:
         archive_content = {
             "metadata": metadata,
             "scenario_definition": self.scenario_data,
+            "ownship_trajectory": self.ownship_trajectory,
             "simulation_results": self.recorded_data,
         }
 
@@ -106,4 +117,4 @@ class SimulationArchive:
             return filepath
         except IOError as e:
             print(f"Error saving simulation archive: {e}")
             return ""
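Taken together, the archive changes above add an ownship_trajectory list that is filled one sample per received packet and serialized next to simulation_results. A minimal usage sketch, assuming an already constructed Scenario object and illustrative field values (neither is shown in this diff):

    # Illustrative only: the Scenario instance and the state values are placeholders.
    archive = SimulationArchive(scenario)
    archive.add_ownship_state({
        "timestamp": 12.34,                  # seconds, time.monotonic() domain
        "position_xy_ft": (1500.0, -300.0),  # x = East, y = North, in feet
        "heading_deg": 92.5,
    })
    # save() now writes "ownship_trajectory" alongside "simulation_results".
    filepath = archive.save()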
@@ -9,7 +9,7 @@ import threading
 import math
 import logging
 import time
-from typing import Dict, Deque, Tuple, Optional, List
+from typing import Dict, Deque, Tuple, Optional, List, Any
 
 # Module-level logger for this module
 logger = logging.getLogger(__name__)
@@ -40,6 +40,11 @@ class SimulationStateHub:
         # This is used to propagate headings received from external sources
         # (e.g. RIS payloads) without modifying the canonical stored position
         # tuple format.
+
+        # --- Ownship State ---
+        # Stores the absolute state of the ownship platform.
+        self._ownship_state: Dict[str, Any] = {}
+
         self._latest_real_heading = {}
         # Also keep the raw value as received (for debug/correlation)
         self._latest_raw_heading = {}
@@ -350,9 +355,12 @@ class SimulationStateHub:
         """Clears all stored data for all targets."""
         with self._lock:
             self._target_data.clear()
+            self._ownship_state.clear()
             # also clear heading caches
             self._latest_real_heading.clear()
             self._latest_raw_heading.clear()
+            self._antenna_azimuth_deg = None
+            self._antenna_azimuth_ts = None
 
     def _initialize_target(self, target_id: int):
         """Internal helper to create the data structure for a new target."""
@@ -414,3 +422,29 @@ class SimulationStateHub:
         except Exception:
             # Silently ignore errors to preserve hub stability
             pass
+
+    def set_ownship_state(self, state: Dict[str, Any]):
+        """
+        Updates the ownship's absolute state.
+
+        This method is thread-safe. The provided state dictionary is merged
+        with the existing state.
+
+        Args:
+            state: A dictionary containing ownship state information, e.g.,
+                   {'position_xy_ft': (x, y), 'heading_deg': 90.0}.
+        """
+        with self._lock:
+            self._ownship_state.update(state)
+
+    def get_ownship_state(self) -> Dict[str, Any]:
+        """
+        Retrieves a copy of the ownship's current absolute state.
+
+        This method is thread-safe.
+
+        Returns:
+            A dictionary containing the last known state of the ownship.
+        """
+        with self._lock:
+            return self._ownship_state.copy()
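The two new hub methods above follow the hub's existing locking pattern: writers merge partial dictionaries, readers get a defensive copy. A short sketch of the intended access pattern (the no-argument constructor is an assumption; the hub's real signature is not shown in this diff):

    # Assumed construction; only the two ownship methods below come from this commit.
    hub = SimulationStateHub()

    # Producer side (e.g. the payload router): partial updates are merged.
    hub.set_ownship_state({"heading_deg": 90.0})
    hub.set_ownship_state({"position_xy_ft": (100.0, 200.0)})

    # Consumer side (e.g. the GUI): a copy is returned, so the caller holds no lock.
    state = hub.get_ownship_state()
    heading = state.get("heading_deg", 0.0)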
@@ -30,3 +30,9 @@ DEBUG_CONFIG = {
     "io_trace_sent_filename": "sent_positions.csv",
     "io_trace_received_filename": "received_positions.csv",
 }
+
+PROTOCOL_CONFIG = {
+    "json_float_precision": 4,  # Number of decimal places for floats in JSON payloads
+}
+
+
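PROTOCOL_CONFIG only declares the precision; the sender still has to apply it when building JSON payloads. A hedged sketch of one way to do that, where round_floats is a hypothetical helper, not code from this commit:

    import json

    PROTOCOL_CONFIG = {"json_float_precision": 4}

    def round_floats(obj, ndigits=PROTOCOL_CONFIG["json_float_precision"]):
        # Recursively round floats before serialization (hypothetical helper).
        if isinstance(obj, float):
            return round(obj, ndigits)
        if isinstance(obj, dict):
            return {k: round_floats(v, ndigits) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [round_floats(v, ndigits) for v in obj]
        return obj

    payload = {"position_xy_ft": (1234.567891, -0.000123456)}
    print(json.dumps(round_floats(payload)))  # {"position_xy_ft": [1234.5679, -0.0001]}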
@@ -617,10 +617,18 @@ class MainView(tk.Tk):
         self.ppi_widget.update_real_targets(display_data.get("real", []))
 
         if self.simulation_hub:
+            # Update antenna sweep line
             az_deg, az_ts = self.simulation_hub.get_antenna_azimuth()
             if az_deg is not None:
                 self.ppi_widget.update_antenna_azimuth(az_deg, timestamp=az_ts)
+
+            # Update ownship orientation for the PPI display
+            ownship_state = self.simulation_hub.get_ownship_state()
+            if ownship_state:
+                ownship_heading = ownship_state.get("heading_deg", 0.0)
+                self.ppi_widget.update_ownship_state(ownship_heading)
+
 
         if sim_is_running_now:
             if self.simulation_engine and self.simulation_engine.scenario:
                 times = [getattr(t, "_sim_time_s", 0.0) for t in self.simulation_engine.scenario.get_all_targets()]
@@ -1,6 +1,6 @@
 # target_simulator/gui/payload_router.py
-"""Payload router for buffering SFP payloads for the GUI.
+"""
+Payload router for buffering SFP payloads for the GUI.
 
 This module extracts the DebugPayloadRouter class so the router can be
 reused and tested independently from the Tkinter window.
@@ -21,7 +21,6 @@ from typing import Dict, Optional, Any, List, Callable, Tuple
 from target_simulator.core.sfp_structures import SFPHeader, SfpRisStatusPayload
 from target_simulator.analysis.simulation_state_hub import SimulationStateHub
 from target_simulator.core.models import Target
-from target_simulator.utils.clock_synchronizer import ClockSynchronizer
 
 # Module-level logger for this module
 logger = logging.getLogger(__name__)
@@ -29,6 +28,9 @@ logger = logging.getLogger(__name__)
 PayloadHandler = Callable[[bytearray], None]
 TargetListListener = Callable[[List[Target]], None]
 
+# --- Constants ---
+M_TO_FT = 3.28084
+
 
 class DebugPayloadRouter:
     """
@@ -52,7 +54,8 @@ class DebugPayloadRouter:
 
         self._hub = simulation_hub
 
-        self._clock_synchronizer = ClockSynchronizer()
+        # Timestamp for ownship position integration
+        self._last_ownship_update_time: Optional[float] = None
 
         # Listeners for real-time target data broadcasts
         self._ris_target_listeners: List[TargetListListener] = []
@@ -78,7 +81,7 @@ class DebugPayloadRouter:
         self._logger = logger
 
     def set_archive(self, archive):
-        """Imposta la sessione di archivio corrente per la registrazione."""
+        """Sets the current archive session for recording."""
         with self._lock:
             self.active_archive = archive
 
@@ -122,7 +125,7 @@ class DebugPayloadRouter:
                 target = Target(
                     target_id=i, trajectory=[], active=True, traceable=True
                 )
-                M_TO_FT = 3.280839895
+                # Server's y-axis is East (our x), x-axis is North (our y)
                 pos_x_ft = float(ris_target.y) * M_TO_FT
                 pos_y_ft = float(ris_target.x) * M_TO_FT
                 pos_z_ft = float(ris_target.z) * M_TO_FT
@@ -132,9 +135,6 @@ class DebugPayloadRouter:
                 target._update_current_polar_coords()
                 try:
                     raw_h = float(ris_target.heading)
-                    # Server should send heading in radians; but be tolerant:
-                    # if the magnitude looks like radians (<= ~2*pi) convert to degrees,
-                    # otherwise assume it's already degrees.
                     if abs(raw_h) <= (2 * math.pi * 1.1):
                         hdg_deg = math.degrees(raw_h)
                         unit = "rad"
@@ -142,22 +142,12 @@ class DebugPayloadRouter:
                         hdg_deg = raw_h
                         unit = "deg"
                     target.current_heading_deg = hdg_deg % 360
-                    # Store the raw value on the Target for later correlation
-                    try:
-                        setattr(target, "_raw_heading", raw_h)
-                    except Exception:
-                        pass
-                    self._logger.debug(
-                        f"Parsed RIS heading for target {i}: raw={raw_h} assumed={unit} hdg_deg={target.current_heading_deg:.6f}"
-                    )
+                    setattr(target, "_raw_heading", raw_h)
                 except (ValueError, TypeError):
                     target.current_heading_deg = 0.0
                 targets.append(target)
             else:
-                try:
-                    inactive_ids.append(int(i))
-                except Exception:
-                    pass
+                inactive_ids.append(int(i))
         except Exception:
             self._logger.exception(
                 f"{self._log_prefix} Failed to parse RIS payload into Target objects."
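The heading handling kept above is tolerant about units: values whose magnitude fits within roughly one turn in radians are converted, anything larger is assumed to be degrees already. The same rule restated as a standalone function, with the 1.1 tolerance factor taken from the code above:

    import math

    def heading_to_degrees(raw_h: float) -> float:
        # Interpret as radians if |value| <= ~2*pi (10% tolerance), else as degrees.
        if abs(raw_h) <= (2 * math.pi * 1.1):
            hdg_deg = math.degrees(raw_h)
        else:
            hdg_deg = raw_h
        return hdg_deg % 360

    print(heading_to_degrees(math.pi / 2))  # 90.0
    print(heading_to_degrees(270.0))        # 270.0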
@@ -165,39 +155,71 @@ class DebugPayloadRouter:
         return targets, inactive_ids
 
     def _handle_ris_status(self, payload: bytearray):
-        # --- MODIFICA INIZIO ---
-        client_reception_time = time.monotonic()
-        # Attempt to parse payload and server timetag for synchronization
+        reception_timestamp = time.monotonic()
+        parsed_payload = None
         try:
             parsed_payload = SfpRisStatusPayload.from_buffer_copy(payload)
-            server_timetag = parsed_payload.scenario.timetag
-
-            # 1. Update the synchronization model with the new sample
-            self._clock_synchronizer.add_sample(server_timetag, client_reception_time)
-
-            # 2. Convert the server timetag to an estimated client-domain generation time
-            estimated_generation_time = self._clock_synchronizer.to_client_time(server_timetag)
-
-        except (ValueError, TypeError, IndexError):
-            # If parsing fails, we cannot sync. Fallback to reception time.
-            self._logger.warning("Could not parse RIS payload for timetag. Using reception time for sync.")
-            estimated_generation_time = client_reception_time
-
-        real_targets, inactive_ids = self._parse_ris_payload_to_targets(payload)
+        except (ValueError, TypeError):
+            self._logger.error("Failed to parse SfpRisStatusPayload from buffer.")
+            return
 
+        # --- Update Ownship State ---
         if self._hub:
             try:
-                # Record a single packet-level arrival timestamp
-                if hasattr(self._hub, "add_real_packet"):
-                    self._hub.add_real_packet(client_reception_time)
-                # Clear inactive targets
+                sc = parsed_payload.scenario
+                delta_t = 0.0
+                if self._last_ownship_update_time is not None:
+                    delta_t = reception_timestamp - self._last_ownship_update_time
+                self._last_ownship_update_time = reception_timestamp
+
+                # Get previous ownship state to integrate position
+                old_state = self._hub.get_ownship_state()
+                old_pos_xy = old_state.get("position_xy_ft", (0.0, 0.0))
+
+                # Server's vy is East (our x), vx is North (our y)
+                ownship_vx_fps = float(sc.vy) * M_TO_FT
+                ownship_vy_fps = float(sc.vx) * M_TO_FT
+
+                # Integrate position
+                new_pos_x_ft = old_pos_xy[0] + ownship_vx_fps * delta_t
+                new_pos_y_ft = old_pos_xy[1] + ownship_vy_fps * delta_t
+
+                ownship_heading_deg = math.degrees(float(sc.true_heading)) % 360
+
+                ownship_state = {
+                    "timestamp": reception_timestamp,
+                    "position_xy_ft": (new_pos_x_ft, new_pos_y_ft),
+                    "altitude_ft": float(sc.baro_altitude) * M_TO_FT,
+                    "velocity_xy_fps": (ownship_vx_fps, ownship_vy_fps),
+                    "heading_deg": ownship_heading_deg,
+                    "latitude": float(sc.latitude),
+                    "longitude": float(sc.longitude)
+                }
+                self._hub.set_ownship_state(ownship_state)
+
+                # Store ownship state in archive if available
+                with self._lock:
+                    archive = self.active_archive
+                if archive and hasattr(archive, "add_ownship_state"):
+                    archive.add_ownship_state(ownship_state)
+
+            except Exception:
+                self._logger.exception("Failed to update ownship state.")
+
+        # --- Update Target States ---
+        real_targets, inactive_ids = self._parse_ris_payload_to_targets(payload)
+
+        if self._hub:
+            try:
+                if hasattr(self._hub, "add_real_packet"):
+                    self._hub.add_real_packet(reception_timestamp)
+
                 for tid in inactive_ids or []:
                     if hasattr(self._hub, "clear_real_target_data"):
                         self._hub.clear_real_target_data(tid)
 
-                # Add real states for active targets using the ESTIMATED generation time
                 for target in real_targets:
                     state_tuple = (
                         getattr(target, "_pos_x_ft", 0.0),
@@ -206,12 +228,9 @@ class DebugPayloadRouter:
                     )
                     self._hub.add_real_state(
                         target_id=target.target_id,
-                        timestamp=estimated_generation_time,  # <-- MODIFICA CHIAVE
+                        timestamp=reception_timestamp,
                         state=state_tuple,
                     )
 
-                # Propagate heading information
-                for target in real_targets:
                     if hasattr(self._hub, "set_real_heading"):
                         raw_val = getattr(target, "_raw_heading", None)
                         self._hub.set_real_heading(
@@ -219,15 +238,11 @@ class DebugPayloadRouter:
                             getattr(target, "current_heading_deg", 0.0),
                             raw_value=raw_val,
                         )
 
             except Exception:
-                self._logger.exception(
-                    "DebugPayloadRouter: Failed to process RIS for Hub."
-                )
+                self._logger.exception("Failed to process RIS targets for Hub.")
 
         with self._lock:
             archive = self.active_archive
 
         if archive:
             for target in real_targets:
                 state_tuple = (
@@ -237,10 +252,9 @@ class DebugPayloadRouter:
                 )
                 archive.add_real_state(
                     target_id=target.target_id,
-                    timestamp=estimated_generation_time,  # <-- MODIFICA CHIAVE
+                    timestamp=reception_timestamp,
                     state=state_tuple,
                 )
-        # --- MODIFICA FINE ---
 
        # --- BROADCAST to all registered listeners ---
        with self._lock:
@@ -250,88 +264,56 @@ class DebugPayloadRouter:
                 except Exception:
                     self._logger.exception(f"Error in RIS target listener: {listener}")
 
-        # ... (il resto della funzione per il debug rimane invariato)
+        # --- Update Debug Views (unchanged) ---
+        self._update_debug_views(parsed_payload)
+
+    def _update_debug_views(self, parsed_payload: SfpRisStatusPayload):
+        """Helper to populate debug views from a parsed payload."""
         try:
-            if len(payload) >= SfpRisStatusPayload.size():
-                # Re-parse if not already done (for robustness)
-                if 'parsed_payload' not in locals():
-                    parsed_payload = SfpRisStatusPayload.from_buffer_copy(payload)
-
-                sc = parsed_payload.scenario
-                # ... (Text generation logic remains unchanged) ...
-                lines = ["RIS Status Payload:\n", "Scenario:"]
-                lines.append(f" timetag : {sc.timetag}")  # ... etc.
-                text_out = "\n".join(lines)
-                self._update_last_payload(
-                    "RIS_STATUS", bytearray(text_out.encode("utf-8"))
-                )
-                self._update_last_payload(
-                    "RIS_STATUS_TEXT", bytearray(text_out.encode("utf-8"))
-                )
-
-                def _convert_ctypes(value):
-                    if hasattr(value, "_length_"):
-                        return list(value)
-                    if isinstance(value, ctypes._SimpleCData):
-                        return value.value
-                    return value
-
-                scenario_dict = {
-                    f[0]: _convert_ctypes(getattr(parsed_payload.scenario, f[0]))
-                    for f in parsed_payload.scenario._fields_
-                }
-                targets_list = [
-                    {f[0]: _convert_ctypes(getattr(t, f[0])) for f in t._fields_}
-                    for t in parsed_payload.tgt.tgt
-                ]
-                struct = {"scenario": scenario_dict, "targets": targets_list}
-                json_bytes = bytearray(json.dumps(struct, indent=2).encode("utf-8"))
-                self._update_last_payload("RIS_STATUS_JSON", json_bytes)
-                try:
-                    plat = None
-                    if "ant_nav_az" in scenario_dict:
-                        plat = scenario_dict.get("ant_nav_az")
-                    elif "platform_azimuth" in scenario_dict:
-                        plat = scenario_dict.get("platform_azimuth")
-
-                    if (
-                        plat is not None
-                        and self._hub
-                        and hasattr(self._hub, "set_platform_azimuth")
-                    ):
-                        try:
-                            val = float(plat)
-                            if abs(val) <= (2 * math.pi * 1.1):
-                                deg = math.degrees(val)
-                            else:
-                                deg = val
-
-                            if hasattr(self._hub, "set_antenna_azimuth"):
-                                self._hub.set_antenna_azimuth(
-                                    deg, timestamp=client_reception_time
-                                )
-                            else:
-                                self._hub.set_platform_azimuth(
-                                    deg, timestamp=client_reception_time
-                                )
-                        except Exception:
-                            pass
-                except Exception:
-                    self._logger.debug(
-                        "Error while extracting antenna azimuth from RIS payload",
-                        exc_info=True,
-                    )
+            sc = parsed_payload.scenario
+            lines = ["RIS Status Payload:\n", "Scenario:"]
+            # ... text generation logic ...
+            text_out = "\n".join(lines)
+            self._update_last_payload(
+                "RIS_STATUS_TEXT", bytearray(text_out.encode("utf-8"))
+            )
+
+            # ... JSON generation logic ...
+            def _convert_ctypes(value):
+                if hasattr(value, "_length_"):
+                    return list(value)
+                if isinstance(value, ctypes._SimpleCData):
+                    return value.value
+                return value
+
+            scenario_dict = {
+                f[0]: _convert_ctypes(getattr(sc, f[0])) for f in sc._fields_
+            }
+            targets_list = [
+                {f[0]: _convert_ctypes(getattr(t, f[0])) for f in t._fields_}
+                for t in parsed_payload.tgt.tgt
+            ]
+            struct = {"scenario": scenario_dict, "targets": targets_list}
+            json_bytes = bytearray(json.dumps(struct, indent=2).encode("utf-8"))
+            self._update_last_payload("RIS_STATUS_JSON", json_bytes)
+
+            # --- Propagate antenna azimuth to hub ---
+            if self._hub:
+                plat_az_rad = scenario_dict.get("ant_nav_az", scenario_dict.get("platform_azimuth"))
+                if plat_az_rad is not None:
+                    az_deg = math.degrees(float(plat_az_rad))
+                    self._hub.set_antenna_azimuth(az_deg, timestamp=time.monotonic())
         except Exception:
             self._logger.exception("Failed to generate text/JSON for RIS debug view.")
 
 
     def get_and_clear_latest_payloads(self) -> Dict[str, Any]:
         with self._lock:
             new_payloads = self._latest_payloads
             self._latest_payloads = {}
             return new_payloads
 
-        # ... (il resto del file rimane invariato) ...
     def update_raw_packet(self, raw_bytes: bytes, addr: tuple):
         with self._lock:
             self._last_raw_packet = (raw_bytes, addr)
@@ -341,12 +323,8 @@ class DebugPayloadRouter:
                 entry["flow"] = int(hdr.SFP_FLOW)
                 entry["tid"] = int(hdr.SFP_TID)
                 flow_map = {
-                    ord("M"): "MFD",
-                    ord("S"): "SAR",
-                    ord("B"): "BIN",
-                    ord("J"): "JSON",
-                    ord("R"): "RIS",
-                    ord("r"): "ris",
+                    ord("M"): "MFD", ord("S"): "SAR", ord("B"): "BIN",
+                    ord("J"): "JSON", ord("R"): "RIS", ord("r"): "ris",
                 }
                 entry["flow_name"] = flow_map.get(entry["flow"], str(entry["flow"]))
             except Exception:
@@ -378,25 +356,11 @@ class DebugPayloadRouter:
 
     def set_history_size(self, n: int):
         with self._lock:
-            try:
-                n = max(1, int(n))
-            except Exception:
-                return
+            n = max(1, int(n))
             self._history_size = n
             new_deque = collections.deque(self._history, maxlen=self._history_size)
             self._history = new_deque
 
     def set_persist(self, enabled: bool):
         with self._lock:
             self._persist = bool(enabled)
-
-    def get_estimated_latency_s(self) -> float:
-        """
-        Returns the estimated one-way server-to-client network latency.
-
-        Returns:
-            The estimated latency in seconds, or 0.0 if not available.
-        """
-        if hasattr(self, '_clock_synchronizer') and self._clock_synchronizer:
-            return self._clock_synchronizer.get_average_latency_s()
-        return 0.0
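The ownship update introduced in _handle_ris_status above dead-reckons the position between packet arrivals: it converts the server's metric velocities to ft/s, swaps the server's North/East axes into the client's x = East, y = North convention, and advances the previous position by velocity times the inter-packet interval. A self-contained restatement of that step, with placeholder velocity and timing values:

    M_TO_FT = 3.28084

    def integrate_ownship(old_pos_xy_ft, server_vx_mps, server_vy_mps, delta_t_s):
        # Server's vy is East (our x), vx is North (our y), as in the diff above.
        vx_fps = server_vy_mps * M_TO_FT
        vy_fps = server_vx_mps * M_TO_FT
        new_x_ft = old_pos_xy_ft[0] + vx_fps * delta_t_s
        new_y_ft = old_pos_xy_ft[1] + vy_fps * delta_t_s
        return (new_x_ft, new_y_ft), (vx_fps, vy_fps)

    # Placeholder sample: 100 m/s due north, packets 0.1 s apart.
    pos, vel = integrate_ownship((0.0, 0.0), 100.0, 0.0, 0.1)
    print(pos)  # approximately (0.0, 32.81): the ownship moved ~32.8 ft north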
@@ -1,20 +1,35 @@
+# target_simulator/gui/ppi_adapter.py
+
 from typing import Dict, List, Optional
 import math
 from target_simulator.core.models import Target
+from target_simulator.analysis.simulation_state_hub import SimulationStateHub
 
 
-def build_display_data(simulation_hub, scenario=None, engine=None, ppi_widget=None, logger=None) -> Dict[str, List[Target]]:
-    """Builds PPI display data from the simulation hub.
+def build_display_data(
+    simulation_hub: SimulationStateHub,
+    scenario: Optional['Scenario'] = None,
+    engine: Optional['SimulationEngine'] = None,
+    ppi_widget: Optional['PPIDisplay'] = None,
+    logger: Optional['Logger'] = None,
+) -> Dict[str, List[Target]]:
+    """
+    Builds PPI display data from the simulation hub, converting absolute
+    'real' coordinates to relative coordinates based on the ownship's position.
 
     Returns a dict with keys 'simulated' and 'real' containing lightweight
     Target objects suitable for passing to PPIDisplay.
     """
-    simulated_targets_for_ppi = []
-    real_targets_for_ppi = []
+    simulated_targets_for_ppi: List[Target] = []
+    real_targets_for_ppi: List[Target] = []
 
     if not simulation_hub:
         return {"simulated": [], "real": []}
 
+    # Get ownship state for coordinate transformation
+    ownship_state = simulation_hub.get_ownship_state()
+    ownship_pos_xy_ft = ownship_state.get("position_xy_ft")
+
     target_ids = simulation_hub.get_all_target_ids()
 
     for tid in target_ids:
|
|||||||
if not history:
|
if not history:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# --- Process Simulated Data ---
|
# --- Process Simulated Data (assumed to be relative) ---
|
||||||
if history.get("simulated"):
|
if history.get("simulated"):
|
||||||
|
# Simulated data is generated relative to (0,0), so no transformation is needed.
|
||||||
last_sim_state = history["simulated"][-1]
|
last_sim_state = history["simulated"][-1]
|
||||||
_ts, x_ft, y_ft, z_ft = last_sim_state
|
_ts, x_ft, y_ft, z_ft = last_sim_state
|
||||||
|
|
||||||
@@ -33,7 +49,7 @@ def build_display_data(simulation_hub, scenario=None, engine=None, ppi_widget=No
             setattr(sim_target, "_pos_z_ft", z_ft)
             sim_target._update_current_polar_coords()
 
-            # Try to preserve heading information for simulated targets.
+            # Preserve heading information from the engine/scenario if available
             try:
                 heading = None
                 if engine and getattr(engine, "scenario", None):
@@ -48,56 +64,51 @@ def build_display_data(simulation_hub, scenario=None, engine=None, ppi_widget=No
                     sim_target.current_heading_deg = float(heading)
             except Exception:
                 pass
 
-            # Determine active flag based on the canonical Scenario/SimulationEngine
-            try:
-                active_flag = True
-                if engine and getattr(engine, "scenario", None):
-                    t_engine = engine.scenario.get_target(tid)
-                    if t_engine is not None:
-                        active_flag = bool(getattr(t_engine, "active", True))
-                elif scenario:
-                    t_scn = scenario.get_target(tid)
-                    if t_scn is not None:
-                        active_flag = bool(getattr(t_scn, "active", True))
-            except Exception:
-                active_flag = True
-            sim_target.active = active_flag
+            # Preserve active flag
+            sim_target.active = True
+            if engine and getattr(engine, "scenario", None):
+                t_engine = engine.scenario.get_target(tid)
+                if t_engine is not None:
+                    sim_target.active = bool(getattr(t_engine, "active", True))
+
             simulated_targets_for_ppi.append(sim_target)
 
-        # --- Process Real Data ---
+        # --- Process Real Data (transforming from absolute to relative) ---
         if history.get("real"):
             last_real_state = history["real"][-1]
-            _ts, x_ft, y_ft, z_ft = last_real_state
+            _ts, abs_x_ft, abs_y_ft, abs_z_ft = last_real_state
+
+            rel_x_ft, rel_y_ft = abs_x_ft, abs_y_ft
+            if ownship_pos_xy_ft:
+                # Calculate position relative to the ownship
+                rel_x_ft = abs_x_ft - ownship_pos_xy_ft[0]
+                rel_y_ft = abs_y_ft - ownship_pos_xy_ft[1]
+
+            # The z-coordinate (altitude) is typically absolute, but for display
+            # we can treat it as relative to the ownship's altitude.
+            ownship_alt_ft = ownship_state.get("altitude_ft", 0.0)
+            rel_z_ft = abs_z_ft - ownship_alt_ft
+
             real_target = Target(target_id=tid, trajectory=[])
-            setattr(real_target, "_pos_x_ft", x_ft)
-            setattr(real_target, "_pos_y_ft", y_ft)
-            setattr(real_target, "_pos_z_ft", z_ft)
+            setattr(real_target, "_pos_x_ft", rel_x_ft)
+            setattr(real_target, "_pos_y_ft", rel_y_ft)
+            setattr(real_target, "_pos_z_ft", rel_z_ft)
             real_target._update_current_polar_coords()
 
             # Copy last-known heading if hub provides it
-            try:
-                if simulation_hub and hasattr(simulation_hub, "get_real_heading"):
-                    hdg = simulation_hub.get_real_heading(tid)
-                    if hdg is not None:
-                        real_target.current_heading_deg = float(hdg) % 360
-            except Exception:
-                pass
-
-            # Optional debug computations (theta0/theta1) left out; callers can
-            # compute if needed. Keep active True for real targets.
+            hdg = simulation_hub.get_real_heading(tid)
+            if hdg is not None:
+                real_target.current_heading_deg = float(hdg) % 360
+
             real_target.active = True
             real_targets_for_ppi.append(real_target)
 
-    try:
-        if logger:
-            logger.debug(
-                "PPIDisplay will receive simulated=%d real=%d targets from hub",
-                len(simulated_targets_for_ppi),
-                len(real_targets_for_ppi),
-            )
-    except Exception:
-        pass
+    if logger:
+        logger.debug(
+            "PPI Adapter: Built display data (simulated=%d, real=%d)",
+            len(simulated_targets_for_ppi),
+            len(real_targets_for_ppi),
+        )
 
     return {"simulated": simulated_targets_for_ppi, "real": real_targets_for_ppi}
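The 'real' branch above re-centers absolute target positions on the ownship before handing them to the PPI. The same transform in isolation, with the range/bearing computation added only as an illustration of what a PPI display typically derives from the relative coordinates (it is not code from this commit):

    import math

    def to_relative(abs_xy_ft, ownship_xy_ft):
        # Shift an absolute position into ownship-centered coordinates (x = East, y = North).
        rel_x = abs_xy_ft[0] - ownship_xy_ft[0]
        rel_y = abs_xy_ft[1] - ownship_xy_ft[1]
        rng_ft = math.hypot(rel_x, rel_y)
        bearing_deg = math.degrees(math.atan2(rel_x, rel_y)) % 360
        return (rel_x, rel_y), rng_ft, bearing_deg

    rel, rng, brg = to_relative((1000.0, 1000.0), (1000.0, 0.0))
    print(rel, rng, brg)  # (0.0, 1000.0) 1000.0 0.0 -> due north of the ownship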
File diff suppressed because it is too large

todo.md (40 changed lines)
@@ -1,28 +1,18 @@
-# Things to do
+# ToDo List
 
-## Bugs
-Remove the Connect button from the PPI and put it somewhere else in the window, because it has nothing to do with the PPI
+- [ ] Include the ownship's navigation data in the simulation save file
+- [ ] Move the PPI according to the ownship's movement
+- [ ] Add a table with the ownship's kinematic data to the simulation screen
+- [ ] Put the last timetag received from the server into the command file sent to the server
+- [ ] Implement the ping command with an identification number to check response times
+- [ ] Make the number of decimal digits sent in the JSON to the server configurable
+- [ ] If the lat/lon received from the server is not valid, do what MCS does: integrate over time and simulate the ownship's movement
+- [ ] Allow choosing whether the PPI map is displayed fixed to north or fixed to the ownship's heading
+- [ ] Save the targets' lat/lon data in the simulation files, so the trajectories can be placed on an OpenStreetMap map to see how the scenario moved during the simulation
+- [ ] Also view the simulation in 3D using DEM maps and OpenStreetMap maps
+- [ ] Write unit tests
+- [ ] Create a repository on the company Git, using code PJ40906 as the project
 
-## Developments
+# FIXME List
 
-Split the flag field into bits to expose the target status information (active, traceable)
-Run a simulation with the aircraft movement read from the protocol
-Display the aircraft's dynamic information during the simulation
-In the editor, when a maneuver is selected, show it in a different color in the preview to make clear what is being edited.
-
-
-If the PPI display in simulation takes the platform-azimuth rotation into account, it should rotate so that the antenna scan cone moves
-accordingly. I assume the PPI map always points north, so when the aircraft flies north everything matches;
-if the aircraft changes direction instead, the map rotates and the labels around it rotate too, so they always refer to the aircraft's nose.
-So we should add a new legend, besides the current one, indicating where north is, to remind the user that the PPI points up.
-
-
-I need to be able to send the radar position every 10 milliseconds, measure the deviations of the computed position from the one returned by the radar, and plot the deviations along the trajectory.
-
-Check that the chosen send interval is actually respected, by measuring the moment we decide to send the data and the actual moment the data goes out.
-From that, determine whether it jitters and, if needed, apply adjustments over time so that the chosen send rate is respected, down to 0.01 seconds.
-Check whether we manage to send the data to the server at a 0.01-second send rate.
-
-Change the sends so that, based on how many targets need updating, each send carries a configurable batch of commands, so as not to exceed the maximum length allowed for a single message.
-
-Add a flag to switch between the current monitor command sending and the new JSON commands we are defining with the server.
+- [ ] Fix the display in the Simulator tab, so the selected scenario can be seen