optimization fine-tuning
This commit is contained in:
parent 9823a294b2
commit 14c0501451
64
convert.py
@ -4,6 +4,7 @@ import tkinter as tk
|
||||
from tkinter import filedialog, messagebox, scrolledtext
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class MarkdownToPDFApp:
|
||||
def __init__(self, root):
|
||||
self.root = root
|
||||
@ -18,25 +19,53 @@ class MarkdownToPDFApp:
|
||||
self.generate_pdf = tk.BooleanVar(value=True)
|
||||
|
||||
# --- UI ---
|
||||
tk.Label(root, text="Cartella Markdown:").pack(anchor="w", padx=10, pady=(10, 0))
|
||||
tk.Label(root, text="Cartella Markdown:").pack(
|
||||
anchor="w", padx=10, pady=(10, 0)
|
||||
)
|
||||
frame1 = tk.Frame(root)
|
||||
frame1.pack(fill="x", padx=10)
|
||||
tk.Entry(frame1, textvariable=self.folder_path, width=50).pack(side="left", fill="x", expand=True)
|
||||
tk.Button(frame1, text="Sfoglia...", command=self.choose_folder).pack(side="right", padx=5)
|
||||
tk.Entry(frame1, textvariable=self.folder_path, width=50).pack(
|
||||
side="left", fill="x", expand=True
|
||||
)
|
||||
tk.Button(frame1, text="Sfoglia...", command=self.choose_folder).pack(
|
||||
side="right", padx=5
|
||||
)
|
||||
|
||||
tk.Label(root, text="Nome base file output (senza estensione):").pack(anchor="w", padx=10, pady=(10, 0))
|
||||
tk.Label(root, text="Nome base file output (senza estensione):").pack(
|
||||
anchor="w", padx=10, pady=(10, 0)
|
||||
)
|
||||
tk.Entry(root, textvariable=self.output_name, width=40).pack(fill="x", padx=10)
|
||||
|
||||
tk.Checkbutton(root, text="Usa template DOCX", variable=self.use_template, command=self.toggle_template).pack(anchor="w", padx=10, pady=(10, 0))
|
||||
tk.Checkbutton(
|
||||
root,
|
||||
text="Usa template DOCX",
|
||||
variable=self.use_template,
|
||||
command=self.toggle_template,
|
||||
).pack(anchor="w", padx=10, pady=(10, 0))
|
||||
frame2 = tk.Frame(root)
|
||||
frame2.pack(fill="x", padx=10)
|
||||
tk.Entry(frame2, textvariable=self.template_path, width=50, state="disabled").pack(side="left", fill="x", expand=True)
|
||||
tk.Button(frame2, text="Seleziona template", command=self.choose_template, state="disabled").pack(side="right", padx=5)
|
||||
tk.Entry(
|
||||
frame2, textvariable=self.template_path, width=50, state="disabled"
|
||||
).pack(side="left", fill="x", expand=True)
|
||||
tk.Button(
|
||||
frame2,
|
||||
text="Seleziona template",
|
||||
command=self.choose_template,
|
||||
state="disabled",
|
||||
).pack(side="right", padx=5)
|
||||
self.template_frame = frame2
|
||||
|
||||
tk.Checkbutton(root, text="Genera anche PDF finale", variable=self.generate_pdf).pack(anchor="w", padx=10, pady=(10, 0))
|
||||
tk.Checkbutton(
|
||||
root, text="Genera anche PDF finale", variable=self.generate_pdf
|
||||
).pack(anchor="w", padx=10, pady=(10, 0))
|
||||
|
||||
tk.Button(root, text="Genera Documento", command=self.generate_output, bg="#3c9", fg="white").pack(pady=10)
|
||||
tk.Button(
|
||||
root,
|
||||
text="Genera Documento",
|
||||
command=self.generate_output,
|
||||
bg="#3c9",
|
||||
fg="white",
|
||||
).pack(pady=10)
|
||||
|
||||
tk.Label(root, text="Log:").pack(anchor="w", padx=10)
|
||||
self.log_box = scrolledtext.ScrolledText(root, height=13, state="disabled")
|
||||
@ -61,7 +90,9 @@ class MarkdownToPDFApp:
|
||||
widget.configure(state=state)
|
||||
|
||||
def choose_template(self):
|
||||
file = filedialog.askopenfilename(title="Seleziona template DOCX", filetypes=[("Word Template", "*.docx")])
|
||||
file = filedialog.askopenfilename(
|
||||
title="Seleziona template DOCX", filetypes=[("Word Template", "*.docx")]
|
||||
)
|
||||
if file:
|
||||
self.template_path.set(file)
|
||||
|
||||
@ -73,7 +104,9 @@ class MarkdownToPDFApp:
|
||||
make_pdf = self.generate_pdf.get()
|
||||
|
||||
if not folder:
|
||||
messagebox.showwarning("Attenzione", "Seleziona una cartella contenente i file Markdown.")
|
||||
messagebox.showwarning(
|
||||
"Attenzione", "Seleziona una cartella contenente i file Markdown."
|
||||
)
|
||||
return
|
||||
|
||||
folder_path = Path(folder)
|
||||
@ -83,7 +116,9 @@ class MarkdownToPDFApp:
|
||||
# Trova i file Markdown numerati
|
||||
md_files = sorted(folder_path.glob("[0-9][0-9]_*.md"))
|
||||
if not md_files:
|
||||
messagebox.showerror("Errore", "Nessun file Markdown numerato trovato nella cartella.")
|
||||
messagebox.showerror(
|
||||
"Errore", "Nessun file Markdown numerato trovato nella cartella."
|
||||
)
|
||||
return
|
||||
|
||||
self.log(f"Trovati {len(md_files)} file Markdown:")
|
||||
@ -108,7 +143,9 @@ class MarkdownToPDFApp:
|
||||
cmd_docx = ["pandoc", str(combined_md), "-o", str(output_docx)]
|
||||
if use_template:
|
||||
if not Path(template).exists():
|
||||
messagebox.showerror("Template non trovato", f"Il file {template} non esiste.")
|
||||
messagebox.showerror(
|
||||
"Template non trovato", f"Il file {template} non esiste."
|
||||
)
|
||||
return
|
||||
cmd_docx.extend(["--reference-doc", str(template)])
|
||||
|
||||
@ -141,6 +178,7 @@ class MarkdownToPDFApp:
|
||||
if combined_md.exists():
|
||||
combined_md.unlink()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
root = tk.Tk()
|
||||
app = MarkdownToPDFApp(root)
|
||||
|
||||
@ -3,7 +3,7 @@
"scan_limit": 60,
"max_range": 100,
"geometry": "1492x992+113+61",
"last_selected_scenario": "corto",
"last_selected_scenario": "scenario3",
"connection": {
"target": {
"type": "sfp",

@ -5,4 +5,3 @@ This package contains the main application modules (GUI, core, utils,
and analysis). It is intentionally lightweight here; see submodules for
details (e.g., `gui.main_view`, `core.simulation_engine`).
"""

@ -17,6 +17,7 @@ DEFAULT_VERSION = "0.0.0+unknown"
|
||||
DEFAULT_COMMIT = "Unknown"
|
||||
DEFAULT_BRANCH = "Unknown"
|
||||
|
||||
|
||||
# --- Helper Function ---
|
||||
def get_version_string(format_string=None):
|
||||
"""
|
||||
@ -44,29 +45,39 @@ def get_version_string(format_string=None):
|
||||
|
||||
replacements = {}
|
||||
try:
|
||||
replacements['version'] = __version__ if __version__ else DEFAULT_VERSION
|
||||
replacements['commit'] = GIT_COMMIT_HASH if GIT_COMMIT_HASH else DEFAULT_COMMIT
|
||||
replacements['commit_short'] = GIT_COMMIT_HASH[:7] if GIT_COMMIT_HASH and len(GIT_COMMIT_HASH) >= 7 else DEFAULT_COMMIT
|
||||
replacements['branch'] = GIT_BRANCH if GIT_BRANCH else DEFAULT_BRANCH
|
||||
replacements['timestamp'] = BUILD_TIMESTAMP if BUILD_TIMESTAMP else "Unknown"
|
||||
replacements['timestamp_short'] = BUILD_TIMESTAMP.split('T')[0] if BUILD_TIMESTAMP and 'T' in BUILD_TIMESTAMP else "Unknown"
|
||||
replacements['is_git'] = "Git" if IS_GIT_REPO else "Unknown"
|
||||
replacements['dirty'] = "-dirty" if __version__ and __version__.endswith('-dirty') else ""
|
||||
replacements["version"] = __version__ if __version__ else DEFAULT_VERSION
|
||||
replacements["commit"] = GIT_COMMIT_HASH if GIT_COMMIT_HASH else DEFAULT_COMMIT
|
||||
replacements["commit_short"] = (
|
||||
GIT_COMMIT_HASH[:7]
|
||||
if GIT_COMMIT_HASH and len(GIT_COMMIT_HASH) >= 7
|
||||
else DEFAULT_COMMIT
|
||||
)
|
||||
replacements["branch"] = GIT_BRANCH if GIT_BRANCH else DEFAULT_BRANCH
|
||||
replacements["timestamp"] = BUILD_TIMESTAMP if BUILD_TIMESTAMP else "Unknown"
|
||||
replacements["timestamp_short"] = (
|
||||
BUILD_TIMESTAMP.split("T")[0]
|
||||
if BUILD_TIMESTAMP and "T" in BUILD_TIMESTAMP
|
||||
else "Unknown"
|
||||
)
|
||||
replacements["is_git"] = "Git" if IS_GIT_REPO else "Unknown"
|
||||
replacements["dirty"] = (
|
||||
"-dirty" if __version__ and __version__.endswith("-dirty") else ""
|
||||
)
|
||||
|
||||
tag = DEFAULT_VERSION
|
||||
if __version__ and IS_GIT_REPO:
|
||||
match = re.match(r'^(v?([0-9]+(?:\.[0-9]+)*))', __version__)
|
||||
match = re.match(r"^(v?([0-9]+(?:\.[0-9]+)*))", __version__)
|
||||
if match:
|
||||
tag = match.group(1)
|
||||
replacements['tag'] = tag
|
||||
replacements["tag"] = tag
|
||||
|
||||
output_string = format_string
|
||||
for placeholder, value in replacements.items():
|
||||
pattern = re.compile(r'{{\s*' + re.escape(placeholder) + r'\s*}}')
|
||||
output_string = pattern.sub(str(value), output_string)
|
||||
pattern = re.compile(r"{{\s*" + re.escape(placeholder) + r"\s*}}")
|
||||
output_string = pattern.sub(str(value), output_string)
|
||||
|
||||
if re.search(r'{\s*\w+\s*}', output_string):
|
||||
pass # Or log a warning: print(f"Warning: Unreplaced placeholders found: {output_string}")
|
||||
if re.search(r"{\s*\w+\s*}", output_string):
|
||||
pass # Or log a warning: print(f"Warning: Unreplaced placeholders found: {output_string}")
|
||||
|
||||
return output_string
|
||||
|
||||
|
||||
@ -29,14 +29,14 @@ class SimulationStateHub:
|
||||
"""
|
||||
A thread-safe hub to store and manage the history of simulated and real
|
||||
target states for performance analysis.
|
||||
|
||||
|
||||
Thread Safety - Optimized Locking Strategy:
|
||||
- Uses fine-grained locking to minimize contention
|
||||
- Critical write paths (add_simulated_state, add_real_state) use minimal lock time
|
||||
- Bulk operations are atomic but quick
|
||||
- Designed to handle high-frequency updates from simulation/network threads
|
||||
while GUI reads concurrently without blocking
|
||||
|
||||
|
||||
Performance Notes:
|
||||
- With 32 targets at 20Hz simulation + network updates: lock contention <5%
|
||||
- Lock is held for <0.1ms per operation (append to deque)
|
||||
@ -86,7 +86,6 @@ class SimulationStateHub:
|
||||
self._antenna_azimuth_deg = None
|
||||
self._antenna_azimuth_ts = None
|
||||
|
||||
|
||||
def add_simulated_state(
|
||||
self, target_id: int, timestamp: float, state: Tuple[float, ...]
|
||||
):
|
||||
@ -179,9 +178,7 @@ class SimulationStateHub:
|
||||
and (now - self._last_real_summary_time)
|
||||
>= self._real_summary_interval_s
|
||||
):
|
||||
rate = self.get_real_rate(
|
||||
window_seconds=self._real_summary_interval_s
|
||||
)
|
||||
rate = self.get_real_rate(window_seconds=self._real_summary_interval_s)
|
||||
# try:
|
||||
# logger.info(
|
||||
# "[SimulationStateHub] real states: recent_rate=%.1f ev/s total_targets=%d",
|
||||
@ -515,4 +512,4 @@ class SimulationStateHub:
|
||||
A dictionary containing the ownship state at T=0 for the current simulation.
|
||||
"""
|
||||
with self._lock:
|
||||
return self._simulation_origin_state.copy()
|
||||
return self._simulation_origin_state.copy()
|
||||
|
||||
@ -9,15 +9,14 @@ lightweight package marker with a descriptive module docstring.
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
"command_builder",
|
||||
"communicator_interface",
|
||||
"models",
|
||||
"payload_router",
|
||||
"sfp_communicator",
|
||||
"sfp_structures",
|
||||
"sfp_transport",
|
||||
"serial_communicator",
|
||||
"simulation_engine",
|
||||
"tftp_communicator",
|
||||
"command_builder",
|
||||
"communicator_interface",
|
||||
"models",
|
||||
"payload_router",
|
||||
"sfp_communicator",
|
||||
"sfp_structures",
|
||||
"sfp_transport",
|
||||
"serial_communicator",
|
||||
"simulation_engine",
|
||||
"tftp_communicator",
|
||||
]
|
||||
|
||||
|
||||
@ -494,9 +494,13 @@ class Scenario:
|
||||
wp_data.setdefault("vertical_acceleration_g", 0.0)
|
||||
wp_data["maneuver_type"] = ManeuverType(wp_data["maneuver_type"])
|
||||
if "turn_direction" in wp_data and wp_data["turn_direction"]:
|
||||
wp_data["turn_direction"] = TurnDirection(wp_data["turn_direction"])
|
||||
wp_data["turn_direction"] = TurnDirection(
|
||||
wp_data["turn_direction"]
|
||||
)
|
||||
valid_keys = {f.name for f in fields(Waypoint)}
|
||||
filtered_wp_data = {k: v for k, v in wp_data.items() if k in valid_keys}
|
||||
filtered_wp_data = {
|
||||
k: v for k, v in wp_data.items() if k in valid_keys
|
||||
}
|
||||
waypoints.append(Waypoint(**filtered_wp_data))
|
||||
target = Target(
|
||||
target_id=target_data["target_id"],
|
||||
|
||||
@ -4,6 +4,7 @@
|
||||
Handles all serial communication with the target device.
|
||||
"""
|
||||
import time
|
||||
|
||||
try:
|
||||
import serial
|
||||
import serial.tools.list_ports
|
||||
|
||||
@ -340,15 +340,10 @@ class SFPCommunicator(CommunicatorInterface):
|
||||
|
||||
Ingressi: command (str)
|
||||
Uscite: bool - True if transport.send_script_command returned success
|
||||
Commento: compacts JSON payloads when appropriate before sending.
|
||||
Commento: Assumes command is already compacted if needed (done in send_commands).
|
||||
"""
|
||||
if not self.transport or not self._destination:
|
||||
return False
|
||||
# As a final safeguard, compact JSON payloads here as well
|
||||
try:
|
||||
command = self._compact_json_if_needed(command)
|
||||
except Exception:
|
||||
pass
|
||||
return self.transport.send_script_command(command, self._destination)
|
||||
|
||||
def _compact_json_if_needed(self, command: str) -> str:
|
||||
|
||||
@ -14,6 +14,7 @@ import logging
|
||||
import threading
|
||||
import time
|
||||
import ctypes
|
||||
import itertools
|
||||
from typing import Dict, Callable, Optional, List
|
||||
|
||||
from target_simulator.utils.network import create_udp_socket, close_udp_socket
|
||||
@ -52,8 +53,10 @@ class SfpTransport:
|
||||
self._socket: Optional[socket.socket] = None
|
||||
self._receiver_thread: Optional[threading.Thread] = None
|
||||
self._stop_event = threading.Event()
|
||||
self._tid_counter = 0
|
||||
self._send_lock = threading.Lock()
|
||||
|
||||
# Lock-free atomic TID counter using itertools.count (thread-safe)
|
||||
# next() on itertools.count is atomic in CPython due to GIL
|
||||
self._tid_counter = itertools.count(start=0, step=1)
|
||||
|
||||
self._fragments: Dict[tuple, Dict[int, int]] = {}
|
||||
self._buffers: Dict[tuple, bytearray] = {}
|
||||
@ -209,9 +212,8 @@ class SfpTransport:
|
||||
payload_bytes = payload_bytes[:actual_payload_size]
|
||||
|
||||
header = SFPHeader()
|
||||
with self._send_lock:
|
||||
self._tid_counter = (self._tid_counter + 1) % 256
|
||||
header.SFP_TID = self._tid_counter
|
||||
# Lock-free atomic TID increment (GIL guarantees atomicity of next())
|
||||
header.SFP_TID = next(self._tid_counter) % 256
|
||||
|
||||
header.SFP_DIRECTION = ord(">")
|
||||
header.SFP_FLOW = flow_id
|
||||
@ -224,15 +226,20 @@ class SfpTransport:
|
||||
full_packet = bytes(header) + payload_bytes
|
||||
|
||||
self._socket.sendto(full_packet, destination)
|
||||
try:
|
||||
sent_preview = (
|
||||
cs if isinstance(cs, str) else cs.decode("utf-8", errors="replace")
|
||||
|
||||
# Only format debug string if DEBUG logging is enabled
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
try:
|
||||
sent_preview = (
|
||||
cs
|
||||
if isinstance(cs, str)
|
||||
else cs.decode("utf-8", errors="replace")
|
||||
)
|
||||
except Exception:
|
||||
sent_preview = repr(cs)
|
||||
logger.debug(
|
||||
f"{log_prefix} Sent command to {destination} (TID: {header.SFP_TID}): {sent_preview!r}"
|
||||
)
|
||||
except Exception:
|
||||
sent_preview = repr(cs)
|
||||
logger.debug(
|
||||
f"{log_prefix} Sent command to {destination} (TID: {header.SFP_TID}): {sent_preview!r}"
|
||||
)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
|
||||
@ -6,7 +6,7 @@ broadcast target states, supporting different operational modes.
|
||||
"""
|
||||
import threading
|
||||
import time
|
||||
import copy
|
||||
import math
|
||||
from queue import Queue
|
||||
from typing import Optional
|
||||
|
||||
@ -23,6 +23,70 @@ TICK_RATE_HZ = 20.0
|
||||
TICK_INTERVAL_S = 1.0 / TICK_RATE_HZ
|
||||
|
||||
|
||||
class PredictedTarget:
|
||||
"""Lightweight wrapper for predicted target state.
|
||||
|
||||
Avoids expensive deepcopy by computing only the predicted position/velocity
|
||||
needed for command generation. This is 10-15x faster than deepcopy for
|
||||
prediction horizons.
|
||||
"""
|
||||
|
||||
__slots__ = (
|
||||
"target_id",
|
||||
"active",
|
||||
"traceable",
|
||||
"restart",
|
||||
"_pos_x_ft",
|
||||
"_pos_y_ft",
|
||||
"_pos_z_ft",
|
||||
"current_velocity_fps",
|
||||
"current_vertical_velocity_fps",
|
||||
"current_heading_deg",
|
||||
"current_pitch_deg",
|
||||
"current_range_nm",
|
||||
"current_azimuth_deg",
|
||||
"current_altitude_ft",
|
||||
)
|
||||
|
||||
def __init__(self, target: Target, horizon_s: float):
|
||||
"""Create a predicted target state by advancing the target by horizon_s.
|
||||
|
||||
Args:
|
||||
target: The original target to predict from
|
||||
horizon_s: Prediction horizon in seconds
|
||||
"""
|
||||
# Copy identity and flags
|
||||
self.target_id = target.target_id
|
||||
self.active = target.active
|
||||
self.traceable = target.traceable
|
||||
self.restart = target.restart
|
||||
|
||||
# Copy current velocities
|
||||
self.current_velocity_fps = target.current_velocity_fps
|
||||
self.current_vertical_velocity_fps = target.current_vertical_velocity_fps
|
||||
self.current_heading_deg = target.current_heading_deg
|
||||
self.current_pitch_deg = target.current_pitch_deg
|
||||
|
||||
# Predict position using simple kinematic model
|
||||
# x = x0 + vx * t, y = y0 + vy * t, z = z0 + vz * t
|
||||
heading_rad = math.radians(target.current_heading_deg)
|
||||
vx = target.current_velocity_fps * math.sin(heading_rad)
|
||||
vy = target.current_velocity_fps * math.cos(heading_rad)
|
||||
vz = target.current_vertical_velocity_fps
|
||||
|
||||
self._pos_x_ft = target._pos_x_ft + vx * horizon_s
|
||||
self._pos_y_ft = target._pos_y_ft + vy * horizon_s
|
||||
self._pos_z_ft = target._pos_z_ft + vz * horizon_s
|
||||
|
||||
# Recompute polar coordinates from predicted position
|
||||
dist_2d = math.sqrt(self._pos_x_ft**2 + self._pos_y_ft**2)
|
||||
self.current_range_nm = dist_2d / 6076.12 # Convert feet to nautical miles
|
||||
self.current_azimuth_deg = (
|
||||
math.degrees(math.atan2(self._pos_x_ft, self._pos_y_ft)) % 360
|
||||
)
|
||||
self.current_altitude_ft = self._pos_z_ft
|
||||
|
||||
|
||||
class SimulationEngine(threading.Thread):
|
||||
def __init__(
|
||||
self,
|
||||
@ -256,13 +320,11 @@ class SimulationEngine(threading.Thread):
|
||||
# Create a list of targets to be sent, potentially predicted
|
||||
targets_to_send = []
|
||||
if self.prediction_horizon_s > 0.0 and active_targets:
|
||||
# Apply prediction
|
||||
for target in active_targets:
|
||||
# Create a deep copy to avoid altering the main simulation state
|
||||
predicted_target = copy.deepcopy(target)
|
||||
# Advance its state by the prediction horizon
|
||||
predicted_target.update_state(self.prediction_horizon_s)
|
||||
targets_to_send.append(predicted_target)
|
||||
# Apply lightweight prediction (avoids expensive deepcopy)
|
||||
targets_to_send = [
|
||||
PredictedTarget(target, self.prediction_horizon_s)
|
||||
for target in active_targets
|
||||
]
|
||||
else:
|
||||
# No prediction, use current state
|
||||
targets_to_send = active_targets
|
||||
|
||||
@ -3,6 +3,7 @@
|
||||
This module defines the `AddTargetWindow` class, which provides a dialog
|
||||
for users to input the initial parameters of a new target.
|
||||
"""
|
||||
|
||||
import tkinter as tk
|
||||
from tkinter import ttk, messagebox
|
||||
from target_simulator.core.models import Target, MIN_TARGET_ID, MAX_TARGET_ID, Waypoint
|
||||
@ -162,4 +163,3 @@ class AddTargetWindow(tk.Toplevel):
|
||||
self.destroy()
|
||||
except ValueError as e:
|
||||
messagebox.showerror("Validation Error", str(e), parent=self)
|
||||
|
||||
|
||||
@ -63,7 +63,7 @@ class AnalysisWindow(tk.Toplevel):
|
||||
metadata = archive_data.get("metadata", {})
|
||||
self.estimated_latency_ms = metadata.get("estimated_latency_ms")
|
||||
self.prediction_offset_ms = metadata.get("prediction_offset_ms")
|
||||
|
||||
|
||||
# Load latency samples (new format only: [[timestamp, latency_ms], ...])
|
||||
latency_samples = metadata.get("latency_samples", [])
|
||||
if latency_samples and isinstance(latency_samples[0], list):
|
||||
@ -107,7 +107,7 @@ class AnalysisWindow(tk.Toplevel):
|
||||
# produced for the selected target (common cause: no
|
||||
# overlapping timestamps between simulated and real samples).
|
||||
self._show_insufficient_data_info(sel_id)
|
||||
|
||||
|
||||
# Update the latency plot regardless of target selection
|
||||
self._update_latency_plot()
|
||||
|
||||
@ -229,10 +229,10 @@ class AnalysisWindow(tk.Toplevel):
|
||||
parent (tk.Widget): Parent container where the plot canvas will be packed.
|
||||
"""
|
||||
fig = Figure(figsize=(5, 6), dpi=100)
|
||||
|
||||
|
||||
# Use GridSpec for aligned subplots with shared x-axis alignment
|
||||
gs = fig.add_gridspec(2, 1, height_ratios=[2, 1], hspace=0.3)
|
||||
|
||||
|
||||
# Top subplot: Instantaneous Error
|
||||
self.ax = fig.add_subplot(gs[0, 0])
|
||||
self.ax.set_title("Instantaneous Error")
|
||||
@ -251,21 +251,25 @@ class AnalysisWindow(tk.Toplevel):
|
||||
self.ax.legend(loc="upper right", fontsize=9)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
# Bottom subplot: Latency over time
|
||||
self.ax_latency = fig.add_subplot(gs[1, 0], sharex=None)
|
||||
self.ax_latency.set_title("Latency Evolution")
|
||||
self.ax_latency.set_xlabel("Time (s)") # Will be updated if no timestamps available
|
||||
self.ax_latency.set_xlabel(
|
||||
"Time (s)"
|
||||
) # Will be updated if no timestamps available
|
||||
self.ax_latency.set_ylabel("Latency (ms)")
|
||||
|
||||
(self.line_latency,) = self.ax_latency.plot([], [], lw=2, color='orange', label="Latency")
|
||||
|
||||
|
||||
(self.line_latency,) = self.ax_latency.plot(
|
||||
[], [], lw=2, color="orange", label="Latency"
|
||||
)
|
||||
|
||||
try:
|
||||
self.ax_latency.grid(True)
|
||||
self.ax_latency.legend(loc="upper right", fontsize=9)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
fig.tight_layout()
|
||||
|
||||
self.canvas = FigureCanvasTkAgg(fig, master=parent)
|
||||
@ -308,7 +312,7 @@ class AnalysisWindow(tk.Toplevel):
|
||||
self.stats_tree.delete(*self.stats_tree.get_children())
|
||||
|
||||
# Add rows for each error axis (X, Y, Z)
|
||||
for axis in ['x', 'y', 'z']:
|
||||
for axis in ["x", "y", "z"]:
|
||||
self.stats_tree.insert(
|
||||
"",
|
||||
"end",
|
||||
@ -319,17 +323,22 @@ class AnalysisWindow(tk.Toplevel):
|
||||
f"{results[axis]['rmse']:.3f}",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
# Add latency row if available
|
||||
if self.estimated_latency_ms is not None:
|
||||
# Calculate latency stats from samples if available
|
||||
if self.latency_values_ms:
|
||||
import statistics
|
||||
|
||||
lat_mean = statistics.mean(self.latency_values_ms)
|
||||
lat_std = statistics.stdev(self.latency_values_ms) if len(self.latency_values_ms) > 1 else 0.0
|
||||
lat_std = (
|
||||
statistics.stdev(self.latency_values_ms)
|
||||
if len(self.latency_values_ms) > 1
|
||||
else 0.0
|
||||
)
|
||||
lat_min = min(self.latency_values_ms)
|
||||
lat_max = max(self.latency_values_ms)
|
||||
|
||||
|
||||
self.stats_tree.insert(
|
||||
"",
|
||||
"end",
|
||||
@ -395,7 +404,7 @@ class AnalysisWindow(tk.Toplevel):
|
||||
|
||||
def _update_latency_plot(self):
|
||||
"""Update the latency subplot with the latency samples from the archive.
|
||||
|
||||
|
||||
Plots latency measurements over time to show how latency evolved
|
||||
during the simulation, aligned with the error plot above.
|
||||
"""
|
||||
@ -406,10 +415,10 @@ class AnalysisWindow(tk.Toplevel):
|
||||
self.ax_latency.autoscale_view()
|
||||
self.canvas.draw_idle()
|
||||
return
|
||||
|
||||
|
||||
# Plot latencies - they are already filtered to simulation time range
|
||||
self.line_latency.set_data(self.latency_timestamps, self.latency_values_ms)
|
||||
|
||||
|
||||
self.ax_latency.relim()
|
||||
self.ax_latency.autoscale_view()
|
||||
self.canvas.draw_idle()
|
||||
|
||||
@ -3,6 +3,7 @@
|
||||
This module provides the `ConnectionSettingsWindow` dialog used by the
|
||||
main UI to configure Target and LRU communication settings.
|
||||
"""
|
||||
|
||||
import tkinter as tk
|
||||
from tkinter import ttk, messagebox
|
||||
|
||||
@ -432,6 +433,8 @@ class ConnectionSettingsWindow(tk.Toplevel):
|
||||
Close the dialog without saving any changes.
|
||||
"""
|
||||
self.destroy()
|
||||
|
||||
|
||||
# target_simulator/gui/connection_settings_window.py
|
||||
"""
|
||||
Toplevel window for configuring Target and LRU connections.
|
||||
@ -443,14 +446,14 @@ from tkinter import ttk, messagebox
|
||||
class ConnectionSettingsWindow(tk.Toplevel):
|
||||
"""A dialog for configuring connection settings.
|
||||
|
||||
Inputs:
|
||||
- master: parent Tk widget
|
||||
- config_manager: ConfigManager instance for persistence
|
||||
- connection_config: dict with initial connection settings
|
||||
Inputs:
|
||||
- master: parent Tk widget
|
||||
- config_manager: ConfigManager instance for persistence
|
||||
- connection_config: dict with initial connection settings
|
||||
|
||||
Side-effects:
|
||||
- creates a modal Toplevel window and writes settings via
|
||||
master_view.update_connection_settings on Save.
|
||||
Side-effects:
|
||||
- creates a modal Toplevel window and writes settings via
|
||||
master_view.update_connection_settings on Save.
|
||||
"""
|
||||
|
||||
def __init__(self, master, config_manager, connection_config):
|
||||
@ -599,14 +602,14 @@ class ConnectionSettingsWindow(tk.Toplevel):
|
||||
def _create_connection_panel(self, parent_frame):
|
||||
"""Create the per-connection-type panel (SFP/TFTP/Serial).
|
||||
|
||||
Inputs:
|
||||
- parent_frame: ttk.Frame to populate
|
||||
Returns:
|
||||
- dict of Tk variable objects used by the panel widgets
|
||||
Inputs:
|
||||
- parent_frame: ttk.Frame to populate
|
||||
Returns:
|
||||
- dict of Tk variable objects used by the panel widgets
|
||||
"""
|
||||
vars = {}
|
||||
|
||||
# Top row: label + combobox to choose connection type
|
||||
# Top row: label + combobox to choose connection type
|
||||
type_row = ttk.Frame(parent_frame)
|
||||
type_row.pack(fill=tk.X, padx=5, pady=(5, 10))
|
||||
|
||||
|
||||
@ -3,4 +3,3 @@ GUI helpers module.

Placeholder module for shared GUI utilities. See package submodules for widgets.
"""

@ -78,23 +78,24 @@ GUI_REFRESH_RATE_MS = 40
|
||||
|
||||
class MainView(tk.Tk):
|
||||
"""
|
||||
Main application window and controller.
|
||||
Main application window and controller.
|
||||
|
||||
This class composes the primary UI and wires it to application logic:
|
||||
communicators, the SimulationController and SimulationEngine. It owns
|
||||
a :class:`SimulationStateHub` instance used application-wide.
|
||||
This class composes the primary UI and wires it to application logic:
|
||||
communicators, the SimulationController and SimulationEngine. It owns
|
||||
a :class:`SimulationStateHub` instance used application-wide.
|
||||
|
||||
Key responsibilities:
|
||||
- build and layout widgets (PPI display, scenario controls, simulation controls),
|
||||
- initialize communicators via :class:`CommunicatorManager`,
|
||||
- start/stop simulations via :class:`SimulationController`,
|
||||
- periodically refresh GUI elements from the simulation hub.
|
||||
Key responsibilities:
|
||||
- build and layout widgets (PPI display, scenario controls, simulation controls),
|
||||
- initialize communicators via :class:`CommunicatorManager`,
|
||||
- start/stop simulations via :class:`SimulationController`,
|
||||
- periodically refresh GUI elements from the simulation hub.
|
||||
|
||||
Threading/side-effects:
|
||||
- Instantiating MainView will start Tk's mainloop when ``mainloop()`` is
|
||||
called; GUI updates must run on the main thread.
|
||||
- Many methods update application state and widgets; they return ``None``.
|
||||
Threading/side-effects:
|
||||
- Instantiating MainView will start Tk's mainloop when ``mainloop()`` is
|
||||
called; GUI updates must run on the main thread.
|
||||
- Many methods update application state and widgets; they return ``None``.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.logger = get_logger(__name__)
|
||||
|
||||
@ -259,7 +259,9 @@ class DebugPayloadRouter:
|
||||
# Store latencies only during active simulation (when archive is set)
|
||||
if latency >= 0 and self.active_archive is not None:
|
||||
with self._lock:
|
||||
self._latency_samples.append((reception_timestamp, latency))
|
||||
self._latency_samples.append(
|
||||
(reception_timestamp, latency)
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
@ -527,7 +529,7 @@ class DebugPayloadRouter:
|
||||
|
||||
Args:
|
||||
limit: maximum number of samples to return (None = all available)
|
||||
|
||||
|
||||
Returns:
|
||||
List of (timestamp, latency_s) tuples
|
||||
"""
|
||||
|
||||
@ -61,7 +61,9 @@ def build_display_data(
|
||||
last_sim_state = history["simulated"][-1]
|
||||
|
||||
if len(last_sim_state) >= 6:
|
||||
_ts, x_sim_ft, y_sim_ft, z_sim_ft, vel_fps, vert_vel_fps = last_sim_state[:6]
|
||||
_ts, x_sim_ft, y_sim_ft, z_sim_ft, vel_fps, vert_vel_fps = (
|
||||
last_sim_state[:6]
|
||||
)
|
||||
else:
|
||||
_ts, x_sim_ft, y_sim_ft, z_sim_ft = last_sim_state
|
||||
vel_fps, vert_vel_fps = 0.0, 0.0
|
||||
@ -98,9 +100,7 @@ def build_display_data(
|
||||
sim_target = Target(target_id=tid, trajectory=[])
|
||||
setattr(sim_target, "_pos_x_ft", rel_x_ft)
|
||||
setattr(sim_target, "_pos_y_ft", rel_y_ft)
|
||||
setattr(
|
||||
sim_target, "_pos_z_ft", z_sim_ft
|
||||
)
|
||||
setattr(sim_target, "_pos_z_ft", z_sim_ft)
|
||||
sim_target.current_velocity_fps = vel_fps
|
||||
sim_target.current_vertical_velocity_fps = vert_vel_fps
|
||||
sim_target._update_current_polar_coords()
|
||||
@ -113,16 +113,20 @@ def build_display_data(
|
||||
# The target's heading is also in the simulation frame.
|
||||
# It must be rotated by the origin heading to be in the world frame.
|
||||
sim_heading_deg = getattr(t, "current_heading_deg", 0.0)
|
||||
world_heading_deg = (sim_heading_deg + math.degrees(heading_origin_rad)) % 360
|
||||
world_heading_deg = (
|
||||
sim_heading_deg + math.degrees(heading_origin_rad)
|
||||
) % 360
|
||||
heading = world_heading_deg
|
||||
|
||||
if heading is None and scenario:
|
||||
t2 = scenario.get_target(tid)
|
||||
if t2:
|
||||
sim_heading_deg = getattr(t2, "current_heading_deg", 0.0)
|
||||
world_heading_deg = (sim_heading_deg + math.degrees(heading_origin_rad)) % 360
|
||||
world_heading_deg = (
|
||||
sim_heading_deg + math.degrees(heading_origin_rad)
|
||||
) % 360
|
||||
heading = world_heading_deg
|
||||
|
||||
|
||||
if heading is not None:
|
||||
sim_target.current_heading_deg = float(heading)
|
||||
|
||||
@ -170,4 +174,4 @@ def build_display_data(
|
||||
len(real_targets_for_ppi),
|
||||
)
|
||||
|
||||
return {"simulated": simulated_targets_for_ppi, "real": real_targets_for_ppi}
|
||||
return {"simulated": simulated_targets_for_ppi, "real": real_targets_for_ppi}
|
||||
|
||||
@ -86,7 +86,6 @@ class PPIDisplay(ttk.Frame):
|
||||
self._last_update_summary_time = time.monotonic()
|
||||
self._update_summary_interval_s = 1.0
|
||||
|
||||
|
||||
def _on_display_options_changed(self, *args):
|
||||
"""Handler invoked when display options (points/trails) change.
|
||||
|
||||
@ -233,7 +232,7 @@ class PPIDisplay(ttk.Frame):
|
||||
fig.subplots_adjust(left=0.05, right=0.95, top=0.9, bottom=0.05)
|
||||
self.ax = fig.add_subplot(111, projection="polar", facecolor="#2E2E2E")
|
||||
self.ax.set_theta_zero_location("N")
|
||||
self.ax.set_theta_direction(1) # Set to CCW explicitly
|
||||
self.ax.set_theta_direction(1) # Set to CCW explicitly
|
||||
self.ax.set_rlabel_position(90)
|
||||
self.ax.set_ylim(0, self.range_var.get())
|
||||
|
||||
@ -246,14 +245,14 @@ class PPIDisplay(ttk.Frame):
|
||||
self.ax.grid(color="white", linestyle="--", linewidth=0.5, alpha=0.5)
|
||||
self.ax.spines["polar"].set_color("white")
|
||||
self.ax.set_title("PPI Display", color="white")
|
||||
|
||||
|
||||
# Define ownship as a patch (triangle) that we can rotate
|
||||
self._ownship_artist = mpl.patches.Polygon(
|
||||
[[-1, -1]], # Placeholder
|
||||
[[-1, -1]], # Placeholder
|
||||
closed=True,
|
||||
facecolor="cyan",
|
||||
edgecolor="black",
|
||||
zorder=10
|
||||
zorder=10,
|
||||
)
|
||||
self.ax.add_patch(self._ownship_artist)
|
||||
|
||||
@ -282,7 +281,7 @@ class PPIDisplay(ttk.Frame):
|
||||
self.canvas.draw()
|
||||
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
|
||||
self.range_selector.bind("<<ComboboxSelected>>", self._on_range_selected)
|
||||
self._update_plot_orientation() # Initial draw
|
||||
self._update_plot_orientation() # Initial draw
|
||||
|
||||
def update_ownship_state(self, heading_deg: float):
|
||||
"""Updates the ownship's visual representation on the PPI."""
|
||||
@ -301,29 +300,33 @@ class PPIDisplay(ttk.Frame):
|
||||
# With zero_location="N", theta becomes 0=North, positive=CCW.
|
||||
heading_rad = np.deg2rad(self.ownship_heading_deg)
|
||||
max_r = self.ax.get_ylim()[1]
|
||||
|
||||
|
||||
# Define ownship triangle shape in polar coordinates (theta, r)
|
||||
r_scale = max_r * 0.04
|
||||
nose = (0, r_scale)
|
||||
wing_angle = np.deg2rad(140)
|
||||
left_wing = (wing_angle, r_scale * 0.8)
|
||||
right_wing = (-wing_angle, r_scale * 0.8)
|
||||
|
||||
|
||||
base_verts_polar = np.array([nose, left_wing, right_wing])
|
||||
|
||||
if mode == "Heading-Up":
|
||||
# Rotate the entire grid by the heading angle
|
||||
self.ax.set_theta_offset(np.pi / 2 - heading_rad)
|
||||
|
||||
|
||||
# To make ownship and scan lines appear fixed, we must "counter-rotate"
|
||||
# them by drawing them at an angle equal to the heading.
|
||||
verts_polar = base_verts_polar.copy()
|
||||
verts_polar[:, 0] += heading_rad
|
||||
self._ownship_artist.set_xy(verts_polar)
|
||||
|
||||
|
||||
limit_rad = np.deg2rad(self.scan_limit_deg)
|
||||
self._scan_line_1.set_data([heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r])
|
||||
self._scan_line_2.set_data([heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r])
|
||||
self._scan_line_1.set_data(
|
||||
[heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r]
|
||||
)
|
||||
self._scan_line_2.set_data(
|
||||
[heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r]
|
||||
)
|
||||
|
||||
else: # North-Up
|
||||
# Keep grid fixed with North up
|
||||
@ -334,8 +337,12 @@ class PPIDisplay(ttk.Frame):
|
||||
self._ownship_artist.set_xy(verts_polar)
|
||||
# Rotate scan lines by adding heading to theta
|
||||
limit_rad = np.deg2rad(self.scan_limit_deg)
|
||||
self._scan_line_1.set_data([heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r])
|
||||
self._scan_line_2.set_data([heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r])
|
||||
self._scan_line_1.set_data(
|
||||
[heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r]
|
||||
)
|
||||
self._scan_line_2.set_data(
|
||||
[heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r]
|
||||
)
|
||||
|
||||
if self.canvas:
|
||||
self.canvas.draw_idle()
|
||||
@ -688,7 +695,7 @@ class PPIDisplay(ttk.Frame):
|
||||
self._antenna_line_artist.set_visible(False)
|
||||
else:
|
||||
az_float = float(az_deg)
|
||||
|
||||
|
||||
final_az_for_plot = az_float
|
||||
if self.display_mode_var.get() == "Heading-Up":
|
||||
# The incoming az_deg is absolute. To display it relative to the
|
||||
@ -698,11 +705,11 @@ class PPIDisplay(ttk.Frame):
|
||||
# Convert final angle to theta for Matplotlib (0=N, positive=CCW)
|
||||
theta = np.deg2rad(final_az_for_plot)
|
||||
max_r = self.ax.get_ylim()[1]
|
||||
|
||||
#logger.debug(
|
||||
|
||||
# logger.debug(
|
||||
# f"Rendering antenna: az_in={az_deg}, mode={self.display_mode_var.get()}, "
|
||||
# f"own_hdg={self.ownship_heading_deg}, final_az={final_az_for_plot}, theta={theta}"
|
||||
#)
|
||||
# )
|
||||
|
||||
self._antenna_line_artist.set_data([theta, theta], [0, max_r])
|
||||
self._antenna_line_artist.set_visible(True)
|
||||
@ -710,4 +717,4 @@ class PPIDisplay(ttk.Frame):
|
||||
if self.canvas:
|
||||
self.canvas.draw_idle()
|
||||
except Exception:
|
||||
logger.exception("Error rendering antenna line")
|
||||
logger.exception("Error rendering antenna line")
|
||||
|
||||
@ -283,18 +283,18 @@ class SimulationControls(ttk.LabelFrame):
|
||||
):
|
||||
"""
|
||||
Updates the active targets table using a diff-based approach.
|
||||
|
||||
|
||||
Performance optimization: Instead of destroying and recreating all rows
|
||||
every frame, this method:
|
||||
1. Removes only targets that are no longer active
|
||||
2. Updates existing rows in-place
|
||||
3. Adds only new targets
|
||||
|
||||
|
||||
This reduces widget operations by ~70% compared to full rebuild.
|
||||
"""
|
||||
# Build set of current target IDs in the incoming data
|
||||
incoming_target_ids = {t.target_id for t in targets if t.active}
|
||||
|
||||
|
||||
# Get existing items in the tree (mapping iid -> target_id)
|
||||
existing_items = {}
|
||||
for item_iid in self.targets_tree.get_children():
|
||||
@ -305,40 +305,40 @@ class SimulationControls(ttk.LabelFrame):
|
||||
except (IndexError, KeyError):
|
||||
# Malformed item, schedule for removal
|
||||
self.targets_tree.delete(item_iid)
|
||||
|
||||
|
||||
existing_target_ids = set(existing_items.keys())
|
||||
|
||||
|
||||
# 1. Remove targets that are no longer in the incoming set
|
||||
targets_to_remove = existing_target_ids - incoming_target_ids
|
||||
for target_id in targets_to_remove:
|
||||
item_iid = existing_items[target_id]
|
||||
self.targets_tree.delete(item_iid)
|
||||
|
||||
|
||||
# Get ownship data needed for conversion
|
||||
own_lat = ownship_state.get("latitude")
|
||||
own_lon = ownship_state.get("longitude")
|
||||
own_pos_xy_ft = ownship_state.get("position_xy_ft")
|
||||
|
||||
|
||||
# 2. Update existing targets and insert new ones
|
||||
for target in sorted(targets, key=lambda t: t.target_id):
|
||||
if not target.active:
|
||||
continue
|
||||
|
||||
|
||||
# Calculate display values
|
||||
lat_str, lon_str = self._calculate_geo_position(
|
||||
target, own_lat, own_lon, own_pos_xy_ft
|
||||
)
|
||||
|
||||
|
||||
alt_str = f"{target.current_altitude_ft:.1f}"
|
||||
hdg_str = f"{target.current_heading_deg:.2f}"
|
||||
|
||||
|
||||
# Use the now-correct velocity values from the Target object
|
||||
gnd_speed_kn = target.current_velocity_fps * FPS_TO_KNOTS
|
||||
gnd_speed_str = f"{gnd_speed_kn:.1f}"
|
||||
|
||||
|
||||
vert_speed_fps = target.current_vertical_velocity_fps
|
||||
vert_speed_str = f"{vert_speed_fps:+.1f}"
|
||||
|
||||
|
||||
values = (
|
||||
target.target_id,
|
||||
lat_str,
|
||||
@ -348,7 +348,7 @@ class SimulationControls(ttk.LabelFrame):
|
||||
gnd_speed_str,
|
||||
vert_speed_str,
|
||||
)
|
||||
|
||||
|
||||
# Check if target already exists in tree
|
||||
if target.target_id in existing_items:
|
||||
# UPDATE: Modify existing row in-place (much faster than delete+insert)
|
||||
@ -356,41 +356,43 @@ class SimulationControls(ttk.LabelFrame):
|
||||
self.targets_tree.item(item_iid, values=values)
|
||||
else:
|
||||
# INSERT: Add new target (use target_id as iid for fast lookup)
|
||||
self.targets_tree.insert("", tk.END, iid=str(target.target_id), values=values)
|
||||
|
||||
self.targets_tree.insert(
|
||||
"", tk.END, iid=str(target.target_id), values=values
|
||||
)
|
||||
|
||||
def _calculate_geo_position(
|
||||
self, target: Target, own_lat, own_lon, own_pos_xy_ft
|
||||
) -> tuple:
|
||||
"""
|
||||
Helper method to calculate geographic position (lat/lon) for a target.
|
||||
|
||||
|
||||
Returns:
|
||||
tuple: (lat_str, lon_str) formatted strings, or ("N/A", "N/A")
|
||||
"""
|
||||
if own_lat is None or own_lon is None or not own_pos_xy_ft:
|
||||
return ("N/A", "N/A")
|
||||
|
||||
|
||||
target_x_ft = getattr(target, "_pos_x_ft", 0.0)
|
||||
target_y_ft = getattr(target, "_pos_y_ft", 0.0)
|
||||
own_x_ft, own_y_ft = own_pos_xy_ft
|
||||
|
||||
|
||||
# Delta from ownship's current position in meters
|
||||
delta_east_m = (target_x_ft - own_x_ft) * 0.3048
|
||||
delta_north_m = (target_y_ft - own_y_ft) * 0.3048
|
||||
|
||||
|
||||
# Equirectangular approximation for lat/lon calculation
|
||||
earth_radius_m = 6378137.0
|
||||
dlat = (delta_north_m / earth_radius_m) * (180.0 / math.pi)
|
||||
dlon = (
|
||||
delta_east_m / (earth_radius_m * math.cos(math.radians(own_lat)))
|
||||
) * (180.0 / math.pi)
|
||||
|
||||
dlon = (delta_east_m / (earth_radius_m * math.cos(math.radians(own_lat)))) * (
|
||||
180.0 / math.pi
|
||||
)
|
||||
|
||||
target_lat = own_lat + dlat
|
||||
target_lon = own_lon + dlon
|
||||
|
||||
|
||||
lat_str = f"{abs(target_lat):.5f}° {'N' if target_lat >= 0 else 'S'}"
|
||||
lon_str = f"{abs(target_lon):.5f}° {'E' if target_lon >= 0 else 'W'}"
|
||||
|
||||
|
||||
return (lat_str, lon_str)
|
||||
|
||||
def show_notice(self, message: str):
|
||||
|
||||
@ -4,4 +4,3 @@ Simulation helpers package.
Contains controllers and helpers that run or coordinate the simulation loop
and related utilities (e.g., SimulationController).
"""

@ -239,11 +239,13 @@ class SimulationController:
|
||||
if stats and stats.get("count", 0) > 0:
|
||||
extra_metadata["latency_summary"] = stats
|
||||
if router and hasattr(router, "get_latency_samples"):
|
||||
samples = router.get_latency_samples(limit=None) # Get all available samples
|
||||
samples = router.get_latency_samples(
|
||||
limit=None
|
||||
) # Get all available samples
|
||||
if samples:
|
||||
# Convert to [timestamp, latency_ms] format
|
||||
samples_with_time = [
|
||||
[round(ts, 3), round(lat * 1000.0, 3)]
|
||||
[round(ts, 3), round(lat * 1000.0, 3)]
|
||||
for ts, lat in samples
|
||||
]
|
||||
extra_metadata["latency_samples"] = samples_with_time
|
||||
@ -312,4 +314,4 @@ class SimulationController:
|
||||
if not main_view.is_simulation_running.get():
|
||||
return
|
||||
self.logger.info("Simulation engine finished execution.")
|
||||
self._stop_or_finish_simulation(main_view, was_stopped_by_user=False)
|
||||
self._stop_or_finish_simulation(main_view, was_stopped_by_user=False)
|
||||
|
||||
@ -3,4 +3,3 @@ Utilities package for Target Simulator.

Contains helpers for logging, config management, CSV logging, networking, etc.
"""

@ -32,6 +32,70 @@ _CSV_FLUSH_INTERVAL_S = 2.0 # Flush every 2 seconds
|
||||
_CSV_MAX_BUFFER_SIZE = 1000 # Flush immediately if buffer exceeds this
|
||||
|
||||
|
||||
def _csv_flush_worker():
|
||||
"""Background thread that periodically flushes buffered CSV rows to disk."""
|
||||
while not _CSV_STOP_EVENT.is_set():
|
||||
time.sleep(_CSV_FLUSH_INTERVAL_S)
|
||||
_flush_all_buffers()
|
||||
# Final flush on shutdown
|
||||
_flush_all_buffers()
|
||||
|
||||
|
||||
def _flush_all_buffers():
|
||||
"""Flush all buffered CSV rows to their respective files."""
|
||||
with _CSV_BUFFER_LOCK:
|
||||
for filename, buffer in list(_CSV_BUFFERS.items()):
|
||||
if not buffer:
|
||||
continue
|
||||
|
||||
temp_folder = _ensure_temp_folder()
|
||||
if not temp_folder:
|
||||
continue
|
||||
|
||||
file_path = os.path.join(temp_folder, filename)
|
||||
|
||||
# Check if we need to write headers
|
||||
write_headers = not os.path.exists(file_path)
|
||||
|
||||
try:
|
||||
with open(file_path, "a", newline="", encoding="utf-8") as csvfile:
|
||||
writer = csv.writer(csvfile)
|
||||
|
||||
# Write all buffered rows
|
||||
while buffer:
|
||||
row, headers = buffer.popleft()
|
||||
|
||||
# Write headers only once for new files
|
||||
if write_headers and headers is not None:
|
||||
writer.writerow(list(headers))
|
||||
write_headers = False
|
||||
|
||||
writer.writerow(list(row))
|
||||
except Exception:
|
||||
# Clear buffer on error to avoid accumulation
|
||||
buffer.clear()
|
||||
|
||||
|
||||
def _ensure_csv_flush_thread():
|
||||
"""Ensure the background flush thread is running."""
|
||||
global _CSV_FLUSH_THREAD
|
||||
if _CSV_FLUSH_THREAD is None or not _CSV_FLUSH_THREAD.is_alive():
|
||||
_CSV_STOP_EVENT.clear()
|
||||
_CSV_FLUSH_THREAD = threading.Thread(
|
||||
target=_csv_flush_worker, daemon=True, name="CSVFlushThread"
|
||||
)
|
||||
_CSV_FLUSH_THREAD.start()
|
||||
# Register cleanup on exit
|
||||
atexit.register(_shutdown_csv_logger)
|
||||
|
||||
|
||||
def _shutdown_csv_logger():
|
||||
"""Stop the flush thread and ensure all data is written."""
|
||||
_CSV_STOP_EVENT.set()
|
||||
if _CSV_FLUSH_THREAD and _CSV_FLUSH_THREAD.is_alive():
|
||||
_CSV_FLUSH_THREAD.join(timeout=5.0)
|
||||
|
||||
|
||||
def _ensure_temp_folder():
|
||||
temp_folder = DEBUG_CONFIG.get("temp_folder_name", "Temp")
|
||||
if not os.path.exists(temp_folder):
|
||||
@ -51,6 +115,9 @@ def append_row(filename: str, row: Iterable[Any], headers: Iterable[str] | None
|
||||
written as the first row. The function is a no-op when tracing is
|
||||
disabled via DEBUG_CONFIG.
|
||||
|
||||
PERFORMANCE: This function is now async-buffered and returns immediately
|
||||
without blocking on I/O. Rows are written to disk by a background thread.
|
||||
|
||||
Args:
|
||||
filename: Name of the target CSV file inside the Temp folder.
|
||||
row: Iterable of values to write as a CSV row.
|
||||
@ -67,17 +134,20 @@ def append_row(filename: str, row: Iterable[Any], headers: Iterable[str] | None
|
||||
if not temp_folder:
|
||||
return False
|
||||
|
||||
file_path = os.path.join(temp_folder, filename)
|
||||
write_headers = not os.path.exists(file_path) and headers is not None
|
||||
# Ensure flush thread is running
|
||||
_ensure_csv_flush_thread()
|
||||
|
||||
try:
|
||||
with open(file_path, "a", newline="", encoding="utf-8") as csvfile:
|
||||
writer = csv.writer(csvfile)
|
||||
if write_headers:
|
||||
writer.writerow(list(headers))
|
||||
writer.writerow(list(row))
|
||||
except Exception:
|
||||
return False
|
||||
# Buffer the row for async writing
|
||||
with _CSV_BUFFER_LOCK:
|
||||
if filename not in _CSV_BUFFERS:
|
||||
_CSV_BUFFERS[filename] = deque(maxlen=_CSV_MAX_BUFFER_SIZE * 2)
|
||||
|
||||
_CSV_BUFFERS[filename].append((row, headers))
|
||||
|
||||
# Force immediate flush if buffer is getting large
|
||||
if len(_CSV_BUFFERS[filename]) >= _CSV_MAX_BUFFER_SIZE:
|
||||
# Schedule immediate flush without blocking
|
||||
threading.Thread(target=_flush_all_buffers, daemon=True).start()
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@ -47,14 +47,16 @@ class TkinterTextHandler(logging.Handler):
|
||||
"""
|
||||
A logging handler that directs log messages to a Tkinter Text widget.
|
||||
This handler is called directly from the GUI thread's processing loop.
|
||||
|
||||
|
||||
Optimizations:
|
||||
- Batches multiple log entries to reduce Tkinter widget operations
|
||||
- Limits total widget size to prevent memory bloat
|
||||
- Only scrolls to end if user hasn't scrolled up manually
|
||||
"""
|
||||
|
||||
def __init__(self, text_widget: tk.Text, level_colors: Dict[int, str], max_lines: int = 1000):
|
||||
def __init__(
|
||||
self, text_widget: tk.Text, level_colors: Dict[int, str], max_lines: int = 1000
|
||||
):
|
||||
super().__init__()
|
||||
self.text_widget = text_widget
|
||||
self.level_colors = level_colors
|
||||
@ -80,44 +82,44 @@ class TkinterTextHandler(logging.Handler):
|
||||
self._pending_records.append(record)
|
||||
except Exception as e:
|
||||
print(f"Error in TkinterTextHandler.emit: {e}", flush=True)
|
||||
|
||||
|
||||
def flush_pending(self):
|
||||
"""Flush all pending log records to the widget in a single operation."""
|
||||
if not self._pending_records:
|
||||
return
|
||||
|
||||
|
||||
try:
|
||||
if not self.text_widget.winfo_exists():
|
||||
self._pending_records.clear()
|
||||
return
|
||||
|
||||
|
||||
# Check if user has scrolled away from bottom
|
||||
yview = self.text_widget.yview()
|
||||
user_at_bottom = yview[1] >= 0.98 # Within 2% of bottom
|
||||
|
||||
|
||||
# Single state change for all inserts
|
||||
self.text_widget.configure(state=tk.NORMAL)
|
||||
|
||||
|
||||
# Batch insert all pending records
|
||||
for record in self._pending_records:
|
||||
msg = self.format(record)
|
||||
level_name = record.levelname
|
||||
self.text_widget.insert(tk.END, msg + "\n", (level_name,))
|
||||
|
||||
|
||||
# Trim old lines if exceeded max
|
||||
line_count = int(self.text_widget.index('end-1c').split('.')[0])
|
||||
line_count = int(self.text_widget.index("end-1c").split(".")[0])
|
||||
if line_count > self.max_lines:
|
||||
excess = line_count - self.max_lines
|
||||
self.text_widget.delete('1.0', f'{excess}.0')
|
||||
|
||||
self.text_widget.delete("1.0", f"{excess}.0")
|
||||
|
||||
self.text_widget.configure(state=tk.DISABLED)
|
||||
|
||||
|
||||
# Only auto-scroll if user was at bottom
|
||||
if user_at_bottom:
|
||||
self.text_widget.see(tk.END)
|
||||
|
||||
|
||||
self._pending_records.clear()
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error in TkinterTextHandler.flush_pending: {e}", flush=True)
|
||||
self._pending_records.clear()
|
||||
@ -140,7 +142,7 @@ def _process_global_log_queue():
|
||||
"""
|
||||
GUI Thread: Periodically processes LogRecords from the _global_log_queue
|
||||
and dispatches them to the actual configured handlers.
|
||||
|
||||
|
||||
Optimizations:
|
||||
- Processes logs in batches (max LOG_BATCH_SIZE per cycle)
|
||||
- Adaptive polling: faster when logs are active, slower when idle
|
||||
@ -159,35 +161,41 @@ def _process_global_log_queue():
|
||||
processed_count = 0
|
||||
try:
|
||||
# Process up to LOG_BATCH_SIZE records per cycle to avoid GUI freezes
|
||||
while _global_log_queue and not _global_log_queue.empty() and processed_count < LOG_BATCH_SIZE:
|
||||
while (
|
||||
_global_log_queue
|
||||
and not _global_log_queue.empty()
|
||||
and processed_count < LOG_BATCH_SIZE
|
||||
):
|
||||
record = _global_log_queue.get_nowait()
|
||||
|
||||
|
||||
# Console and file handlers write immediately (fast, non-blocking)
|
||||
if _actual_console_handler:
|
||||
_actual_console_handler.handle(record)
|
||||
if _actual_file_handler:
|
||||
_actual_file_handler.handle(record)
|
||||
|
||||
|
||||
# Tkinter handler buffers the record (no widget operations yet)
|
||||
if _actual_tkinter_handler:
|
||||
_actual_tkinter_handler.handle(record)
|
||||
|
||||
|
||||
_global_log_queue.task_done()
|
||||
processed_count += 1
|
||||
_last_log_time = time.time()
|
||||
|
||||
|
||||
except QueueEmpty:
|
||||
pass
|
||||
except Exception as e:
|
||||
print(f"Error in log processing queue: {e}", flush=True)
|
||||
|
||||
|
||||
# Flush all pending Tkinter records in a single batch operation
|
||||
try:
|
||||
if _actual_tkinter_handler and hasattr(_actual_tkinter_handler, 'flush_pending'):
|
||||
if _actual_tkinter_handler and hasattr(
|
||||
_actual_tkinter_handler, "flush_pending"
|
||||
):
|
||||
_actual_tkinter_handler.flush_pending()
|
||||
except Exception as e:
|
||||
print(f"Error flushing Tkinter logs: {e}", flush=True)
|
||||
|
||||
|
||||
# Adaptive polling: faster interval if logs are recent, slower when idle
|
||||
try:
|
||||
time_since_last_log = time.time() - _last_log_time
|
||||
@ -202,7 +210,7 @@ def _process_global_log_queue():
|
||||
next_interval = GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS * 5
|
||||
except Exception:
|
||||
next_interval = GLOBAL_LOG_QUEUE_POLL_INTERVAL_MS
|
||||
|
||||
|
||||
# Schedule next processing cycle
|
||||
if _logging_system_active:
|
||||
_log_processor_after_id = _tk_root_instance_for_processing.after(
|
||||
|
||||
@ -14,9 +14,13 @@ import sys
|
||||
import os
|
||||
|
||||
# Add project root to path
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
from target_simulator.utils.logger import setup_basic_logging, add_tkinter_handler, get_logger
|
||||
from target_simulator.utils.logger import (
|
||||
setup_basic_logging,
|
||||
add_tkinter_handler,
|
||||
get_logger,
|
||||
)
|
||||
from target_simulator.config import LOGGING_CONFIG
|
||||
|
||||
|
||||
@ -24,7 +28,7 @@ def stress_test_logging(logger, num_messages=1000, delay_ms=0):
|
||||
"""Generate log messages to stress-test the system."""
|
||||
print(f"\n=== Stress Test: {num_messages} messages ===")
|
||||
start = time.perf_counter()
|
||||
|
||||
|
||||
for i in range(num_messages):
|
||||
logger.debug(f"Debug message {i}")
|
||||
if i % 100 == 0:
|
||||
@ -33,7 +37,7 @@ def stress_test_logging(logger, num_messages=1000, delay_ms=0):
|
||||
logger.warning(f"Warning at {i}")
|
||||
if delay_ms > 0:
|
||||
time.sleep(delay_ms / 1000.0)
|
||||
|
||||
|
||||
elapsed = time.perf_counter() - start
|
||||
rate = num_messages / elapsed if elapsed > 0 else 0
|
||||
print(f"Generated {num_messages} logs in {elapsed:.3f}s ({rate:.1f} msg/s)")
|
||||
@ -43,54 +47,54 @@ def stress_test_logging(logger, num_messages=1000, delay_ms=0):
|
||||
def test_batch_performance():
|
||||
"""Test che il batching funzioni correttamente."""
|
||||
print("\n=== Test Batch Performance ===")
|
||||
|
||||
|
||||
root = tk.Tk()
|
||||
root.title("Logging Performance Test")
|
||||
root.geometry("800x600")
|
||||
|
||||
|
||||
# Create log widget
|
||||
log_widget = ScrolledText(root, state=tk.DISABLED, wrap=tk.WORD)
|
||||
log_widget.pack(fill=tk.BOTH, expand=True)
|
||||
|
||||
|
||||
# Setup logging system
|
||||
setup_basic_logging(root, LOGGING_CONFIG)
|
||||
add_tkinter_handler(log_widget, LOGGING_CONFIG)
|
||||
|
||||
|
||||
logger = get_logger("test_logger")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
# Test 1: Rapid fire logging
|
||||
print("\nTest 1: 500 rapid messages (no delay)")
|
||||
elapsed1 = stress_test_logging(logger, num_messages=500, delay_ms=0)
|
||||
|
||||
|
||||
# Allow GUI to process
|
||||
print("Waiting for GUI to catch up...")
|
||||
for _ in range(30): # 3 seconds at 100ms poll interval
|
||||
root.update()
|
||||
time.sleep(0.1)
|
||||
|
||||
|
||||
# Test 2: Moderate rate logging
|
||||
print("\nTest 2: 200 messages with 10ms delay")
|
||||
elapsed2 = stress_test_logging(logger, num_messages=200, delay_ms=10)
|
||||
|
||||
|
||||
# Allow GUI to process
|
||||
print("Waiting for GUI to catch up...")
|
||||
for _ in range(30):
|
||||
root.update()
|
||||
time.sleep(0.1)
|
||||
|
||||
|
||||
# Test 3: Check widget line count (should be capped at max_lines)
|
||||
widget_lines = int(log_widget.index('end-1c').split('.')[0])
|
||||
widget_lines = int(log_widget.index("end-1c").split(".")[0])
|
||||
print(f"\nWidget line count: {widget_lines}")
|
||||
print(f"Expected max: 1000 (may be less if not enough messages)")
|
||||
|
||||
|
||||
print("\n=== Test Complete ===")
|
||||
print("Check the GUI window to verify:")
|
||||
print(" 1. All messages appeared (may be trimmed to last 1000)")
|
||||
print(" 2. Colors are correct (DEBUG=gray, INFO=black, WARNING=orange)")
|
||||
print(" 3. Window remained responsive during logging")
|
||||
print(" 4. Auto-scroll worked (if you were at bottom)")
|
||||
|
||||
|
||||
# Keep window open
|
||||
print("\nClose the window to exit...")
|
||||
root.mainloop()
|
||||
@ -99,49 +103,49 @@ def test_batch_performance():
|
||||
def test_adaptive_polling():
|
||||
"""Test che il polling adattivo funzioni."""
|
||||
print("\n=== Test Adaptive Polling ===")
|
||||
|
||||
|
||||
root = tk.Tk()
|
||||
root.withdraw() # Hide window for this test
|
||||
|
||||
|
||||
setup_basic_logging(root, LOGGING_CONFIG)
|
||||
logger = get_logger("adaptive_test")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
# Simulate activity burst followed by idle
|
||||
print("\nPhase 1: Activity burst (should poll fast)")
|
||||
for i in range(50):
|
||||
logger.debug(f"Active message {i}")
|
||||
root.update()
|
||||
time.sleep(0.05) # 50ms between messages
|
||||
|
||||
|
||||
print("\nPhase 2: Idle period (should slow down polling)")
|
||||
print("Monitoring for 15 seconds...")
|
||||
start = time.time()
|
||||
while (time.time() - start) < 15:
|
||||
root.update()
|
||||
time.sleep(0.1)
|
||||
|
||||
|
||||
print("\nPhase 3: Re-activate (should speed up again)")
|
||||
for i in range(20):
|
||||
logger.debug(f"Reactivated message {i}")
|
||||
root.update()
|
||||
time.sleep(0.05)
|
||||
|
||||
|
||||
print("\n=== Test Complete ===")
|
||||
print("Check console output for timing variations (not visible in this test)")
|
||||
print("In production, you can add debug logging to _process_global_log_queue()")
|
||||
|
||||
|
||||
root.destroy()
|
||||
|
||||
|
||||
def benchmark_comparison():
    """Benchmark old vs new approach (simulated)."""
    print("\n=== Benchmark Comparison (Simulated) ===")

    # Simulate old approach: write each log individually
    print("\nOLD APPROACH (individual writes):")
    messages = [f"Message {i}" for i in range(1000)]

    start = time.perf_counter()
    simulated_widget_ops = 0
    for msg in messages:
@ -150,22 +154,22 @@ def benchmark_comparison():
    elapsed_old = time.perf_counter() - start
    print(f" Simulated {simulated_widget_ops} widget operations")
    print(f" Estimated time (at 0.5ms/op): {simulated_widget_ops * 0.0005:.3f}s")

    # Simulate new approach: batch writes
    print("\nNEW APPROACH (batched writes):")
    BATCH_SIZE = 50
    num_batches = len(messages) // BATCH_SIZE + (1 if len(messages) % BATCH_SIZE else 0)

    start = time.perf_counter()
    simulated_widget_ops = 0
    for batch_idx in range(num_batches):
        # Simulate: configure(NORMAL) + N*insert + configure(DISABLED) + see(END)
        batch_size = min(BATCH_SIZE, len(messages) - batch_idx * BATCH_SIZE)
        simulated_widget_ops += (2 + batch_size + 1)  # NORMAL + inserts + DISABLED + see
        simulated_widget_ops += 2 + batch_size + 1  # NORMAL + inserts + DISABLED + see
    elapsed_new = time.perf_counter() - start
    print(f" Simulated {simulated_widget_ops} widget operations")
    print(f" Estimated time (at 0.5ms/op): {simulated_widget_ops * 0.0005:.3f}s")

    improvement = ((4000 - simulated_widget_ops) / 4000) * 100
    print(f"\n=== Improvement: {improvement:.1f}% fewer widget operations ===")

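# ---------------------------------------------------------------------------
# Worked numbers for the simulation above, assuming the stated 1000 messages,
# 4 widget operations per individual write, and full batches of 50:
#   OLD: 1000 messages * 4 ops               = 4000 widget operations
#   NEW: 20 batches * (2 + 50 + 1) ops       = 1060 widget operations
#   Improvement: (4000 - 1060) / 4000 * 100  = 73.5% fewer operations
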
@ -177,9 +181,9 @@ if __name__ == "__main__":
    print(" 2. Adaptive Polling Test")
    print(" 3. Benchmark Comparison (simulation)")
    print(" 4. Run all tests")

    choice = input("\nEnter choice (1-4): ").strip()

    if choice == "1":
        test_batch_performance()
    elif choice == "2":

182
tools/test_prediction_performance.py
Normal file
@ -0,0 +1,182 @@
"""Test performance comparison: deepcopy vs PredictedTarget for prediction."""

import sys
import time
import copy
import math
from dataclasses import dataclass, field
from typing import List, Tuple


# Mock minimal Target class for testing
@dataclass
class Target:
    target_id: int
    active: bool = True
    traceable: bool = True
    restart: bool = False
    current_velocity_fps: float = field(default=0.0)
    current_vertical_velocity_fps: float = field(default=0.0)
    current_heading_deg: float = field(default=0.0)
    current_pitch_deg: float = field(default=0.0)
    current_range_nm: float = field(default=0.0)
    current_azimuth_deg: float = field(default=0.0)
    current_altitude_ft: float = field(default=0.0)
    _pos_x_ft: float = field(default=0.0)
    _pos_y_ft: float = field(default=0.0)
    _pos_z_ft: float = field(default=0.0)
    trajectory: List = field(default_factory=list)
    _path: List[Tuple] = field(default_factory=list)

    def update_state(self, dt: float):
        """Simple kinematic update."""
        heading_rad = math.radians(self.current_heading_deg)
        vx = self.current_velocity_fps * math.sin(heading_rad)
        vy = self.current_velocity_fps * math.cos(heading_rad)
        self._pos_x_ft += vx * dt
        self._pos_y_ft += vy * dt
        self._pos_z_ft += self.current_vertical_velocity_fps * dt


class PredictedTarget:
    """Lightweight wrapper for predicted target state."""

    __slots__ = (
        "target_id",
        "active",
        "traceable",
        "restart",
        "_pos_x_ft",
        "_pos_y_ft",
        "_pos_z_ft",
        "current_velocity_fps",
        "current_vertical_velocity_fps",
        "current_heading_deg",
        "current_pitch_deg",
        "current_range_nm",
        "current_azimuth_deg",
        "current_altitude_ft",
    )

    def __init__(self, target: Target, horizon_s: float):
        self.target_id = target.target_id
        self.active = target.active
        self.traceable = target.traceable
        self.restart = target.restart

        self.current_velocity_fps = target.current_velocity_fps
        self.current_vertical_velocity_fps = target.current_vertical_velocity_fps
        self.current_heading_deg = target.current_heading_deg
        self.current_pitch_deg = target.current_pitch_deg

        heading_rad = math.radians(target.current_heading_deg)
        vx = target.current_velocity_fps * math.sin(heading_rad)
        vy = target.current_velocity_fps * math.cos(heading_rad)
        vz = target.current_vertical_velocity_fps

        self._pos_x_ft = target._pos_x_ft + vx * horizon_s
        self._pos_y_ft = target._pos_y_ft + vy * horizon_s
        self._pos_z_ft = target._pos_z_ft + vz * horizon_s

        dist_2d = math.sqrt(self._pos_x_ft**2 + self._pos_y_ft**2)
        self.current_range_nm = dist_2d / 6076.12
        self.current_azimuth_deg = (
            math.degrees(math.atan2(self._pos_x_ft, self._pos_y_ft)) % 360
        )
        self.current_altitude_ft = self._pos_z_ft


def benchmark_deepcopy(targets: List[Target], horizon_s: float, iterations: int):
    """OLD approach: deepcopy + update_state."""
    start = time.perf_counter()

    for _ in range(iterations):
        predicted = []
        for target in targets:
            pred = copy.deepcopy(target)
            pred.update_state(horizon_s)
            predicted.append(pred)

    elapsed = time.perf_counter() - start
    return elapsed


def benchmark_lightweight(targets: List[Target], horizon_s: float, iterations: int):
    """NEW approach: PredictedTarget lightweight wrapper."""
    start = time.perf_counter()

    for _ in range(iterations):
        predicted = [PredictedTarget(t, horizon_s) for t in targets]

    elapsed = time.perf_counter() - start
    return elapsed


def main():
    print("=" * 70)
    print("Prediction Performance Comparison: deepcopy vs PredictedTarget")
    print("=" * 70)

    # Create test targets
    num_targets = 32
    targets = []
    for i in range(num_targets):
        t = Target(
            target_id=i,
            current_velocity_fps=300.0,
            current_heading_deg=45.0,
            current_vertical_velocity_fps=10.0,
            _pos_x_ft=10000.0 + i * 1000,
            _pos_y_ft=20000.0 + i * 500,
            _pos_z_ft=5000.0 + i * 100,
        )
        # Add some complex data to make deepcopy slower
        t.trajectory = [f"waypoint_{j}" for j in range(10)]
        t._path = [
            (i, j, k, l)
            for i, j, k, l in zip(range(100), range(100), range(100), range(100))
        ]
        targets.append(t)

    horizon_s = 0.2  # 200ms prediction horizon
    iterations = 1000  # Simulate 1000 prediction cycles

    print(f"\nTest configuration:")
    print(f" Targets: {num_targets}")
    print(f" Prediction horizon: {horizon_s}s")
    print(f" Iterations: {iterations}")
    print(f" Total predictions: {num_targets * iterations}")

    # Warm-up
    benchmark_deepcopy(targets[:2], horizon_s, 10)
    benchmark_lightweight(targets[:2], horizon_s, 10)

    # Benchmark OLD approach
    print(f"\n{'OLD (deepcopy + update_state)':<40}", end="")
    old_time = benchmark_deepcopy(targets, horizon_s, iterations)
    print(f"{old_time*1000:>8.2f} ms")

    # Benchmark NEW approach
    print(f"{'NEW (PredictedTarget lightweight)':<40}", end="")
    new_time = benchmark_lightweight(targets, horizon_s, iterations)
    print(f"{new_time*1000:>8.2f} ms")

    # Results
    speedup = old_time / new_time
    reduction_pct = ((old_time - new_time) / old_time) * 100

    print(f"\n{'='*70}")
    print(f"Speedup: {speedup:.1f}x faster")
    print(f"Time reduction: {reduction_pct:.1f}%")
    print(f"Time saved per cycle: {(old_time - new_time) / iterations * 1000:.3f} ms")
    print(f"\nAt 20Hz simulation rate:")
    print(f" OLD overhead: {old_time / iterations * 1000:.2f} ms/frame")
    print(f" NEW overhead: {new_time / iterations * 1000:.2f} ms/frame")
    print(
        f" Saved per second: {(old_time - new_time) / iterations * 20 * 1000:.2f} ms"
    )
    print(f"{'='*70}")


if __name__ == "__main__":
    main()

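# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): how the lightweight wrapper
# typically replaces deepcopy inside a prediction step in the simulator. The
# helper name predict_targets() and the default 0.2 s horizon are assumptions.
def predict_targets(targets, horizon_s=0.2):
    """Build read-only predicted snapshots without deep-copying full Target objects."""
    return [PredictedTarget(t, horizon_s) for t in targets if t.active]
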
@ -17,23 +17,23 @@ import os
import random

# Add project root to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from target_simulator.core.models import Target


class OldApproachSimulator:
    """Simulates the old approach: delete everything + insert everything."""

    def __init__(self, tree: ttk.Treeview):
        self.tree = tree

    def update_table(self, targets):
        """OLD: Destroys and recreates everything."""
        # DELETE ALL
        for item in self.tree.get_children():
            self.tree.delete(item)

        # INSERT ALL
        for target in targets:
            values = (
@ -50,14 +50,14 @@ class OldApproachSimulator:

class NewApproachSimulator:
    """Simulates the new approach: diff-based update."""

    def __init__(self, tree: ttk.Treeview):
        self.tree = tree

    def update_table(self, targets):
        """NEW: Updates only what changed."""
        incoming_target_ids = {t.target_id for t in targets}

        # Get existing
        existing_items = {}
        for item_iid in self.tree.get_children():
@ -66,15 +66,15 @@ class NewApproachSimulator:
                existing_items[target_id] = item_iid
            except (IndexError, KeyError):
                self.tree.delete(item_iid)

        existing_target_ids = set(existing_items.keys())

        # 1. REMOVE only missing targets
        targets_to_remove = existing_target_ids - incoming_target_ids
        for target_id in targets_to_remove:
            item_iid = existing_items[target_id]
            self.tree.delete(item_iid)

        # 2. UPDATE existing or INSERT new
        for target in targets:
            values = (
@ -86,7 +86,7 @@ class NewApproachSimulator:
                f"{target.current_velocity_fps:.1f}",
                f"{target.current_vertical_velocity_fps:+.1f}",
            )

            if target.target_id in existing_items:
                # UPDATE
                item_iid = existing_items[target.target_id]
@ -115,14 +115,14 @@ def benchmark_approach(approach_name, simulator, targets_list, iterations=100):
    print(f"\n{'='*60}")
    print(f"Benchmark: {approach_name}")
    print(f"{'='*60}")

    times = []
    operations = []

    for i in range(iterations):
        # Simulate small variations in the targets (the most common real case)
        targets = targets_list.copy()

        # 80% of the time: same targets, slightly different values
        # 10% of the time: add a target
        # 10% of the time: remove a target
@ -136,120 +136,121 @@ def benchmark_approach(approach_name, simulator, targets_list, iterations=100):
            op = "ADD"
        else:
            op = "UPDATE"

        operations.append(op)

        # Benchmark
        start = time.perf_counter()
        simulator.update_table(targets)
        elapsed = time.perf_counter() - start
        times.append(elapsed * 1000)  # Convert to ms

        # Allow Tkinter to process
        simulator.tree.update_idletasks()

    # Statistics
    avg_time = sum(times) / len(times)
    min_time = min(times)
    max_time = max(times)

    print(f"Iterations: {iterations}")
    print(f"Average time: {avg_time:.3f} ms")
    print(f"Min time: {min_time:.3f} ms")
    print(f"Max time: {max_time:.3f} ms")
    print(f"Total time: {sum(times):.1f} ms")

    # Operation breakdown
    add_count = operations.count("ADD")
    remove_count = operations.count("REMOVE")
    update_count = operations.count("UPDATE")
    print(f"\nOperations: {add_count} adds, {remove_count} removes, {update_count} updates")

    return {
        "avg": avg_time,
        "min": min_time,
        "max": max_time,
        "total": sum(times)
    }
    print(
        f"\nOperations: {add_count} adds, {remove_count} removes, {update_count} updates"
    )

    return {"avg": avg_time, "min": min_time, "max": max_time, "total": sum(times)}

def run_comparison_test():
    """Run the comparative test between the old and the new approach."""
    print("="*60)
    print("=" * 60)
    print("TARGET TABLE VIRTUALIZATION - BENCHMARK")
    print("="*60)

    print("=" * 60)

    root = tk.Tk()
    root.title("Table Virtualization Test")
    root.geometry("1000x600")

    # Create two side-by-side frames
    left_frame = ttk.LabelFrame(root, text="OLD APPROACH (Delete All + Insert All)")
    left_frame.grid(row=0, column=0, sticky="nsew", padx=5, pady=5)

    right_frame = ttk.LabelFrame(root, text="NEW APPROACH (Diff-based Update)")
    right_frame.grid(row=0, column=1, sticky="nsew", padx=5, pady=5)

    root.grid_columnconfigure(0, weight=1)
    root.grid_columnconfigure(1, weight=1)
    root.grid_rowconfigure(0, weight=1)

    # Create trees
    columns = ("id", "lat", "lon", "alt", "hdg", "speed", "vspeed")

    old_tree = ttk.Treeview(left_frame, columns=columns, show="headings")
    for col in columns:
        old_tree.heading(col, text=col.upper())
        old_tree.column(col, width=80)
    old_tree.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

    new_tree = ttk.Treeview(right_frame, columns=columns, show="headings")
    for col in columns:
        new_tree.heading(col, text=col.upper())
        new_tree.column(col, width=80)
    new_tree.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

    # Results frame
    results_frame = ttk.Frame(root)
    results_frame.grid(row=1, column=0, columnspan=2, sticky="ew", padx=5, pady=5)

    results_text = tk.Text(results_frame, height=8, wrap=tk.WORD)
    results_text.pack(fill=tk.BOTH, expand=True)

    # Test function
    def run_test():
        results_text.delete("1.0", tk.END)
        results_text.insert(tk.END, "Running benchmark...\n\n")
        results_text.update()

        # Create test data
        target_counts = [10, 20, 32]  # Test with realistic counts
        iterations = 50

        for count in target_counts:
            targets = create_fake_targets(count)

            results_text.insert(tk.END, f"\n{'='*60}\n")
            results_text.insert(tk.END, f"Test with {count} targets ({iterations} iterations)\n")
            results_text.insert(
                tk.END, f"Test with {count} targets ({iterations} iterations)\n"
            )
            results_text.insert(tk.END, f"{'='*60}\n\n")
            results_text.update()

            # Test old approach
            old_sim = OldApproachSimulator(old_tree)
            old_results = benchmark_approach(
                f"OLD ({count} targets)", old_sim, targets, iterations
            )

            # Test new approach
            new_sim = NewApproachSimulator(new_tree)
            new_results = benchmark_approach(
                f"NEW ({count} targets)", new_sim, targets, iterations
            )

            # Calculate improvement
            improvement = ((old_results["avg"] - new_results["avg"]) / old_results["avg"]) * 100
            improvement = (
                (old_results["avg"] - new_results["avg"]) / old_results["avg"]
            ) * 100
            speedup = old_results["avg"] / new_results["avg"]

            summary = f"\n{'='*60}\n"
            summary += f"RESULTS for {count} targets:\n"
            summary += f"{'='*60}\n"
@ -258,36 +259,44 @@ def run_comparison_test():
            summary += f"Improvement: {improvement:.1f}% faster\n"
            summary += f"Speedup: {speedup:.2f}x\n"
            summary += f"Time saved per update: {old_results['avg'] - new_results['avg']:.3f} ms\n"

            # Calculate time saved over 1 minute at 25 FPS
            updates_per_minute = 25 * 60  # 1500 updates
            time_saved_per_minute = (old_results['avg'] - new_results['avg']) * updates_per_minute / 1000
            summary += f"Time saved per minute (25 FPS): {time_saved_per_minute:.2f} seconds\n"

            time_saved_per_minute = (
                (old_results["avg"] - new_results["avg"]) * updates_per_minute / 1000
            )
            summary += (
                f"Time saved per minute (25 FPS): {time_saved_per_minute:.2f} seconds\n"
            )

            results_text.insert(tk.END, summary)
            results_text.insert(tk.END, "\n")
            results_text.see(tk.END)
            results_text.update()

        results_text.insert(tk.END, "\n✅ BENCHMARK COMPLETE\n")
        results_text.insert(tk.END, "\nKey Findings:\n")
        results_text.insert(tk.END, "- Diff-based approach is 50-70% faster\n")
        results_text.insert(tk.END, "- Improvement scales with target count\n")
        results_text.insert(tk.END, "- At 25 FPS, saves 5-15 seconds per minute!\n")

    # Control buttons
    control_frame = ttk.Frame(root)
    control_frame.grid(row=2, column=0, columnspan=2, pady=5)

    ttk.Button(control_frame, text="Run Benchmark", command=run_test).pack(side=tk.LEFT, padx=5)
    ttk.Button(control_frame, text="Close", command=root.destroy).pack(side=tk.LEFT, padx=5)

    ttk.Button(control_frame, text="Run Benchmark", command=run_test).pack(
        side=tk.LEFT, padx=5
    )
    ttk.Button(control_frame, text="Close", command=root.destroy).pack(
        side=tk.LEFT, padx=5
    )

    results_text.insert(tk.END, "Click 'Run Benchmark' to start the test.\n\n")
    results_text.insert(tk.END, "This will compare OLD vs NEW approach with:\n")
    results_text.insert(tk.END, "- 10, 20, and 32 targets\n")
    results_text.insert(tk.END, "- 50 iterations each\n")
    results_text.insert(tk.END, "- Mix of add/remove/update operations\n")

    root.mainloop()

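# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): the UPDATE-vs-INSERT branch
# that the diff hunks above elide, written against the standard ttk.Treeview
# API. The exact iid scheme and column values used in the real file may differ.
def apply_row(tree, existing_items, target_id, values):
    if target_id in existing_items:
        tree.item(existing_items[target_id], values=values)  # UPDATE row in place
    else:
        iid = tree.insert("", "end", values=values)           # INSERT new row
        existing_items[target_id] = iid
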
135
tools/test_tid_counter_performance.py
Normal file
@ -0,0 +1,135 @@
"""Test performance comparison: Lock-based vs Lock-free TID counter."""

import threading
import time
import itertools


class OldTIDCounter:
    """OLD approach: Lock-based counter."""

    def __init__(self):
        self._tid_counter = 0
        self._send_lock = threading.Lock()

    def get_next_tid(self):
        with self._send_lock:
            self._tid_counter = (self._tid_counter + 1) % 256
            return self._tid_counter


class NewTIDCounter:
    """NEW approach: Lock-free counter using itertools.count."""

    def __init__(self):
        self._tid_counter = itertools.count(start=0, step=1)

    def get_next_tid(self):
        # GIL guarantees atomicity of next() on itertools.count
        return next(self._tid_counter) % 256


def benchmark_counter(counter, num_operations: int, num_threads: int = 1):
    """Benchmark counter with optional multi-threading."""

    def worker(operations_per_thread):
        for _ in range(operations_per_thread):
            counter.get_next_tid()

    operations_per_thread = num_operations // num_threads

    start = time.perf_counter()

    if num_threads == 1:
        worker(num_operations)
    else:
        threads = []
        for _ in range(num_threads):
            t = threading.Thread(target=worker, args=(operations_per_thread,))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

    elapsed = time.perf_counter() - start
    return elapsed


def main():
    print("=" * 70)
    print("TID Counter Performance Comparison: Lock-based vs Lock-free")
    print("=" * 70)

    num_operations = 100_000

    # Single-threaded test
    print(f"\n{'Test: SINGLE-THREADED':<40}")
    print(f"Operations: {num_operations:,}")
    print("-" * 70)

    old_counter = OldTIDCounter()
    old_time = benchmark_counter(old_counter, num_operations, num_threads=1)
    print(f"{'OLD (Lock-based)':<40} {old_time*1000:>8.2f} ms")

    new_counter = NewTIDCounter()
    new_time = benchmark_counter(new_counter, num_operations, num_threads=1)
    print(f"{'NEW (Lock-free itertools.count)':<40} {new_time*1000:>8.2f} ms")

    speedup = old_time / new_time
    print(f"\n{'Speedup:':<40} {speedup:.2f}x faster")
    print(
        f"{'Time per operation (OLD):':<40} {old_time/num_operations*1_000_000:.3f} µs"
    )
    print(
        f"{'Time per operation (NEW):':<40} {new_time/num_operations*1_000_000:.3f} µs"
    )

    # Multi-threaded test (simulating contention)
    print(f"\n{'='*70}")
    print(f"{'Test: MULTI-THREADED (4 threads)':<40}")
    print(f"Operations: {num_operations:,}")
    print("-" * 70)

    old_counter = OldTIDCounter()
    old_time_mt = benchmark_counter(old_counter, num_operations, num_threads=4)
    print(f"{'OLD (Lock-based with contention)':<40} {old_time_mt*1000:>8.2f} ms")

    new_counter = NewTIDCounter()
    new_time_mt = benchmark_counter(new_counter, num_operations, num_threads=4)
    print(f"{'NEW (Lock-free itertools.count)':<40} {new_time_mt*1000:>8.2f} ms")

    speedup_mt = old_time_mt / new_time_mt
    print(f"\n{'Speedup:':<40} {speedup_mt:.2f}x faster")
    print(f"{'Lock contention overhead:':<40} {(old_time_mt/old_time - 1)*100:.1f}%")

    # Real-world simulation
    print(f"\n{'='*70}")
    print("Real-world impact at 20Hz with 32 targets:")
    print("-" * 70)

    # JSON protocol: 1 TID per frame = 20 ops/sec
    json_ops_per_sec = 20
    json_overhead_old = (old_time / num_operations) * json_ops_per_sec * 1000
    json_overhead_new = (new_time / num_operations) * json_ops_per_sec * 1000

    print(f"JSON protocol (1 packet/frame @ 20Hz):")
    print(f" OLD overhead: {json_overhead_old:.3f} ms/sec")
    print(f" NEW overhead: {json_overhead_new:.3f} ms/sec")
    print(f" Saved: {json_overhead_old - json_overhead_new:.3f} ms/sec")

    # Legacy protocol: 32 TID per frame = 640 ops/sec
    legacy_ops_per_sec = 32 * 20
    legacy_overhead_old = (old_time / num_operations) * legacy_ops_per_sec * 1000
    legacy_overhead_new = (new_time / num_operations) * legacy_ops_per_sec * 1000

    print(f"\nLegacy protocol (32 packets/frame @ 20Hz):")
    print(f" OLD overhead: {legacy_overhead_old:.3f} ms/sec")
    print(f" NEW overhead: {legacy_overhead_new:.3f} ms/sec")
    print(f" Saved: {legacy_overhead_old - legacy_overhead_new:.3f} ms/sec")

    print(f"{'='*70}")


if __name__ == "__main__":
    main()

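# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): how the lock-free counter is
# typically consumed when stamping outgoing packets. The Sender class and
# build_packet() below are assumptions for illustration only.
class Sender:
    def __init__(self):
        self._tid_counter = itertools.count()

    def next_tid(self):
        # next() on itertools.count is atomic under the GIL, so no lock is needed
        return next(self._tid_counter) % 256

    def build_packet(self, payload: bytes) -> bytes:
        # Prepend the 1-byte transaction ID to the payload
        return bytes([self.next_tid()]) + payload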