Optimization fine-tuning
parent 9823a294b2
commit 14c0501451

convert.py: 64 changed lines
@@ -4,6 +4,7 @@ import tkinter as tk
from tkinter import filedialog, messagebox, scrolledtext
from pathlib import Path


class MarkdownToPDFApp:
    def __init__(self, root):
        self.root = root
@@ -18,25 +19,53 @@ class MarkdownToPDFApp:
        self.generate_pdf = tk.BooleanVar(value=True)

        # --- UI ---
        tk.Label(root, text="Cartella Markdown:").pack(anchor="w", padx=10, pady=(10, 0))
        tk.Label(root, text="Cartella Markdown:").pack(
            anchor="w", padx=10, pady=(10, 0)
        )
        frame1 = tk.Frame(root)
        frame1.pack(fill="x", padx=10)
        tk.Entry(frame1, textvariable=self.folder_path, width=50).pack(side="left", fill="x", expand=True)
        tk.Button(frame1, text="Sfoglia...", command=self.choose_folder).pack(side="right", padx=5)
        tk.Entry(frame1, textvariable=self.folder_path, width=50).pack(
            side="left", fill="x", expand=True
        )
        tk.Button(frame1, text="Sfoglia...", command=self.choose_folder).pack(
            side="right", padx=5
        )

        tk.Label(root, text="Nome base file output (senza estensione):").pack(anchor="w", padx=10, pady=(10, 0))
        tk.Label(root, text="Nome base file output (senza estensione):").pack(
            anchor="w", padx=10, pady=(10, 0)
        )
        tk.Entry(root, textvariable=self.output_name, width=40).pack(fill="x", padx=10)

        tk.Checkbutton(root, text="Usa template DOCX", variable=self.use_template, command=self.toggle_template).pack(anchor="w", padx=10, pady=(10, 0))
        tk.Checkbutton(
            root,
            text="Usa template DOCX",
            variable=self.use_template,
            command=self.toggle_template,
        ).pack(anchor="w", padx=10, pady=(10, 0))
        frame2 = tk.Frame(root)
        frame2.pack(fill="x", padx=10)
        tk.Entry(frame2, textvariable=self.template_path, width=50, state="disabled").pack(side="left", fill="x", expand=True)
        tk.Button(frame2, text="Seleziona template", command=self.choose_template, state="disabled").pack(side="right", padx=5)
        tk.Entry(
            frame2, textvariable=self.template_path, width=50, state="disabled"
        ).pack(side="left", fill="x", expand=True)
        tk.Button(
            frame2,
            text="Seleziona template",
            command=self.choose_template,
            state="disabled",
        ).pack(side="right", padx=5)
        self.template_frame = frame2

        tk.Checkbutton(root, text="Genera anche PDF finale", variable=self.generate_pdf).pack(anchor="w", padx=10, pady=(10, 0))
        tk.Checkbutton(
            root, text="Genera anche PDF finale", variable=self.generate_pdf
        ).pack(anchor="w", padx=10, pady=(10, 0))

        tk.Button(root, text="Genera Documento", command=self.generate_output, bg="#3c9", fg="white").pack(pady=10)
        tk.Button(
            root,
            text="Genera Documento",
            command=self.generate_output,
            bg="#3c9",
            fg="white",
        ).pack(pady=10)

        tk.Label(root, text="Log:").pack(anchor="w", padx=10)
        self.log_box = scrolledtext.ScrolledText(root, height=13, state="disabled")
@@ -61,7 +90,9 @@ class MarkdownToPDFApp:
            widget.configure(state=state)

    def choose_template(self):
        file = filedialog.askopenfilename(title="Seleziona template DOCX", filetypes=[("Word Template", "*.docx")])
        file = filedialog.askopenfilename(
            title="Seleziona template DOCX", filetypes=[("Word Template", "*.docx")]
        )
        if file:
            self.template_path.set(file)

@@ -73,7 +104,9 @@ class MarkdownToPDFApp:
        make_pdf = self.generate_pdf.get()

        if not folder:
            messagebox.showwarning("Attenzione", "Seleziona una cartella contenente i file Markdown.")
            messagebox.showwarning(
                "Attenzione", "Seleziona una cartella contenente i file Markdown."
            )
            return

        folder_path = Path(folder)
@@ -83,7 +116,9 @@
        # Trova i file Markdown numerati
        md_files = sorted(folder_path.glob("[0-9][0-9]_*.md"))
        if not md_files:
            messagebox.showerror("Errore", "Nessun file Markdown numerato trovato nella cartella.")
            messagebox.showerror(
                "Errore", "Nessun file Markdown numerato trovato nella cartella."
            )
            return

        self.log(f"Trovati {len(md_files)} file Markdown:")
@@ -108,7 +143,9 @@
        cmd_docx = ["pandoc", str(combined_md), "-o", str(output_docx)]
        if use_template:
            if not Path(template).exists():
                messagebox.showerror("Template non trovato", f"Il file {template} non esiste.")
                messagebox.showerror(
                    "Template non trovato", f"Il file {template} non esiste."
                )
                return
            cmd_docx.extend(["--reference-doc", str(template)])

@@ -141,6 +178,7 @@ class MarkdownToPDFApp:
        if combined_md.exists():
            combined_md.unlink()


if __name__ == "__main__":
    root = tk.Tk()
    app = MarkdownToPDFApp(root)

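Note: the cmd_docx list assembled in the hunk above maps directly onto a pandoc command line. A minimal sketch, with hypothetical file names (only the -o and --reference-doc options come from the diff):

    # Hypothetical illustration of the command convert.py builds and then runs.
    from pathlib import Path

    combined_md = Path("combined.md")    # merged Markdown file (hypothetical name)
    output_docx = Path("output.docx")    # chosen output name (hypothetical)
    template = Path("template.docx")     # optional DOCX reference template

    cmd_docx = ["pandoc", str(combined_md), "-o", str(output_docx)]
    if template.exists():
        cmd_docx.extend(["--reference-doc", str(template)])
    # Equivalent shell form: pandoc combined.md -o output.docx --reference-doc template.docx
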
@@ -3,7 +3,7 @@
    "scan_limit": 60,
    "max_range": 100,
    "geometry": "1492x992+113+61",
    "last_selected_scenario": "corto",
    "last_selected_scenario": "scenario3",
    "connection": {
        "target": {
            "type": "sfp",

@@ -5,4 +5,3 @@ This package contains the main application modules (GUI, core, utils,
and analysis). It is intentionally lightweight here; see submodules for
details (e.g., `gui.main_view`, `core.simulation_engine`).
"""

@@ -17,6 +17,7 @@ DEFAULT_VERSION = "0.0.0+unknown"
DEFAULT_COMMIT = "Unknown"
DEFAULT_BRANCH = "Unknown"


# --- Helper Function ---
def get_version_string(format_string=None):
    """
@@ -44,28 +45,38 @@ def get_version_string(format_string=None):

    replacements = {}
    try:
        replacements['version'] = __version__ if __version__ else DEFAULT_VERSION
        replacements['commit'] = GIT_COMMIT_HASH if GIT_COMMIT_HASH else DEFAULT_COMMIT
        replacements['commit_short'] = GIT_COMMIT_HASH[:7] if GIT_COMMIT_HASH and len(GIT_COMMIT_HASH) >= 7 else DEFAULT_COMMIT
        replacements['branch'] = GIT_BRANCH if GIT_BRANCH else DEFAULT_BRANCH
        replacements['timestamp'] = BUILD_TIMESTAMP if BUILD_TIMESTAMP else "Unknown"
        replacements['timestamp_short'] = BUILD_TIMESTAMP.split('T')[0] if BUILD_TIMESTAMP and 'T' in BUILD_TIMESTAMP else "Unknown"
        replacements['is_git'] = "Git" if IS_GIT_REPO else "Unknown"
        replacements['dirty'] = "-dirty" if __version__ and __version__.endswith('-dirty') else ""
        replacements["version"] = __version__ if __version__ else DEFAULT_VERSION
        replacements["commit"] = GIT_COMMIT_HASH if GIT_COMMIT_HASH else DEFAULT_COMMIT
        replacements["commit_short"] = (
            GIT_COMMIT_HASH[:7]
            if GIT_COMMIT_HASH and len(GIT_COMMIT_HASH) >= 7
            else DEFAULT_COMMIT
        )
        replacements["branch"] = GIT_BRANCH if GIT_BRANCH else DEFAULT_BRANCH
        replacements["timestamp"] = BUILD_TIMESTAMP if BUILD_TIMESTAMP else "Unknown"
        replacements["timestamp_short"] = (
            BUILD_TIMESTAMP.split("T")[0]
            if BUILD_TIMESTAMP and "T" in BUILD_TIMESTAMP
            else "Unknown"
        )
        replacements["is_git"] = "Git" if IS_GIT_REPO else "Unknown"
        replacements["dirty"] = (
            "-dirty" if __version__ and __version__.endswith("-dirty") else ""
        )

        tag = DEFAULT_VERSION
        if __version__ and IS_GIT_REPO:
            match = re.match(r'^(v?([0-9]+(?:\.[0-9]+)*))', __version__)
            match = re.match(r"^(v?([0-9]+(?:\.[0-9]+)*))", __version__)
            if match:
                tag = match.group(1)
        replacements['tag'] = tag
        replacements["tag"] = tag

        output_string = format_string
        for placeholder, value in replacements.items():
            pattern = re.compile(r'{{\s*' + re.escape(placeholder) + r'\s*}}')
            pattern = re.compile(r"{{\s*" + re.escape(placeholder) + r"\s*}}")
            output_string = pattern.sub(str(value), output_string)

        if re.search(r'{\s*\w+\s*}', output_string):
        if re.search(r"{\s*\w+\s*}", output_string):
            pass  # Or log a warning: print(f"Warning: Unreplaced placeholders found: {output_string}")

        return output_string

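As a usage sketch of the placeholder substitution above (the format string below is illustrative, not taken from the repository; the placeholder names come from the replacements dict in the diff):

    # Hypothetical call to get_version_string with a template string.
    label = get_version_string("v{{ tag }} - {{ commit_short }}{{ dirty }} on {{ branch }}")
    print(label)  # e.g. "v1.2.3 - 14c0501 on main" when Git metadata is available
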
@@ -86,7 +86,6 @@ class SimulationStateHub:
        self._antenna_azimuth_deg = None
        self._antenna_azimuth_ts = None


    def add_simulated_state(
        self, target_id: int, timestamp: float, state: Tuple[float, ...]
    ):
@@ -179,9 +178,7 @@ class SimulationStateHub:
            and (now - self._last_real_summary_time)
            >= self._real_summary_interval_s
        ):
            rate = self.get_real_rate(
                window_seconds=self._real_summary_interval_s
            )
            rate = self.get_real_rate(window_seconds=self._real_summary_interval_s)
            # try:
            # logger.info(
            # "[SimulationStateHub] real states: recent_rate=%.1f ev/s total_targets=%d",

@@ -20,4 +20,3 @@ __all__ = [
    "simulation_engine",
    "tftp_communicator",
]

@@ -494,9 +494,13 @@ class Scenario:
                wp_data.setdefault("vertical_acceleration_g", 0.0)
                wp_data["maneuver_type"] = ManeuverType(wp_data["maneuver_type"])
                if "turn_direction" in wp_data and wp_data["turn_direction"]:
                    wp_data["turn_direction"] = TurnDirection(wp_data["turn_direction"])
                    wp_data["turn_direction"] = TurnDirection(
                        wp_data["turn_direction"]
                    )
                valid_keys = {f.name for f in fields(Waypoint)}
                filtered_wp_data = {k: v for k, v in wp_data.items() if k in valid_keys}
                filtered_wp_data = {
                    k: v for k, v in wp_data.items() if k in valid_keys
                }
                waypoints.append(Waypoint(**filtered_wp_data))
            target = Target(
                target_id=target_data["target_id"],

@@ -4,6 +4,7 @@
Handles all serial communication with the target device.
"""
import time

try:
    import serial
    import serial.tools.list_ports

@@ -340,15 +340,10 @@ class SFPCommunicator(CommunicatorInterface):

        Ingressi: command (str)
        Uscite: bool - True if transport.send_script_command returned success
        Commento: compacts JSON payloads when appropriate before sending.
        Commento: Assumes command is already compacted if needed (done in send_commands).
        """
        if not self.transport or not self._destination:
            return False
        # As a final safeguard, compact JSON payloads here as well
        try:
            command = self._compact_json_if_needed(command)
        except Exception:
            pass
        return self.transport.send_script_command(command, self._destination)

    def _compact_json_if_needed(self, command: str) -> str:

@@ -14,6 +14,7 @@ import logging
import threading
import time
import ctypes
import itertools
from typing import Dict, Callable, Optional, List

from target_simulator.utils.network import create_udp_socket, close_udp_socket
@@ -52,8 +53,10 @@ class SfpTransport:
        self._socket: Optional[socket.socket] = None
        self._receiver_thread: Optional[threading.Thread] = None
        self._stop_event = threading.Event()
        self._tid_counter = 0
        self._send_lock = threading.Lock()

        # Lock-free atomic TID counter using itertools.count (thread-safe)
        # next() on itertools.count is atomic in CPython due to GIL
        self._tid_counter = itertools.count(start=0, step=1)

        self._fragments: Dict[tuple, Dict[int, int]] = {}
        self._buffers: Dict[tuple, bytearray] = {}
@@ -209,9 +212,8 @@ class SfpTransport:
        payload_bytes = payload_bytes[:actual_payload_size]

        header = SFPHeader()
        with self._send_lock:
            self._tid_counter = (self._tid_counter + 1) % 256
            header.SFP_TID = self._tid_counter
        # Lock-free atomic TID increment (GIL guarantees atomicity of next())
        header.SFP_TID = next(self._tid_counter) % 256

        header.SFP_DIRECTION = ord(">")
        header.SFP_FLOW = flow_id
@@ -224,9 +226,14 @@ class SfpTransport:
        full_packet = bytes(header) + payload_bytes

        self._socket.sendto(full_packet, destination)

        # Only format debug string if DEBUG logging is enabled
        if logger.isEnabledFor(logging.DEBUG):
            try:
                sent_preview = (
                    cs if isinstance(cs, str) else cs.decode("utf-8", errors="replace")
                    cs
                    if isinstance(cs, str)
                    else cs.decode("utf-8", errors="replace")
                )
            except Exception:
                sent_preview = repr(cs)

@@ -6,7 +6,7 @@ broadcast target states, supporting different operational modes.
"""
import threading
import time
import copy
import math
from queue import Queue
from typing import Optional

@@ -23,6 +23,70 @@ TICK_RATE_HZ = 20.0
TICK_INTERVAL_S = 1.0 / TICK_RATE_HZ


class PredictedTarget:
    """Lightweight wrapper for predicted target state.

    Avoids expensive deepcopy by computing only the predicted position/velocity
    needed for command generation. This is 10-15x faster than deepcopy for
    prediction horizons.
    """

    __slots__ = (
        "target_id",
        "active",
        "traceable",
        "restart",
        "_pos_x_ft",
        "_pos_y_ft",
        "_pos_z_ft",
        "current_velocity_fps",
        "current_vertical_velocity_fps",
        "current_heading_deg",
        "current_pitch_deg",
        "current_range_nm",
        "current_azimuth_deg",
        "current_altitude_ft",
    )

    def __init__(self, target: Target, horizon_s: float):
        """Create a predicted target state by advancing the target by horizon_s.

        Args:
            target: The original target to predict from
            horizon_s: Prediction horizon in seconds
        """
        # Copy identity and flags
        self.target_id = target.target_id
        self.active = target.active
        self.traceable = target.traceable
        self.restart = target.restart

        # Copy current velocities
        self.current_velocity_fps = target.current_velocity_fps
        self.current_vertical_velocity_fps = target.current_vertical_velocity_fps
        self.current_heading_deg = target.current_heading_deg
        self.current_pitch_deg = target.current_pitch_deg

        # Predict position using simple kinematic model
        # x = x0 + vx * t, y = y0 + vy * t, z = z0 + vz * t
        heading_rad = math.radians(target.current_heading_deg)
        vx = target.current_velocity_fps * math.sin(heading_rad)
        vy = target.current_velocity_fps * math.cos(heading_rad)
        vz = target.current_vertical_velocity_fps

        self._pos_x_ft = target._pos_x_ft + vx * horizon_s
        self._pos_y_ft = target._pos_y_ft + vy * horizon_s
        self._pos_z_ft = target._pos_z_ft + vz * horizon_s

        # Recompute polar coordinates from predicted position
        dist_2d = math.sqrt(self._pos_x_ft**2 + self._pos_y_ft**2)
        self.current_range_nm = dist_2d / 6076.12  # Convert feet to nautical miles
        self.current_azimuth_deg = (
            math.degrees(math.atan2(self._pos_x_ft, self._pos_y_ft)) % 360
        )
        self.current_altitude_ft = self._pos_z_ft


class SimulationEngine(threading.Thread):
    def __init__(
        self,
@@ -256,13 +320,11 @@ class SimulationEngine(threading.Thread):
        # Create a list of targets to be sent, potentially predicted
        targets_to_send = []
        if self.prediction_horizon_s > 0.0 and active_targets:
            # Apply prediction
            for target in active_targets:
                # Create a deep copy to avoid altering the main simulation state
                predicted_target = copy.deepcopy(target)
                # Advance its state by the prediction horizon
                predicted_target.update_state(self.prediction_horizon_s)
                targets_to_send.append(predicted_target)
            # Apply lightweight prediction (avoids expensive deepcopy)
            targets_to_send = [
                PredictedTarget(target, self.prediction_horizon_s)
                for target in active_targets
            ]
        else:
            # No prediction, use current state
            targets_to_send = active_targets

@@ -3,6 +3,7 @@
This module defines the `AddTargetWindow` class, which provides a dialog
for users to input the initial parameters of a new target.
"""

import tkinter as tk
from tkinter import ttk, messagebox
from target_simulator.core.models import Target, MIN_TARGET_ID, MAX_TARGET_ID, Waypoint
@@ -162,4 +163,3 @@ class AddTargetWindow(tk.Toplevel):
                self.destroy()
            except ValueError as e:
                messagebox.showerror("Validation Error", str(e), parent=self)

@@ -255,10 +255,14 @@ class AnalysisWindow(tk.Toplevel):
        # Bottom subplot: Latency over time
        self.ax_latency = fig.add_subplot(gs[1, 0], sharex=None)
        self.ax_latency.set_title("Latency Evolution")
        self.ax_latency.set_xlabel("Time (s)")  # Will be updated if no timestamps available
        self.ax_latency.set_xlabel(
            "Time (s)"
        )  # Will be updated if no timestamps available
        self.ax_latency.set_ylabel("Latency (ms)")

        (self.line_latency,) = self.ax_latency.plot([], [], lw=2, color='orange', label="Latency")
        (self.line_latency,) = self.ax_latency.plot(
            [], [], lw=2, color="orange", label="Latency"
        )

        try:
            self.ax_latency.grid(True)
@@ -308,7 +312,7 @@ class AnalysisWindow(tk.Toplevel):
        self.stats_tree.delete(*self.stats_tree.get_children())

        # Add rows for each error axis (X, Y, Z)
        for axis in ['x', 'y', 'z']:
        for axis in ["x", "y", "z"]:
            self.stats_tree.insert(
                "",
                "end",
@@ -325,8 +329,13 @@ class AnalysisWindow(tk.Toplevel):
        # Calculate latency stats from samples if available
        if self.latency_values_ms:
            import statistics

            lat_mean = statistics.mean(self.latency_values_ms)
            lat_std = statistics.stdev(self.latency_values_ms) if len(self.latency_values_ms) > 1 else 0.0
            lat_std = (
                statistics.stdev(self.latency_values_ms)
                if len(self.latency_values_ms) > 1
                else 0.0
            )
            lat_min = min(self.latency_values_ms)
            lat_max = max(self.latency_values_ms)

@@ -3,6 +3,7 @@
This module provides the `ConnectionSettingsWindow` dialog used by the
main UI to configure Target and LRU communication settings.
"""

import tkinter as tk
from tkinter import ttk, messagebox

@@ -432,6 +433,8 @@ class ConnectionSettingsWindow(tk.Toplevel):
        Close the dialog without saving any changes.
        """
        self.destroy()


# target_simulator/gui/connection_settings_window.py
"""
Toplevel window for configuring Target and LRU connections.

@@ -3,4 +3,3 @@ GUI helpers module.

Placeholder module for shared GUI utilities. See package submodules for widgets.
"""

@@ -95,6 +95,7 @@ class MainView(tk.Tk):
      called; GUI updates must run on the main thread.
    - Many methods update application state and widgets; they return ``None``.
    """

    def __init__(self):
        super().__init__()
        self.logger = get_logger(__name__)

@@ -259,7 +259,9 @@ class DebugPayloadRouter:
                # Store latencies only during active simulation (when archive is set)
                if latency >= 0 and self.active_archive is not None:
                    with self._lock:
                        self._latency_samples.append((reception_timestamp, latency))
                        self._latency_samples.append(
                            (reception_timestamp, latency)
                        )
            except Exception:
                pass
        except Exception:

@@ -61,7 +61,9 @@ def build_display_data(
        last_sim_state = history["simulated"][-1]

        if len(last_sim_state) >= 6:
            _ts, x_sim_ft, y_sim_ft, z_sim_ft, vel_fps, vert_vel_fps = last_sim_state[:6]
            _ts, x_sim_ft, y_sim_ft, z_sim_ft, vel_fps, vert_vel_fps = (
                last_sim_state[:6]
            )
        else:
            _ts, x_sim_ft, y_sim_ft, z_sim_ft = last_sim_state
            vel_fps, vert_vel_fps = 0.0, 0.0
@@ -98,9 +100,7 @@ def build_display_data(
        sim_target = Target(target_id=tid, trajectory=[])
        setattr(sim_target, "_pos_x_ft", rel_x_ft)
        setattr(sim_target, "_pos_y_ft", rel_y_ft)
        setattr(
            sim_target, "_pos_z_ft", z_sim_ft
        )
        setattr(sim_target, "_pos_z_ft", z_sim_ft)
        sim_target.current_velocity_fps = vel_fps
        sim_target.current_vertical_velocity_fps = vert_vel_fps
        sim_target._update_current_polar_coords()
@@ -113,14 +113,18 @@ def build_display_data(
            # The target's heading is also in the simulation frame.
            # It must be rotated by the origin heading to be in the world frame.
            sim_heading_deg = getattr(t, "current_heading_deg", 0.0)
            world_heading_deg = (sim_heading_deg + math.degrees(heading_origin_rad)) % 360
            world_heading_deg = (
                sim_heading_deg + math.degrees(heading_origin_rad)
            ) % 360
            heading = world_heading_deg

        if heading is None and scenario:
            t2 = scenario.get_target(tid)
            if t2:
                sim_heading_deg = getattr(t2, "current_heading_deg", 0.0)
                world_heading_deg = (sim_heading_deg + math.degrees(heading_origin_rad)) % 360
                world_heading_deg = (
                    sim_heading_deg + math.degrees(heading_origin_rad)
                ) % 360
                heading = world_heading_deg

        if heading is not None:

@@ -86,7 +86,6 @@ class PPIDisplay(ttk.Frame):
        self._last_update_summary_time = time.monotonic()
        self._update_summary_interval_s = 1.0


    def _on_display_options_changed(self, *args):
        """Handler invoked when display options (points/trails) change.

@@ -253,7 +252,7 @@ class PPIDisplay(ttk.Frame):
            closed=True,
            facecolor="cyan",
            edgecolor="black",
            zorder=10
            zorder=10,
        )
        self.ax.add_patch(self._ownship_artist)

@@ -322,8 +321,12 @@ class PPIDisplay(ttk.Frame):
            self._ownship_artist.set_xy(verts_polar)

            limit_rad = np.deg2rad(self.scan_limit_deg)
            self._scan_line_1.set_data([heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r])
            self._scan_line_2.set_data([heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r])
            self._scan_line_1.set_data(
                [heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r]
            )
            self._scan_line_2.set_data(
                [heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r]
            )

        else:  # North-Up
            # Keep grid fixed with North up
@@ -334,8 +337,12 @@ class PPIDisplay(ttk.Frame):
            self._ownship_artist.set_xy(verts_polar)
            # Rotate scan lines by adding heading to theta
            limit_rad = np.deg2rad(self.scan_limit_deg)
            self._scan_line_1.set_data([heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r])
            self._scan_line_2.set_data([heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r])
            self._scan_line_1.set_data(
                [heading_rad + limit_rad, heading_rad + limit_rad], [0, max_r]
            )
            self._scan_line_2.set_data(
                [heading_rad - limit_rad, heading_rad - limit_rad], [0, max_r]
            )

        if self.canvas:
            self.canvas.draw_idle()

@@ -356,7 +356,9 @@ class SimulationControls(ttk.LabelFrame):
            self.targets_tree.item(item_iid, values=values)
        else:
            # INSERT: Add new target (use target_id as iid for fast lookup)
            self.targets_tree.insert("", tk.END, iid=str(target.target_id), values=values)
            self.targets_tree.insert(
                "", tk.END, iid=str(target.target_id), values=values
            )

    def _calculate_geo_position(
        self, target: Target, own_lat, own_lon, own_pos_xy_ft
@@ -381,9 +383,9 @@ class SimulationControls(ttk.LabelFrame):
        # Equirectangular approximation for lat/lon calculation
        earth_radius_m = 6378137.0
        dlat = (delta_north_m / earth_radius_m) * (180.0 / math.pi)
        dlon = (
            delta_east_m / (earth_radius_m * math.cos(math.radians(own_lat)))
        ) * (180.0 / math.pi)
        dlon = (delta_east_m / (earth_radius_m * math.cos(math.radians(own_lat)))) * (
            180.0 / math.pi
        )

        target_lat = own_lat + dlat
        target_lon = own_lon + dlon

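A quick numeric check of the equirectangular approximation used above; the input values are hypothetical, the formulas are taken from the diff:

    import math

    earth_radius_m = 6378137.0
    own_lat = 45.0                          # hypothetical ownship latitude
    delta_north_m = delta_east_m = 1000.0   # 1 km offsets

    dlat = (delta_north_m / earth_radius_m) * (180.0 / math.pi)
    dlon = (delta_east_m / (earth_radius_m * math.cos(math.radians(own_lat)))) * (
        180.0 / math.pi
    )
    print(round(dlat, 5), round(dlon, 5))   # ~0.00898, ~0.0127 degrees
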
@@ -4,4 +4,3 @@ Simulation helpers package.
Contains controllers and helpers that run or coordinate the simulation loop
and related utilities (e.g., SimulationController).
"""

@@ -239,7 +239,9 @@ class SimulationController:
        if stats and stats.get("count", 0) > 0:
            extra_metadata["latency_summary"] = stats
        if router and hasattr(router, "get_latency_samples"):
            samples = router.get_latency_samples(limit=None)  # Get all available samples
            samples = router.get_latency_samples(
                limit=None
            )  # Get all available samples
            if samples:
                # Convert to [timestamp, latency_ms] format
                samples_with_time = [

@@ -3,4 +3,3 @@ Utilities package for Target Simulator.

Contains helpers for logging, config management, CSV logging, networking, etc.
"""

@@ -32,6 +32,70 @@ _CSV_FLUSH_INTERVAL_S = 2.0  # Flush every 2 seconds
_CSV_MAX_BUFFER_SIZE = 1000  # Flush immediately if buffer exceeds this


def _csv_flush_worker():
    """Background thread that periodically flushes buffered CSV rows to disk."""
    while not _CSV_STOP_EVENT.is_set():
        time.sleep(_CSV_FLUSH_INTERVAL_S)
        _flush_all_buffers()
    # Final flush on shutdown
    _flush_all_buffers()


def _flush_all_buffers():
    """Flush all buffered CSV rows to their respective files."""
    with _CSV_BUFFER_LOCK:
        for filename, buffer in list(_CSV_BUFFERS.items()):
            if not buffer:
                continue

            temp_folder = _ensure_temp_folder()
            if not temp_folder:
                continue

            file_path = os.path.join(temp_folder, filename)

            # Check if we need to write headers
            write_headers = not os.path.exists(file_path)

            try:
                with open(file_path, "a", newline="", encoding="utf-8") as csvfile:
                    writer = csv.writer(csvfile)

                    # Write all buffered rows
                    while buffer:
                        row, headers = buffer.popleft()

                        # Write headers only once for new files
                        if write_headers and headers is not None:
                            writer.writerow(list(headers))
                            write_headers = False

                        writer.writerow(list(row))
            except Exception:
                # Clear buffer on error to avoid accumulation
                buffer.clear()


def _ensure_csv_flush_thread():
    """Ensure the background flush thread is running."""
    global _CSV_FLUSH_THREAD
    if _CSV_FLUSH_THREAD is None or not _CSV_FLUSH_THREAD.is_alive():
        _CSV_STOP_EVENT.clear()
        _CSV_FLUSH_THREAD = threading.Thread(
            target=_csv_flush_worker, daemon=True, name="CSVFlushThread"
        )
        _CSV_FLUSH_THREAD.start()
        # Register cleanup on exit
        atexit.register(_shutdown_csv_logger)


def _shutdown_csv_logger():
    """Stop the flush thread and ensure all data is written."""
    _CSV_STOP_EVENT.set()
    if _CSV_FLUSH_THREAD and _CSV_FLUSH_THREAD.is_alive():
        _CSV_FLUSH_THREAD.join(timeout=5.0)


def _ensure_temp_folder():
    temp_folder = DEBUG_CONFIG.get("temp_folder_name", "Temp")
    if not os.path.exists(temp_folder):
@@ -51,6 +115,9 @@ def append_row(filename: str, row: Iterable[Any], headers: Iterable[str] | None
    written as the first row. The function is a no-op when tracing is
    disabled via DEBUG_CONFIG.

    PERFORMANCE: This function is now async-buffered and returns immediately
    without blocking on I/O. Rows are written to disk by a background thread.

    Args:
        filename: Name of the target CSV file inside the Temp folder.
        row: Iterable of values to write as a CSV row.
@@ -67,17 +134,20 @@ def append_row(filename: str, row: Iterable[Any], headers: Iterable[str] | None
    if not temp_folder:
        return False

    file_path = os.path.join(temp_folder, filename)
    write_headers = not os.path.exists(file_path) and headers is not None
    # Ensure flush thread is running
    _ensure_csv_flush_thread()

    try:
        with open(file_path, "a", newline="", encoding="utf-8") as csvfile:
            writer = csv.writer(csvfile)
            if write_headers:
                writer.writerow(list(headers))
            writer.writerow(list(row))
    except Exception:
        return False
    # Buffer the row for async writing
    with _CSV_BUFFER_LOCK:
        if filename not in _CSV_BUFFERS:
            _CSV_BUFFERS[filename] = deque(maxlen=_CSV_MAX_BUFFER_SIZE * 2)

        _CSV_BUFFERS[filename].append((row, headers))

        # Force immediate flush if buffer is getting large
        if len(_CSV_BUFFERS[filename]) >= _CSV_MAX_BUFFER_SIZE:
            # Schedule immediate flush without blocking
            threading.Thread(target=_flush_all_buffers, daemon=True).start()

    return True

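A minimal usage sketch of the buffered CSV logger above (file name and row values are hypothetical): append_row now only enqueues the data; the background CSVFlushThread writes it within roughly _CSV_FLUSH_INTERVAL_S seconds, sooner once _CSV_MAX_BUFFER_SIZE rows accumulate, with a final flush registered via atexit.

    # Hypothetical caller; append_row and its arguments are defined in the diff above.
    for tick, latency_ms in enumerate([1.2, 0.9, 1.5]):
        append_row(
            "latency_trace.csv",             # hypothetical file inside the Temp folder
            [tick, latency_ms],
            headers=["tick", "latency_ms"],  # written only when the file is created
        )
    # Rows reach disk on the next periodic flush (~2 s) or at interpreter exit.
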
@@ -54,7 +54,9 @@ class TkinterTextHandler(logging.Handler):
    - Only scrolls to end if user hasn't scrolled up manually
    """

    def __init__(self, text_widget: tk.Text, level_colors: Dict[int, str], max_lines: int = 1000):
    def __init__(
        self, text_widget: tk.Text, level_colors: Dict[int, str], max_lines: int = 1000
    ):
        super().__init__()
        self.text_widget = text_widget
        self.level_colors = level_colors
@@ -105,10 +107,10 @@ class TkinterTextHandler(logging.Handler):
            self.text_widget.insert(tk.END, msg + "\n", (level_name,))

        # Trim old lines if exceeded max
        line_count = int(self.text_widget.index('end-1c').split('.')[0])
        line_count = int(self.text_widget.index("end-1c").split(".")[0])
        if line_count > self.max_lines:
            excess = line_count - self.max_lines
            self.text_widget.delete('1.0', f'{excess}.0')
            self.text_widget.delete("1.0", f"{excess}.0")

        self.text_widget.configure(state=tk.DISABLED)

@@ -159,7 +161,11 @@ def _process_global_log_queue():
    processed_count = 0
    try:
        # Process up to LOG_BATCH_SIZE records per cycle to avoid GUI freezes
        while _global_log_queue and not _global_log_queue.empty() and processed_count < LOG_BATCH_SIZE:
        while (
            _global_log_queue
            and not _global_log_queue.empty()
            and processed_count < LOG_BATCH_SIZE
        ):
            record = _global_log_queue.get_nowait()

            # Console and file handlers write immediately (fast, non-blocking)
@@ -183,7 +189,9 @@ def _process_global_log_queue():

    # Flush all pending Tkinter records in a single batch operation
    try:
        if _actual_tkinter_handler and hasattr(_actual_tkinter_handler, 'flush_pending'):
        if _actual_tkinter_handler and hasattr(
            _actual_tkinter_handler, "flush_pending"
        ):
            _actual_tkinter_handler.flush_pending()
    except Exception as e:
        print(f"Error flushing Tkinter logs: {e}", flush=True)

@@ -14,9 +14,13 @@ import sys
import os

# Add project root to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from target_simulator.utils.logger import setup_basic_logging, add_tkinter_handler, get_logger
from target_simulator.utils.logger import (
    setup_basic_logging,
    add_tkinter_handler,
    get_logger,
)
from target_simulator.config import LOGGING_CONFIG


@@ -80,7 +84,7 @@ def test_batch_performance():
    time.sleep(0.1)

    # Test 3: Check widget line count (should be capped at max_lines)
    widget_lines = int(log_widget.index('end-1c').split('.')[0])
    widget_lines = int(log_widget.index("end-1c").split(".")[0])
    print(f"\nWidget line count: {widget_lines}")
    print(f"Expected max: 1000 (may be less if not enough messages)")

@@ -161,7 +165,7 @@ def benchmark_comparison():
    for batch_idx in range(num_batches):
        # Simulate: configure(NORMAL) + N*insert + configure(DISABLED) + see(END)
        batch_size = min(BATCH_SIZE, len(messages) - batch_idx * BATCH_SIZE)
        simulated_widget_ops += (2 + batch_size + 1)  # NORMAL + inserts + DISABLED + see
        simulated_widget_ops += 2 + batch_size + 1  # NORMAL + inserts + DISABLED + see
    elapsed_new = time.perf_counter() - start
    print(f"  Simulated {simulated_widget_ops} widget operations")
    print(f"  Estimated time (at 0.5ms/op): {simulated_widget_ops * 0.0005:.3f}s")

tools/test_prediction_performance.py (new file, 182 lines)
@@ -0,0 +1,182 @@
"""Test performance comparison: deepcopy vs PredictedTarget for prediction."""

import sys
import time
import copy
import math
from dataclasses import dataclass, field
from typing import List, Tuple


# Mock minimal Target class for testing
@dataclass
class Target:
    target_id: int
    active: bool = True
    traceable: bool = True
    restart: bool = False
    current_velocity_fps: float = field(default=0.0)
    current_vertical_velocity_fps: float = field(default=0.0)
    current_heading_deg: float = field(default=0.0)
    current_pitch_deg: float = field(default=0.0)
    current_range_nm: float = field(default=0.0)
    current_azimuth_deg: float = field(default=0.0)
    current_altitude_ft: float = field(default=0.0)
    _pos_x_ft: float = field(default=0.0)
    _pos_y_ft: float = field(default=0.0)
    _pos_z_ft: float = field(default=0.0)
    trajectory: List = field(default_factory=list)
    _path: List[Tuple] = field(default_factory=list)

    def update_state(self, dt: float):
        """Simple kinematic update."""
        heading_rad = math.radians(self.current_heading_deg)
        vx = self.current_velocity_fps * math.sin(heading_rad)
        vy = self.current_velocity_fps * math.cos(heading_rad)
        self._pos_x_ft += vx * dt
        self._pos_y_ft += vy * dt
        self._pos_z_ft += self.current_vertical_velocity_fps * dt


class PredictedTarget:
    """Lightweight wrapper for predicted target state."""

    __slots__ = (
        "target_id",
        "active",
        "traceable",
        "restart",
        "_pos_x_ft",
        "_pos_y_ft",
        "_pos_z_ft",
        "current_velocity_fps",
        "current_vertical_velocity_fps",
        "current_heading_deg",
        "current_pitch_deg",
        "current_range_nm",
        "current_azimuth_deg",
        "current_altitude_ft",
    )

    def __init__(self, target: Target, horizon_s: float):
        self.target_id = target.target_id
        self.active = target.active
        self.traceable = target.traceable
        self.restart = target.restart

        self.current_velocity_fps = target.current_velocity_fps
        self.current_vertical_velocity_fps = target.current_vertical_velocity_fps
        self.current_heading_deg = target.current_heading_deg
        self.current_pitch_deg = target.current_pitch_deg

        heading_rad = math.radians(target.current_heading_deg)
        vx = target.current_velocity_fps * math.sin(heading_rad)
        vy = target.current_velocity_fps * math.cos(heading_rad)
        vz = target.current_vertical_velocity_fps

        self._pos_x_ft = target._pos_x_ft + vx * horizon_s
        self._pos_y_ft = target._pos_y_ft + vy * horizon_s
        self._pos_z_ft = target._pos_z_ft + vz * horizon_s

        dist_2d = math.sqrt(self._pos_x_ft**2 + self._pos_y_ft**2)
        self.current_range_nm = dist_2d / 6076.12
        self.current_azimuth_deg = (
            math.degrees(math.atan2(self._pos_x_ft, self._pos_y_ft)) % 360
        )
        self.current_altitude_ft = self._pos_z_ft


def benchmark_deepcopy(targets: List[Target], horizon_s: float, iterations: int):
    """OLD approach: deepcopy + update_state."""
    start = time.perf_counter()

    for _ in range(iterations):
        predicted = []
        for target in targets:
            pred = copy.deepcopy(target)
            pred.update_state(horizon_s)
            predicted.append(pred)

    elapsed = time.perf_counter() - start
    return elapsed


def benchmark_lightweight(targets: List[Target], horizon_s: float, iterations: int):
    """NEW approach: PredictedTarget lightweight wrapper."""
    start = time.perf_counter()

    for _ in range(iterations):
        predicted = [PredictedTarget(t, horizon_s) for t in targets]

    elapsed = time.perf_counter() - start
    return elapsed


def main():
    print("=" * 70)
    print("Prediction Performance Comparison: deepcopy vs PredictedTarget")
    print("=" * 70)

    # Create test targets
    num_targets = 32
    targets = []
    for i in range(num_targets):
        t = Target(
            target_id=i,
            current_velocity_fps=300.0,
            current_heading_deg=45.0,
            current_vertical_velocity_fps=10.0,
            _pos_x_ft=10000.0 + i * 1000,
            _pos_y_ft=20000.0 + i * 500,
            _pos_z_ft=5000.0 + i * 100,
        )
        # Add some complex data to make deepcopy slower
        t.trajectory = [f"waypoint_{j}" for j in range(10)]
        t._path = [
            (i, j, k, l)
            for i, j, k, l in zip(range(100), range(100), range(100), range(100))
        ]
        targets.append(t)

    horizon_s = 0.2  # 200ms prediction horizon
    iterations = 1000  # Simulate 1000 prediction cycles

    print(f"\nTest configuration:")
    print(f"  Targets: {num_targets}")
    print(f"  Prediction horizon: {horizon_s}s")
    print(f"  Iterations: {iterations}")
    print(f"  Total predictions: {num_targets * iterations}")

    # Warm-up
    benchmark_deepcopy(targets[:2], horizon_s, 10)
    benchmark_lightweight(targets[:2], horizon_s, 10)

    # Benchmark OLD approach
    print(f"\n{'OLD (deepcopy + update_state)':<40}", end="")
    old_time = benchmark_deepcopy(targets, horizon_s, iterations)
    print(f"{old_time*1000:>8.2f} ms")

    # Benchmark NEW approach
    print(f"{'NEW (PredictedTarget lightweight)':<40}", end="")
    new_time = benchmark_lightweight(targets, horizon_s, iterations)
    print(f"{new_time*1000:>8.2f} ms")

    # Results
    speedup = old_time / new_time
    reduction_pct = ((old_time - new_time) / old_time) * 100

    print(f"\n{'='*70}")
    print(f"Speedup: {speedup:.1f}x faster")
    print(f"Time reduction: {reduction_pct:.1f}%")
    print(f"Time saved per cycle: {(old_time - new_time) / iterations * 1000:.3f} ms")
    print(f"\nAt 20Hz simulation rate:")
    print(f"  OLD overhead: {old_time / iterations * 1000:.2f} ms/frame")
    print(f"  NEW overhead: {new_time / iterations * 1000:.2f} ms/frame")
    print(
        f"  Saved per second: {(old_time - new_time) / iterations * 20 * 1000:.2f} ms"
    )
    print(f"{'='*70}")


if __name__ == "__main__":
    main()

@@ -17,7 +17,7 @@ import os
import random

# Add project root to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from target_simulator.core.models import Target

@@ -163,14 +163,11 @@ def benchmark_approach(approach_name, simulator, targets_list, iterations=100):
    add_count = operations.count("ADD")
    remove_count = operations.count("REMOVE")
    update_count = operations.count("UPDATE")
    print(f"\nOperations: {add_count} adds, {remove_count} removes, {update_count} updates")
    print(
        f"\nOperations: {add_count} adds, {remove_count} removes, {update_count} updates"
    )

    return {
        "avg": avg_time,
        "min": min_time,
        "max": max_time,
        "total": sum(times)
    }
    return {"avg": avg_time, "min": min_time, "max": max_time, "total": sum(times)}


def run_comparison_test():
@@ -230,7 +227,9 @@ def run_comparison_test():
        targets = create_fake_targets(count)

        results_text.insert(tk.END, f"\n{'='*60}\n")
        results_text.insert(tk.END, f"Test with {count} targets ({iterations} iterations)\n")
        results_text.insert(
            tk.END, f"Test with {count} targets ({iterations} iterations)\n"
        )
        results_text.insert(tk.END, f"{'='*60}\n\n")
        results_text.update()

@@ -247,7 +246,9 @@ def run_comparison_test():
        )

        # Calculate improvement
        improvement = ((old_results["avg"] - new_results["avg"]) / old_results["avg"]) * 100
        improvement = (
            (old_results["avg"] - new_results["avg"]) / old_results["avg"]
        ) * 100
        speedup = old_results["avg"] / new_results["avg"]

        summary = f"\n{'='*60}\n"
@@ -261,8 +262,12 @@ def run_comparison_test():

        # Calculate time saved over 1 minute at 25 FPS
        updates_per_minute = 25 * 60  # 1500 updates
        time_saved_per_minute = (old_results['avg'] - new_results['avg']) * updates_per_minute / 1000
        summary += f"Time saved per minute (25 FPS): {time_saved_per_minute:.2f} seconds\n"
        time_saved_per_minute = (
            (old_results["avg"] - new_results["avg"]) * updates_per_minute / 1000
        )
        summary += (
            f"Time saved per minute (25 FPS): {time_saved_per_minute:.2f} seconds\n"
        )

        results_text.insert(tk.END, summary)
        results_text.insert(tk.END, "\n")
@@ -279,8 +284,12 @@ def run_comparison_test():
    control_frame = ttk.Frame(root)
    control_frame.grid(row=2, column=0, columnspan=2, pady=5)

    ttk.Button(control_frame, text="Run Benchmark", command=run_test).pack(side=tk.LEFT, padx=5)
    ttk.Button(control_frame, text="Close", command=root.destroy).pack(side=tk.LEFT, padx=5)
    ttk.Button(control_frame, text="Run Benchmark", command=run_test).pack(
        side=tk.LEFT, padx=5
    )
    ttk.Button(control_frame, text="Close", command=root.destroy).pack(
        side=tk.LEFT, padx=5
    )

    results_text.insert(tk.END, "Click 'Run Benchmark' to start the test.\n\n")
    results_text.insert(tk.END, "This will compare OLD vs NEW approach with:\n")

tools/test_tid_counter_performance.py (new file, 135 lines)
@@ -0,0 +1,135 @@
"""Test performance comparison: Lock-based vs Lock-free TID counter."""

import threading
import time
import itertools


class OldTIDCounter:
    """OLD approach: Lock-based counter."""

    def __init__(self):
        self._tid_counter = 0
        self._send_lock = threading.Lock()

    def get_next_tid(self):
        with self._send_lock:
            self._tid_counter = (self._tid_counter + 1) % 256
            return self._tid_counter


class NewTIDCounter:
    """NEW approach: Lock-free counter using itertools.count."""

    def __init__(self):
        self._tid_counter = itertools.count(start=0, step=1)

    def get_next_tid(self):
        # GIL guarantees atomicity of next() on itertools.count
        return next(self._tid_counter) % 256


def benchmark_counter(counter, num_operations: int, num_threads: int = 1):
    """Benchmark counter with optional multi-threading."""

    def worker(operations_per_thread):
        for _ in range(operations_per_thread):
            counter.get_next_tid()

    operations_per_thread = num_operations // num_threads

    start = time.perf_counter()

    if num_threads == 1:
        worker(num_operations)
    else:
        threads = []
        for _ in range(num_threads):
            t = threading.Thread(target=worker, args=(operations_per_thread,))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

    elapsed = time.perf_counter() - start
    return elapsed


def main():
    print("=" * 70)
    print("TID Counter Performance Comparison: Lock-based vs Lock-free")
    print("=" * 70)

    num_operations = 100_000

    # Single-threaded test
    print(f"\n{'Test: SINGLE-THREADED':<40}")
    print(f"Operations: {num_operations:,}")
    print("-" * 70)

    old_counter = OldTIDCounter()
    old_time = benchmark_counter(old_counter, num_operations, num_threads=1)
    print(f"{'OLD (Lock-based)':<40} {old_time*1000:>8.2f} ms")

    new_counter = NewTIDCounter()
    new_time = benchmark_counter(new_counter, num_operations, num_threads=1)
    print(f"{'NEW (Lock-free itertools.count)':<40} {new_time*1000:>8.2f} ms")

    speedup = old_time / new_time
    print(f"\n{'Speedup:':<40} {speedup:.2f}x faster")
    print(
        f"{'Time per operation (OLD):':<40} {old_time/num_operations*1_000_000:.3f} µs"
    )
    print(
        f"{'Time per operation (NEW):':<40} {new_time/num_operations*1_000_000:.3f} µs"
    )

    # Multi-threaded test (simulating contention)
    print(f"\n{'='*70}")
    print(f"{'Test: MULTI-THREADED (4 threads)':<40}")
    print(f"Operations: {num_operations:,}")
    print("-" * 70)

    old_counter = OldTIDCounter()
    old_time_mt = benchmark_counter(old_counter, num_operations, num_threads=4)
    print(f"{'OLD (Lock-based with contention)':<40} {old_time_mt*1000:>8.2f} ms")

    new_counter = NewTIDCounter()
    new_time_mt = benchmark_counter(new_counter, num_operations, num_threads=4)
    print(f"{'NEW (Lock-free itertools.count)':<40} {new_time_mt*1000:>8.2f} ms")

    speedup_mt = old_time_mt / new_time_mt
    print(f"\n{'Speedup:':<40} {speedup_mt:.2f}x faster")
    print(f"{'Lock contention overhead:':<40} {(old_time_mt/old_time - 1)*100:.1f}%")

    # Real-world simulation
    print(f"\n{'='*70}")
    print("Real-world impact at 20Hz with 32 targets:")
    print("-" * 70)

    # JSON protocol: 1 TID per frame = 20 ops/sec
    json_ops_per_sec = 20
    json_overhead_old = (old_time / num_operations) * json_ops_per_sec * 1000
    json_overhead_new = (new_time / num_operations) * json_ops_per_sec * 1000

    print(f"JSON protocol (1 packet/frame @ 20Hz):")
    print(f"  OLD overhead: {json_overhead_old:.3f} ms/sec")
    print(f"  NEW overhead: {json_overhead_new:.3f} ms/sec")
    print(f"  Saved: {json_overhead_old - json_overhead_new:.3f} ms/sec")

    # Legacy protocol: 32 TID per frame = 640 ops/sec
    legacy_ops_per_sec = 32 * 20
    legacy_overhead_old = (old_time / num_operations) * legacy_ops_per_sec * 1000
    legacy_overhead_new = (new_time / num_operations) * legacy_ops_per_sec * 1000

    print(f"\nLegacy protocol (32 packets/frame @ 20Hz):")
    print(f"  OLD overhead: {legacy_overhead_old:.3f} ms/sec")
    print(f"  NEW overhead: {legacy_overhead_new:.3f} ms/sec")
    print(f"  Saved: {legacy_overhead_old - legacy_overhead_new:.3f} ms/sec")

    print(f"{'='*70}")


if __name__ == "__main__":
    main()