fixed the visualization to show all the deviation data

VALLONGOL 2025-11-13 20:40:35 +01:00
parent b58d7f1022
commit 19fae5309e
4 changed files with 204 additions and 22 deletions

debug_brackets.py (new file)

@@ -0,0 +1,31 @@
+import json
+
+data = json.load(open('archive_simulations/20251113_170236_scenario_dritto.json'))
+sim = data['simulation_results']['0']['simulated']
+real = data['simulation_results']['0']['real']
+
+print(f"Total simulated states: {len(sim)}")
+print(f"Total real states: {len(real)}")
+print(f"Sim range: {sim[0][0]:.2f} to {sim[-1][0]:.2f}")
+print(f"Real range: {real[0][0]:.2f} to {real[-1][0]:.2f}")
+
+# Check how many real states find brackets
+bracketed = 0
+first_bracketed_time = None
+last_bracketed_time = None
+
+for real_state in real:
+    real_ts = real_state[0]
+    # Find bracketing points
+    for i in range(len(sim) - 1):
+        if sim[i][0] <= real_ts <= sim[i + 1][0]:
+            bracketed += 1
+            if first_bracketed_time is None:
+                first_bracketed_time = real_ts
+            last_bracketed_time = real_ts
+            break
+
+print(f"\nBracketed real states: {bracketed} / {len(real)} ({100*bracketed/len(real):.1f}%)")
+if first_bracketed_time and last_bracketed_time:
+    print(f"Bracketed time range: {first_bracketed_time:.2f} to {last_bracketed_time:.2f}")
+    print(f"Bracketed duration: {last_bracketed_time - first_bracketed_time:.2f}s")

debug_targets.py (new file)

@@ -0,0 +1,9 @@
+import json
+
+data = json.load(open('archive_simulations/20251113_170236_scenario_dritto.json'))
+targets = list(data['simulation_results'].keys())
+print(f'Targets in file: {targets}')
+for t in targets:
+    real_count = len(data['simulation_results'][t]['real'])
+    sim_count = len(data['simulation_results'][t]['simulated'])
+    print(f'Target {t}: {real_count} real states, {sim_count} simulated states')
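When the bracket coverage reported by debug_brackets.py is low, the per-target time spans are the natural next thing to compare. A small hypothetical extension of this script, assuming the same state layout as in debug_brackets.py (timestamp at index 0):

for t in targets:
    real = data['simulation_results'][t]['real']
    sim = data['simulation_results'][t]['simulated']
    if real and sim:
        print(f'Target {t}: real {real[0][0]:.2f}-{real[-1][0]:.2f}s, '
              f'sim {sim[0][0]:.2f}-{sim[-1][0]:.2f}s')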


@@ -30,7 +30,10 @@ def main():
     """Initializes and runs the application."""
     import logging
+    # Silence verbose loggers from third-party libraries
     logging.getLogger("matplotlib").setLevel(logging.WARNING)
+    logging.getLogger("PIL").setLevel(logging.INFO)
+    logging.getLogger("PIL.PngImagePlugin").setLevel(logging.INFO)
     app = MainView()
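Background on the two PIL lines: Python loggers inherit their effective level from the nearest configured ancestor, so setting "PIL" to INFO already silences the DEBUG-level "STREAM" messages that PIL.PngImagePlugin emits; the explicit child setting is belt-and-braces. A quick check:

import logging

logging.getLogger("PIL").setLevel(logging.INFO)
# The child logger has no level of its own, so it inherits INFO (20) from "PIL"
print(logging.getLogger("PIL.PngImagePlugin").getEffectiveLevel())  # 20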


@@ -37,6 +37,7 @@ class AnalysisWindow(tk.Toplevel):
         # State variables
         self.selected_target_id = tk.IntVar(value=0)
         self._active = True
+        self._filtered_errors = None  # Cache for spike-filtered errors used in statistics

         # Load the data and initialize the analyzer
         self._load_data_and_setup(archive_filepath)
@@ -74,8 +75,9 @@
         self.latency_timestamps = []
         self.latency_values_ms = []

-        # Create a temporary hub and populate it with historical data
-        self._hub = SimulationStateHub()
+        # Create a temporary hub with unlimited history size for analysis
+        # The default history_size of 200 is too small for full simulation playback
+        self._hub = SimulationStateHub(history_size=100000)
         results = archive_data.get("simulation_results", {})
         for target_id_str, data in results.items():
             target_id = int(target_id_str)
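SimulationStateHub's internals are not part of this diff, but the comment about the default history_size of 200 suggests a bounded buffer. If the bound behaves like a deque maxlen, replaying a full simulation into a default-sized hub silently evicts the oldest states, which is exactly what the history_size=100000 override avoids. An illustrative sketch under that assumption (BoundedHistory is a stand-in, not the real class):

from collections import deque

class BoundedHistory:  # hypothetical stand-in for the real hub
    def __init__(self, history_size=200):
        self.states = deque(maxlen=history_size)  # oldest entries evicted first

hub = BoundedHistory()
for ts in range(1000):
    hub.states.append((ts, 0.0, 0.0, 0.0))
print(len(hub.states), hub.states[0][0])  # 200 800 -> the first 800 states are gone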
@@ -231,7 +233,8 @@
         fig = Figure(figsize=(5, 6), dpi=100)

         # Use GridSpec for aligned subplots with shared x-axis alignment
-        gs = fig.add_gridspec(2, 1, height_ratios=[2, 1], hspace=0.3)
+        # Increased top margin to avoid title overlap with toolbar
+        gs = fig.add_gridspec(2, 1, height_ratios=[2, 1], hspace=0.35, top=0.95)

         # Top subplot: Instantaneous Error
         self.ax = fig.add_subplot(gs[0, 0])
@@ -252,8 +255,8 @@
         except Exception:
             pass

-        # Bottom subplot: Latency over time
-        self.ax_latency = fig.add_subplot(gs[1, 0], sharex=None)
+        # Bottom subplot: Latency over time - SHARE X-AXIS with top plot for synchronized zoom
+        self.ax_latency = fig.add_subplot(gs[1, 0], sharex=self.ax)
         self.ax_latency.set_title("Latency Evolution")
         self.ax_latency.set_xlabel(
             "Time (s)"
@@ -272,9 +275,24 @@
         fig.tight_layout()

-        self.canvas = FigureCanvasTkAgg(fig, master=parent)
+        # Create frame for toolbar and canvas
+        plot_container = ttk.Frame(parent)
+        plot_container.pack(fill=tk.BOTH, expand=True)
+
+        # Add matplotlib navigation toolbar for zoom/pan functionality at the top
+        from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk
+        toolbar_frame = ttk.Frame(plot_container)
+        toolbar_frame.pack(side=tk.TOP, fill=tk.X)
+
+        self.canvas = FigureCanvasTkAgg(fig, master=plot_container)
+        toolbar = NavigationToolbar2Tk(self.canvas, toolbar_frame)
+        toolbar.update()
+
+        # Pack canvas after toolbar so it's below
+        self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
         self.canvas.draw()
-        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)
     def _update_target_selector(self):
         """Refresh the target selector Combobox with IDs from the state hub.
@@ -304,6 +322,8 @@
     def _update_stats_table(self, results: Dict):
         """Populate the stats Treeview with aggregated error metrics.

+        Uses filtered error data (excluding spikes) if available.
+
         Args:
             results (Dict): A mapping containing 'x', 'y', 'z' metrics and
@@ -311,18 +331,54 @@
         """
         self.stats_tree.delete(*self.stats_tree.get_children())

-        # Add rows for each error axis (X, Y, Z)
-        for axis in ["x", "y", "z"]:
-            self.stats_tree.insert(
-                "",
-                "end",
-                values=(
-                    f"Error {axis.upper()}",
-                    f"{results[axis]['mean']:.3f}",
-                    f"{results[axis]['std_dev']:.3f}",
-                    f"{results[axis]['rmse']:.3f}",
-                ),
-            )
+        # Use filtered errors if available, otherwise fall back to analyzer results
+        if hasattr(self, '_filtered_errors') and self._filtered_errors:
+            import math
+            # Compute statistics from filtered data
+            for axis in ["x", "y", "z"]:
+                errors = self._filtered_errors[axis]
+                if errors:
+                    n = len(errors)
+                    mean = sum(errors) / n
+                    variance = sum((x - mean) ** 2 for x in errors) / n
+                    std_dev = math.sqrt(variance)
+                    rmse = math.sqrt(sum(x**2 for x in errors) / n)
+                    self.stats_tree.insert(
+                        "",
+                        "end",
+                        values=(
+                            f"Error {axis.upper()}",
+                            f"{mean:.3f}",
+                            f"{std_dev:.3f}",
+                            f"{rmse:.3f}",
+                        ),
+                    )
+                else:
+                    # No data after filtering
+                    self.stats_tree.insert(
+                        "",
+                        "end",
+                        values=(
+                            f"Error {axis.upper()}",
+                            "N/A",
+                            "N/A",
+                            "N/A",
+                        ),
+                    )
+        else:
+            # Fallback to analyzer results (unfiltered)
+            for axis in ["x", "y", "z"]:
+                self.stats_tree.insert(
+                    "",
+                    "end",
+                    values=(
+                        f"Error {axis.upper()}",
+                        f"{results[axis]['mean']:.3f}",
+                        f"{results[axis]['std_dev']:.3f}",
+                        f"{results[axis]['rmse']:.3f}",
+                    ),
+                )
         # Add latency row if available
         if self.estimated_latency_ms is not None:
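The hand-rolled metrics in the hunk above are population statistics (divide by n, not n-1); the stdlib statistics module reproduces them exactly, which would be a possible simplification. A quick equivalence check with arbitrary illustrative values:

import math
import statistics

errors = [1.0, -2.0, 3.0, -1.5]
n = len(errors)
mean = sum(errors) / n
std_dev = math.sqrt(sum((x - mean) ** 2 for x in errors) / n)
rmse = math.sqrt(sum(x ** 2 for x in errors) / n)

assert math.isclose(mean, statistics.fmean(errors))
assert math.isclose(std_dev, statistics.pstdev(errors))  # population std dev
assert math.isclose(rmse, math.sqrt(statistics.fmean(x * x for x in errors)))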
@@ -364,6 +420,8 @@
     def _update_plot(self, target_id: int):
         """Update the matplotlib plot for the given target using hub history.

+        Also computes and stores filtered errors (excluding outliers) for statistics.
+
         Args:
             target_id (int): The identifier of the target to plot errors for.
@@ -376,6 +434,8 @@
             self.ax.relim()
             self.ax.autoscale_view()
             self.canvas.draw_idle()
+            # Clear filtered data cache
+            self._filtered_errors = None
             return

         times, errors_x, errors_y, errors_z = [], [], [], []
@@ -394,9 +454,88 @@
             errors_y.append(real_y - interp_y)
             errors_z.append(real_z - interp_z)

-        self.line_x.set_data(times, errors_x)
-        self.line_y.set_data(times, errors_y)
-        self.line_z.set_data(times, errors_z)
+        if not times:
+            self.line_x.set_data([], [])
+            self.line_y.set_data([], [])
+            self.line_z.set_data([], [])
+            self.ax.relim()
+            self.ax.autoscale_view()
+            self.canvas.draw_idle()
+            self._filtered_errors = None
+            return
+
+        # Filter initial transient/acquisition spikes.
+        # Use a threshold based on the median error of stable data (after the initial period).
+        filtered_times, filtered_x, filtered_y, filtered_z = [], [], [], []
+        outlier_times, outlier_x, outlier_y, outlier_z = [], [], [], []
+
+        # Take a sample window after the initial seconds to compute the typical error magnitude
+        min_time = min(times)
+        sample_window_start = min_time + 5.0  # Skip first 5 seconds
+        sample_window_end = min_time + 15.0  # Sample from 5s to 15s
+
+        sample_errors = []
+        for i, t in enumerate(times):
+            if sample_window_start <= t <= sample_window_end:
+                err_magnitude = (errors_x[i]**2 + errors_y[i]**2 + errors_z[i]**2) ** 0.5
+                sample_errors.append(err_magnitude)
+
+        # Compute threshold from the sample median (very permissive for normal errors)
+        if sample_errors:
+            import statistics
+            median_err = statistics.median(sample_errors)
+            # Use 20x the median as threshold to catch only extreme outliers
+            threshold = max(median_err * 20, 500.0)  # At least 500 ft threshold
+        else:
+            # Fallback if no sample data is available
+            threshold = 1000.0
+
+        # Classify points as normal or outliers
+        for i, t in enumerate(times):
+            err_magnitude = (errors_x[i]**2 + errors_y[i]**2 + errors_z[i]**2) ** 0.5
+            if err_magnitude > threshold:
+                outlier_times.append(t)
+                outlier_x.append(errors_x[i])
+                outlier_y.append(errors_y[i])
+                outlier_z.append(errors_z[i])
+            else:
+                filtered_times.append(t)
+                filtered_x.append(errors_x[i])
+                filtered_y.append(errors_y[i])
+                filtered_z.append(errors_z[i])
+
+        # Store filtered errors for statistics computation
+        self._filtered_errors = {
+            'x': filtered_x,
+            'y': filtered_y,
+            'z': filtered_z
+        }
+
+        # Plot filtered (normal) data
+        self.line_x.set_data(filtered_times, filtered_x)
+        self.line_y.set_data(filtered_times, filtered_y)
+        self.line_z.set_data(filtered_times, filtered_z)
+
+        # Clear previous spike annotations, then annotate if outliers were detected
+        for txt in getattr(self.ax, '_spike_annotations', []):
+            txt.remove()
+        self.ax._spike_annotations = []
+
+        if outlier_times:
+            # Add a text annotation about the filtered spikes
+            outlier_count = len(outlier_times)
+            max_outlier_mag = max((outlier_x[i]**2 + outlier_y[i]**2 + outlier_z[i]**2)**0.5
+                                  for i in range(len(outlier_times)))
+            annotation_text = (f"{outlier_count} acquisition spike(s) filtered\n"
+                               f"(max error: {max_outlier_mag:.0f} ft at t={outlier_times[0]:.1f}s)\n"
+                               f"Spikes excluded from statistics")
+            txt = self.ax.text(0.02, 0.98, annotation_text,
+                               transform=self.ax.transAxes,
+                               verticalalignment='top',
+                               bbox=dict(boxstyle='round', facecolor='yellow', alpha=0.7),
+                               fontsize=9)
+            self.ax._spike_annotations = [txt]

         self.ax.relim()
         self.ax.autoscale_view()
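The threshold rule in the hunk above, pulled out into a standalone function so its behavior is easy to test in isolation (a sketch, not code from the commit; the function and parameter names are invented):

import statistics

def spike_threshold(times, magnitudes, skip_s=5.0, window_s=10.0,
                    factor=20.0, floor_ft=500.0, fallback_ft=1000.0):
    """Median error magnitude in the (min+skip_s, min+skip_s+window_s) window
    times factor, clamped to floor_ft; fallback_ft if the window is empty."""
    start = min(times) + skip_s
    sample = [m for t, m in zip(times, magnitudes) if start <= t <= start + window_s]
    if not sample:
        return fallback_ft
    return max(statistics.median(sample) * factor, floor_ft)

# One acquisition spike of 9000 ft against ~10 ft steady-state errors:
times = [float(t) for t in range(30)]
mags = [9000.0] + [10.0] * 29
threshold = spike_threshold(times, mags)
print(threshold)                         # 500.0 (20 * 10 ft is under the 500 ft floor)
print(sum(m > threshold for m in mags))  # 1 -> only the spike is classified as an outlier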