# target_simulator/analysis/simulation_archive.py
|
|
|
|
import os
|
|
import json
|
|
import time
|
|
from datetime import datetime
|
|
from typing import Dict, List, Any, Tuple, Optional
|
|
import math
|
|
import csv
|
|
import threading
|
|
|
|
from target_simulator.core.models import Scenario
|
|
from target_simulator.utils.csv_logger import append_row, flush_all_csv_buffers
|
|
|
|
# Prefer pyproj for accurate geodesic calculations; fall back to a simple
# equirectangular approximation when pyproj is not available.
try:
    from pyproj import Geod

    # WGS84 ellipsoid geodesic helper, used by _compute_and_store_geopos.
    _GEOD = Geod(ellps="WGS84")
    _HAS_PYPROJ = True
except ImportError:
    # pyproj missing: georeferencing uses the spherical approximation instead.
    _GEOD = None
    _HAS_PYPROJ = False

# Define the structure for a recorded state
RecordedState = Tuple[float, float, float, float]  # (timestamp, x_ft, y_ft, z_ft)
|
|
|
|
|
|
class SimulationArchive:
    """
    Manages data collection for a single simulation run and saves it to a file.
    Positions are streamed to an efficient CSV trail file for fast analysis.
    """

    # Destination directory for finalized archive artifacts (JSON + CSV files).
    ARCHIVE_FOLDER = "archive_simulations"
    # Column layout of the streamed position trail CSV.
    TRAIL_HEADERS = ["timestamp", "source", "target_id", "x_ft", "y_ft", "z_ft"]
    # Column layout of the streamed latency CSV.
    LATENCY_HEADERS = ["timestamp", "latency_ms"]
|
|
|
|
def __init__(self, scenario: Scenario):
|
|
"""Initializes a new archive session for a given scenario."""
|
|
self.start_time = time.monotonic()
|
|
self.scenario_name = scenario.name
|
|
self.scenario_data = scenario.to_dict()
|
|
|
|
self.recorded_data: Dict[int, Dict[str, List[RecordedState]]] = {}
|
|
self.ownship_trajectory: List[Dict[str, Any]] = []
|
|
self.recorded_geopos: Dict[int, List[Dict[str, Any]]] = {}
|
|
|
|
self._ensure_archive_directory()
|
|
|
|
# Generate a unique temporary trail filename based on precise start time
|
|
ts_str_precise = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
|
|
self._temp_trail_filename = self._get_trail_filename(ts_str=ts_str_precise)
|
|
self._temp_latency_filename = self._get_latency_filename(ts_str=ts_str_precise)
|
|
self._cleanup_stale_trail_file(self._temp_trail_filename)
|
|
self._cleanup_stale_trail_file(self._temp_latency_filename)
|
|
|
|
# --- MathLab export setup ---
|
|
try:
|
|
from target_simulator.utils.config_manager import ConfigManager
|
|
|
|
gm = ConfigManager()
|
|
general = gm.get_general_settings() or {}
|
|
math_cfg = general.get("mathlab_export", {}) or {}
|
|
self._mathlab_enabled = bool(math_cfg.get("enabled", False))
|
|
self._mathlab_folder = math_cfg.get("folder") or ""
|
|
except Exception:
|
|
self._mathlab_enabled = False
|
|
self._mathlab_folder = ""
|
|
|
|
self._mathlab_lock = threading.Lock()
|
|
self._mathlab_filepath = ""
|
|
if self._mathlab_enabled:
|
|
try:
|
|
# Use provided folder or fallback to archive folder
|
|
folder = self._mathlab_folder or self.ARCHIVE_FOLDER
|
|
os.makedirs(folder, exist_ok=True)
|
|
ts_str_precise = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
|
|
safe_scenario_name = "".join(
|
|
c for c in self.scenario_name if c.isalnum() or c in (" ", "_")
|
|
).rstrip()
|
|
math_filename = f"{ts_str_precise}_{safe_scenario_name}.mathlab.csv"
|
|
self._mathlab_filepath = os.path.join(folder, math_filename)
|
|
|
|
# Write initial metadata and headers
|
|
with open(self._mathlab_filepath, "w", newline="", encoding="utf-8") as mf:
|
|
writer = csv.writer(mf)
|
|
# Initial timetag (UTC) and scenario name as comments for MathLab parser
|
|
writer.writerow([f"#start_timestamp_utc: {datetime.utcnow().isoformat()}"])
|
|
writer.writerow([f"#scenario: {self.scenario_name}"])
|
|
# Column headers: timetag (s since start), position in meters, altitude (m), velocity (m/s), heading (deg), rcs, amplitude
|
|
headers = [
|
|
"timetag_s",
|
|
"target_id",
|
|
"x_m",
|
|
"y_m",
|
|
"z_m",
|
|
"altitude_m",
|
|
"velocity_m_s",
|
|
"heading_deg",
|
|
"rcs",
|
|
"amplitude",
|
|
]
|
|
writer.writerow(headers)
|
|
except Exception:
|
|
# Disable on failure to avoid crashing archival flow
|
|
self._mathlab_enabled = False
|
|
self._mathlab_filepath = ""
|
|
|
|
def _ensure_archive_directory(self):
|
|
"""Creates the main archive directory if it does not exist."""
|
|
if not os.path.exists(self.ARCHIVE_FOLDER):
|
|
try:
|
|
os.makedirs(self.ARCHIVE_FOLDER)
|
|
except OSError as e:
|
|
print(f"Error creating archive directory: {e}")
|
|
|
|
def _get_trail_filename(self, ts_str: Optional[str] = None) -> str:
|
|
"""Generates a filename for the CSV trail file."""
|
|
if not ts_str:
|
|
ts_str = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
safe_scenario_name = "".join(
|
|
c for c in self.scenario_name if c.isalnum() or c in (" ", "_")
|
|
).rstrip()
|
|
return f"{ts_str}_{safe_scenario_name}.trail.csv"
|
|
|
|
def _get_latency_filename(self, ts_str: Optional[str] = None) -> str:
|
|
"""Generates a filename for the CSV latency file."""
|
|
if not ts_str:
|
|
ts_str = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
safe_scenario_name = "".join(
|
|
c for c in self.scenario_name if c.isalnum() or c in (" ", "_")
|
|
).rstrip()
|
|
return f"{ts_str}_{safe_scenario_name}.latency.csv"
|
|
|
|
def _cleanup_stale_trail_file(self, filename: str):
|
|
"""Removes the specified trail file from the temp folder if it exists."""
|
|
from target_simulator.config import DEBUG_CONFIG
|
|
|
|
temp_folder = DEBUG_CONFIG.get("temp_folder_name", "Temp")
|
|
filepath = os.path.join(temp_folder, filename)
|
|
|
|
if os.path.exists(filepath):
|
|
try:
|
|
os.remove(filepath)
|
|
except OSError:
|
|
pass # Non-critical if removal fails
|
|
|
|
def add_simulated_state(
|
|
self, target_id: int, timestamp: float, state: Tuple[float, ...]
|
|
):
|
|
"""Adds a simulated state to the archive and streams it to the trail file."""
|
|
# This part is kept for backward compatibility and direct access if needed
|
|
if target_id not in self.recorded_data:
|
|
self.recorded_data[target_id] = {"simulated": [], "real": []}
|
|
full_state: RecordedState = (timestamp, state[0], state[1], state[2])
|
|
self.recorded_data[target_id]["simulated"].append(full_state)
|
|
|
|
# Stream to the temporary CSV trail file asynchronously
|
|
row = [timestamp, "simulated", target_id, state[0], state[1], state[2]]
|
|
append_row(self._temp_trail_filename, row, headers=self.TRAIL_HEADERS)
|
|
|
|
def add_real_state(
|
|
self, target_id: int, timestamp: float, state: Tuple[float, ...]
|
|
):
|
|
"""Adds a real state (from the server) to the archive and streams it."""
|
|
if target_id not in self.recorded_data:
|
|
self.recorded_data[target_id] = {"simulated": [], "real": []}
|
|
full_state: RecordedState = (timestamp, state[0], state[1], state[2])
|
|
self.recorded_data[target_id]["real"].append(full_state)
|
|
|
|
# Stream to the temporary CSV trail file asynchronously
|
|
row = [timestamp, "real", target_id, state[0], state[1], state[2]]
|
|
append_row(self._temp_trail_filename, row, headers=self.TRAIL_HEADERS)
|
|
|
|
try:
|
|
self._compute_and_store_geopos(target_id, timestamp, state)
|
|
except Exception:
|
|
pass
|
|
|
|
    def _compute_and_store_geopos(
        self, target_id: int, timestamp: float, state: Tuple[float, ...]
    ):
        """Compute georeferenced lat/lon for a real state and store it.

        Converts the target's local (x, y) offset from the ownship into
        north/east displacements in meters, then projects them from the
        ownship fix closest in time. Uses pyproj's WGS84 geodesic when
        available, otherwise an equirectangular approximation.
        """
        # Without any ownship fix there is no geodetic reference to project from.
        if not self.ownship_trajectory:
            return

        # Ownship sample closest in time to this target sample.
        # NOTE(review): linear scan per call — fine for moderate trajectory
        # sizes; revisit if trajectories grow very large.
        best_ownship = min(
            self.ownship_trajectory,
            key=lambda s: abs(s.get("timestamp", 0.0) - timestamp),
        )
        own_lat, own_lon, own_pos = (
            best_ownship.get("latitude"),
            best_ownship.get("longitude"),
            best_ownship.get("position_xy_ft"),
        )

        # A usable fix needs both geodetic and local-frame coordinates.
        if any(v is None for v in [own_lat, own_lon, own_pos]):
            return

        target_x_ft, target_y_ft = float(state[0]), float(state[1])
        own_x_ft, own_y_ft = float(own_pos[0]), float(own_pos[1])

        # Convert deltas based on the new convention: x=North, y=West
        # Geodetic calculations require North and East displacements.
        delta_north_m = (target_x_ft - own_x_ft) * 0.3048
        delta_east_m = -(target_y_ft - own_y_ft) * 0.3048  # Invert West to get East

        target_lat, target_lon = None, None
        if _HAS_PYPROJ and _GEOD:
            try:
                # Forward geodesic: azimuth measured clockwise from North.
                distance_m = math.hypot(delta_east_m, delta_north_m)
                az_deg = math.degrees(math.atan2(delta_east_m, delta_north_m))
                lon2, lat2, _ = _GEOD.fwd(
                    float(own_lon), float(own_lat), az_deg, distance_m
                )
                target_lat, target_lon = lat2, lon2
            except Exception:
                # Fall through to the approximate conversion below.
                target_lat, target_lon = None, None

        if target_lat is None:
            # Equirectangular approximation on a sphere whose radius is the
            # WGS84 semi-major axis; adequate for short ownship-target ranges.
            R = 6378137.0
            dlat = (delta_north_m / R) * (180.0 / math.pi)
            lat_rad = math.radians(float(own_lat))
            dlon = (delta_east_m / (R * math.cos(lat_rad))) * (180.0 / math.pi)
            target_lat = float(own_lat) + dlat
            target_lon = float(own_lon) + dlon

        # 1e-7 deg is roughly centimeter-level resolution.
        if target_id not in self.recorded_geopos:
            self.recorded_geopos[target_id] = []
        self.recorded_geopos[target_id].append(
            {
                "timestamp": timestamp,
                "lat": round(target_lat, 7),
                "lon": round(target_lon, 7),
                "alt_ft": float(state[2]) if len(state) > 2 else None,
            }
        )
|
|
|
|
def add_ownship_state(self, state: Dict[str, Any]):
|
|
"""Adds an ownship state sample to the archive's trajectory."""
|
|
self.ownship_trajectory.append(state)
|
|
|
|
def add_mathlab_state(self, target_obj: Any, timestamp: float):
|
|
"""Append a row for MathLab export for the given target state.
|
|
|
|
Rows contain: timetag (s since archive start), x_m, y_m, z_m, altitude_m,
|
|
velocity_m_s, heading_deg, rcs, amplitude.
|
|
"""
|
|
if not getattr(self, "_mathlab_enabled", False):
|
|
return
|
|
if not getattr(self, "_mathlab_filepath", None):
|
|
return
|
|
|
|
try:
|
|
with self._mathlab_lock:
|
|
# Compute timetag relative to archive start
|
|
timetag_s = float(timestamp - self.start_time)
|
|
|
|
# Position: prefer explicit attributes, fall back to zeros
|
|
x_ft = float(getattr(target_obj, "_pos_x_ft", getattr(target_obj, "x_ft", 0.0)))
|
|
y_ft = float(getattr(target_obj, "_pos_y_ft", getattr(target_obj, "y_ft", 0.0)))
|
|
z_ft = float(getattr(target_obj, "_pos_z_ft", getattr(target_obj, "z_ft", 0.0)))
|
|
|
|
# Altitude may be stored in current_altitude_ft or _pos_z_ft
|
|
alt_ft = getattr(target_obj, "current_altitude_ft", None)
|
|
if alt_ft is None:
|
|
alt_ft = z_ft
|
|
|
|
# Velocity in feet per second -> m/s
|
|
vel_fps = float(getattr(target_obj, "current_velocity_fps", 0.0))
|
|
|
|
heading_deg = float(getattr(target_obj, "current_heading_deg", 0.0))
|
|
|
|
rcs = getattr(target_obj, "rcs", "")
|
|
amplitude = getattr(target_obj, "amplitude", "")
|
|
|
|
# Convert to metric
|
|
x_m = x_ft * 0.3048
|
|
y_m = y_ft * 0.3048
|
|
z_m = z_ft * 0.3048
|
|
altitude_m = float(alt_ft) * 0.3048 if alt_ft is not None else ""
|
|
velocity_m_s = vel_fps * 0.3048
|
|
|
|
row = [
|
|
f"{timetag_s:.6f}",
|
|
f"{getattr(target_obj, 'target_id', '')}",
|
|
f"{x_m:.6f}",
|
|
f"{y_m:.6f}",
|
|
f"{z_m:.6f}",
|
|
f"{altitude_m:.6f}" if altitude_m != "" else "",
|
|
f"{velocity_m_s:.6f}",
|
|
f"{heading_deg:.6f}",
|
|
f"{rcs}",
|
|
f"{amplitude}",
|
|
]
|
|
|
|
try:
|
|
with open(self._mathlab_filepath, "a", newline="", encoding="utf-8") as mf:
|
|
writer = csv.writer(mf)
|
|
writer.writerow(row)
|
|
except Exception:
|
|
# Non-fatal: don't interrupt simulation on failure to write
|
|
pass
|
|
except Exception:
|
|
# Swallow errors to avoid impacting simulation engine
|
|
pass
|
|
|
|
def add_latency_sample(self, timestamp: float, latency_ms: float):
|
|
"""Adds a latency sample to the latency CSV file."""
|
|
row = [timestamp, latency_ms]
|
|
append_row(self._temp_latency_filename, row, headers=self.LATENCY_HEADERS)
|
|
|
|
def save(self, extra_metadata: Optional[Dict[str, Any]] = None) -> str:
|
|
"""
|
|
Saves the simulation archive and performance data to separate files,
|
|
and finalizes the trail file by renaming it.
|
|
"""
|
|
end_time = time.monotonic()
|
|
|
|
# --- MODIFIED PART START ---
|
|
# Force a synchronous flush of all CSV data before proceeding
|
|
flush_all_csv_buffers()
|
|
# --- MODIFIED PART END ---
|
|
|
|
ts_str = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
safe_scenario_name = "".join(
|
|
c for c in self.scenario_name if c.isalnum() or c in (" ", "_")
|
|
).rstrip()
|
|
base_filename = f"{ts_str}_{safe_scenario_name}"
|
|
|
|
# --- Finalize Trail File ---
|
|
from target_simulator.config import DEBUG_CONFIG
|
|
|
|
temp_folder = DEBUG_CONFIG.get("temp_folder_name", "Temp")
|
|
|
|
final_trail_filename = f"{base_filename}.trail.csv"
|
|
try:
|
|
temp_trail_path = os.path.join(temp_folder, self._temp_trail_filename)
|
|
final_trail_path = os.path.join(self.ARCHIVE_FOLDER, final_trail_filename)
|
|
|
|
if os.path.exists(temp_trail_path):
|
|
os.rename(temp_trail_path, final_trail_path)
|
|
else:
|
|
print(
|
|
f"Warning: Temporary trail file not found at {temp_trail_path}. No trail file will be saved."
|
|
)
|
|
final_trail_filename = ""
|
|
except OSError as e:
|
|
print(f"Warning: could not rename trail file: {e}")
|
|
final_trail_filename = self._temp_trail_filename
|
|
|
|
# --- Finalize Latency File ---
|
|
final_latency_filename = f"{base_filename}.latency.csv"
|
|
try:
|
|
temp_latency_path = os.path.join(temp_folder, self._temp_latency_filename)
|
|
final_latency_path = os.path.join(
|
|
self.ARCHIVE_FOLDER, final_latency_filename
|
|
)
|
|
|
|
if os.path.exists(temp_latency_path):
|
|
os.rename(temp_latency_path, final_latency_path)
|
|
else:
|
|
print(
|
|
f"Warning: Temporary latency file not found at {temp_latency_path}. No latency file will be saved."
|
|
)
|
|
final_latency_filename = ""
|
|
except OSError as e:
|
|
print(f"Warning: could not rename latency file: {e}")
|
|
final_latency_filename = self._temp_latency_filename
|
|
|
|
metadata = {
|
|
"scenario_name": self.scenario_name,
|
|
"start_timestamp_utc": datetime.utcnow().isoformat(),
|
|
"duration_seconds": end_time - self.start_time,
|
|
}
|
|
if final_trail_filename:
|
|
metadata["trail_file"] = final_trail_filename
|
|
if final_latency_filename:
|
|
metadata["latency_file"] = final_latency_filename
|
|
|
|
if extra_metadata:
|
|
metadata.update(extra_metadata)
|
|
|
|
performance_samples = metadata.pop("performance_samples", None)
|
|
|
|
if performance_samples:
|
|
import csv
|
|
|
|
perf_filename = f"{base_filename}.perf.csv"
|
|
perf_filepath = os.path.join(self.ARCHIVE_FOLDER, perf_filename)
|
|
headers = list(performance_samples[0].keys()) if performance_samples else []
|
|
try:
|
|
with open(perf_filepath, "w", newline="", encoding="utf-8") as f:
|
|
writer = csv.writer(f)
|
|
writer.writerow(
|
|
[f"# Scenario Name: {metadata.get('scenario_name')}"]
|
|
)
|
|
writer.writerow(
|
|
[
|
|
f"# Start Timestamp (UTC): {metadata.get('start_timestamp_utc')}"
|
|
]
|
|
)
|
|
writer.writerow([f"# Source File: {base_filename}.json"])
|
|
if headers:
|
|
writer.writerow(headers)
|
|
for sample in performance_samples:
|
|
writer.writerow([sample.get(h, "") for h in headers])
|
|
print(f"Performance data saved to: {perf_filepath}")
|
|
except IOError as e:
|
|
print(f"Error saving performance data CSV: {e}")
|
|
|
|
archive_content = {
|
|
"metadata": metadata,
|
|
"scenario_definition": self.scenario_data,
|
|
"ownship_trajectory": self.ownship_trajectory,
|
|
"simulation_results": self.recorded_data,
|
|
"simulation_geopos": self.recorded_geopos,
|
|
}
|
|
|
|
filename = f"{base_filename}.json"
|
|
filepath = os.path.join(self.ARCHIVE_FOLDER, filename)
|
|
|
|
try:
|
|
with open(filepath, "w", encoding="utf-8") as f:
|
|
json.dump(archive_content, f, indent=4)
|
|
print(f"Simulation archive saved to: {filepath}")
|
|
return filepath
|
|
except IOError as e:
|
|
print(f"Error saving simulation archive: {e}")
|
|
return ""
|