# target_simulator/analysis/simulation_archive.py

import json
import math
import os
import time
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Tuple

from target_simulator.core.models import Scenario
from target_simulator.utils.csv_logger import append_row, flush_all_csv_buffers

# Prefer pyproj for accurate geodesic calculations; fall back to a simple
# equirectangular approximation when pyproj is not available.
try:
    from pyproj import Geod

    _GEOD = Geod(ellps="WGS84")
    _HAS_PYPROJ = True
except ImportError:
    _GEOD = None
    _HAS_PYPROJ = False
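# Note: for the short ranges handled here (target offsets of at most a few
# miles from ownship), the equirectangular fallback typically agrees with the
# geodesic result to roughly metre level, so losing pyproj costs little accuracy.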

# Define the structure for a recorded state.
RecordedState = Tuple[float, float, float, float]  # (timestamp, x_ft, y_ft, z_ft)


class SimulationArchive:
    """
    Manages data collection for a single simulation run and saves it to a file.
    Positions are streamed to an efficient CSV trail file for fast analysis.
    """

    ARCHIVE_FOLDER = "archive_simulations"
    TRAIL_HEADERS = ["timestamp", "source", "target_id", "x_ft", "y_ft", "z_ft"]
    LATENCY_HEADERS = ["timestamp", "latency_ms"]
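
    # Example of one trail CSV row matching TRAIL_HEADERS (illustrative values):
    #   12.5,simulated,3,1500.0,-250.0,3000.0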

    def __init__(self, scenario: Scenario):
        """Initializes a new archive session for a given scenario."""
        self.start_time = time.monotonic()
        # Capture the wall-clock start here so the saved metadata reflects the
        # start of the run rather than the moment save() is called.
        self.start_timestamp_utc = datetime.now(timezone.utc).isoformat()
        self.scenario_name = scenario.name
        self.scenario_data = scenario.to_dict()

        self.recorded_data: Dict[int, Dict[str, List[RecordedState]]] = {}
        self.ownship_trajectory: List[Dict[str, Any]] = []
        self.recorded_geopos: Dict[int, List[Dict[str, Any]]] = {}

        self._ensure_archive_directory()

        # Generate unique temporary filenames based on the precise start time
        # so back-to-back runs cannot collide.
        ts_str_precise = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        self._temp_trail_filename = self._get_trail_filename(ts_str=ts_str_precise)
        self._temp_latency_filename = self._get_latency_filename(ts_str=ts_str_precise)
        self._cleanup_stale_trail_file(self._temp_trail_filename)
        self._cleanup_stale_trail_file(self._temp_latency_filename)

    def _ensure_archive_directory(self):
        """Creates the main archive directory if it does not exist."""
        try:
            # exist_ok avoids a race between the existence check and creation.
            os.makedirs(self.ARCHIVE_FOLDER, exist_ok=True)
        except OSError as e:
            print(f"Error creating archive directory: {e}")

    def _safe_scenario_name(self) -> str:
        """Returns the scenario name reduced to filesystem-safe characters."""
        return "".join(
            c for c in self.scenario_name if c.isalnum() or c in (" ", "_")
        ).rstrip()

    def _get_trail_filename(self, ts_str: Optional[str] = None) -> str:
        """Generates a filename for the CSV trail file."""
        if not ts_str:
            ts_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        return f"{ts_str}_{self._safe_scenario_name()}.trail.csv"

    def _get_latency_filename(self, ts_str: Optional[str] = None) -> str:
        """Generates a filename for the CSV latency file."""
        if not ts_str:
            ts_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        return f"{ts_str}_{self._safe_scenario_name()}.latency.csv"
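
    # Illustrative result, assuming the scenario is named "Head On":
    #   "20240101_120000_Head On.latency.csv"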

    def _cleanup_stale_trail_file(self, filename: str):
        """Removes the specified temporary CSV file if it exists."""
        # The temporary CSVs are written by csv_logger into the configured
        # temp folder (see save()), so look for stale files there.
        from target_simulator.config import DEBUG_CONFIG

        temp_folder = DEBUG_CONFIG.get("temp_folder_name", "Temp")
        filepath = os.path.join(temp_folder, filename)
        if os.path.exists(filepath):
            try:
                os.remove(filepath)
            except OSError:
                pass  # Non-critical if removal fails

    def add_simulated_state(self, target_id: int, timestamp: float, state: Tuple[float, ...]):
        """Adds a simulated state to the archive and streams it to the trail file."""
        # The in-memory copy is kept for backward compatibility and for direct
        # access if needed.
        if target_id not in self.recorded_data:
            self.recorded_data[target_id] = {"simulated": [], "real": []}
        full_state: RecordedState = (timestamp, state[0], state[1], state[2])
        self.recorded_data[target_id]["simulated"].append(full_state)

        # Stream to the temporary CSV trail file asynchronously.
        row = [timestamp, "simulated", target_id, state[0], state[1], state[2]]
        append_row(self._temp_trail_filename, row, headers=self.TRAIL_HEADERS)

    def add_real_state(self, target_id: int, timestamp: float, state: Tuple[float, ...]):
        """Adds a real state (from the server) to the archive and streams it."""
        if target_id not in self.recorded_data:
            self.recorded_data[target_id] = {"simulated": [], "real": []}
        full_state: RecordedState = (timestamp, state[0], state[1], state[2])
        self.recorded_data[target_id]["real"].append(full_state)

        # Stream to the temporary CSV trail file asynchronously.
        row = [timestamp, "real", target_id, state[0], state[1], state[2]]
        append_row(self._temp_trail_filename, row, headers=self.TRAIL_HEADERS)

        # Georeferencing is best-effort: a failure here must never interrupt
        # the recording pipeline.
        try:
            self._compute_and_store_geopos(target_id, timestamp, state)
        except Exception:
            pass

    def _compute_and_store_geopos(self, target_id: int, timestamp: float, state: Tuple[float, ...]):
        """Compute georeferenced lat/lon for a real state and store it."""
        if not self.ownship_trajectory:
            return

        # Use the ownship sample closest in time to the target state.
        best_ownship = min(
            self.ownship_trajectory,
            key=lambda s: abs(s.get("timestamp", 0.0) - timestamp),
        )
        own_lat = best_ownship.get("latitude")
        own_lon = best_ownship.get("longitude")
        own_pos = best_ownship.get("position_xy_ft")

        if any(v is None for v in (own_lat, own_lon, own_pos)):
            return

        target_x_ft, target_y_ft = float(state[0]), float(state[1])
        own_x_ft, own_y_ft = float(own_pos[0]), float(own_pos[1])

        # Local east/north offsets from ownship, converted from feet to metres.
        delta_east_m = (target_x_ft - own_x_ft) * 0.3048
        delta_north_m = (target_y_ft - own_y_ft) * 0.3048

        target_lat, target_lon = None, None
        if _HAS_PYPROJ and _GEOD:
            try:
                distance_m = math.hypot(delta_east_m, delta_north_m)
                # atan2(east, north) yields the azimuth clockwise from north,
                # which is the convention Geod.fwd expects.
                az_deg = math.degrees(math.atan2(delta_east_m, delta_north_m))
                lon2, lat2, _ = _GEOD.fwd(float(own_lon), float(own_lat), az_deg, distance_m)
                target_lat, target_lon = lat2, lon2
            except Exception:
                target_lat, target_lon = None, None

        if target_lat is None:
            # Equirectangular fallback on a spherical Earth: one degree of
            # latitude spans R * pi / 180 metres, and one degree of longitude
            # shrinks by cos(latitude).
            R = 6378137.0  # WGS84 semi-major axis, metres
            dlat = (delta_north_m / R) * (180.0 / math.pi)
            lat_rad = math.radians(float(own_lat))
            dlon = (delta_east_m / (R * math.cos(lat_rad))) * (180.0 / math.pi)
            target_lat = float(own_lat) + dlat
            target_lon = float(own_lon) + dlon

        if target_id not in self.recorded_geopos:
            self.recorded_geopos[target_id] = []
        self.recorded_geopos[target_id].append({
            "timestamp": timestamp,
            "lat": round(target_lat, 7),
            "lon": round(target_lon, 7),
            "alt_ft": float(state[2]) if len(state) > 2 else None,
        })
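
    # Worked example for the fallback (illustrative numbers): a target
    # 1000 ft due north of ownship is 304.8 m away, giving
    #   dlat = 304.8 / 6378137 * (180 / pi) ≈ 0.0027 degrees of latitude.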

    def add_ownship_state(self, state: Dict[str, Any]):
        """Adds an ownship state sample to the archive's trajectory."""
        self.ownship_trajectory.append(state)

    def add_latency_sample(self, timestamp: float, latency_ms: float):
        """Adds a latency sample to the latency CSV file."""
        row = [timestamp, latency_ms]
        append_row(self._temp_latency_filename, row, headers=self.LATENCY_HEADERS)

    def save(self, extra_metadata: Optional[Dict[str, Any]] = None) -> str:
        """
        Saves the simulation archive and performance data to separate files,
        and finalizes the trail file by renaming it.
        """
        end_time = time.monotonic()

        # Force a synchronous flush of all buffered CSV rows so the temporary
        # files are complete before they are renamed below.
        flush_all_csv_buffers()

        ts_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        base_filename = f"{ts_str}_{self._safe_scenario_name()}"

        # --- Finalize Trail File ---
        from target_simulator.config import DEBUG_CONFIG

        temp_folder = DEBUG_CONFIG.get("temp_folder_name", "Temp")

        final_trail_filename = f"{base_filename}.trail.csv"
        try:
            temp_trail_path = os.path.join(temp_folder, self._temp_trail_filename)
            final_trail_path = os.path.join(self.ARCHIVE_FOLDER, final_trail_filename)

            if os.path.exists(temp_trail_path):
                os.rename(temp_trail_path, final_trail_path)
            else:
                print(
                    f"Warning: Temporary trail file not found at {temp_trail_path}. "
                    "No trail file will be saved."
                )
                final_trail_filename = ""
        except OSError as e:
            print(f"Warning: could not rename trail file: {e}")
            final_trail_filename = self._temp_trail_filename

        # --- Finalize Latency File ---
        final_latency_filename = f"{base_filename}.latency.csv"
        try:
            temp_latency_path = os.path.join(temp_folder, self._temp_latency_filename)
            final_latency_path = os.path.join(self.ARCHIVE_FOLDER, final_latency_filename)

            if os.path.exists(temp_latency_path):
                os.rename(temp_latency_path, final_latency_path)
            else:
                print(
                    f"Warning: Temporary latency file not found at {temp_latency_path}. "
                    "No latency file will be saved."
                )
                final_latency_filename = ""
        except OSError as e:
            print(f"Warning: could not rename latency file: {e}")
            final_latency_filename = self._temp_latency_filename

        metadata = {
            "scenario_name": self.scenario_name,
            # Recorded in __init__ so this really is the start of the run.
            "start_timestamp_utc": self.start_timestamp_utc,
            "duration_seconds": end_time - self.start_time,
        }
        if final_trail_filename:
            metadata["trail_file"] = final_trail_filename
        if final_latency_filename:
            metadata["latency_file"] = final_latency_filename

        if extra_metadata:
            metadata.update(extra_metadata)

        # Performance samples, if supplied via extra_metadata, go to their own
        # CSV file rather than being embedded in the JSON archive.
        performance_samples = metadata.pop("performance_samples", None)

        if performance_samples:
            import csv

            perf_filename = f"{base_filename}.perf.csv"
            perf_filepath = os.path.join(self.ARCHIVE_FOLDER, perf_filename)
            # Column order follows the first sample; all samples are assumed
            # to share the same keys.
            headers = list(performance_samples[0].keys())
            try:
                with open(perf_filepath, "w", newline="", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow([f"# Scenario Name: {metadata.get('scenario_name')}"])
                    writer.writerow([f"# Start Timestamp (UTC): {metadata.get('start_timestamp_utc')}"])
                    writer.writerow([f"# Source File: {base_filename}.json"])
                    if headers:
                        writer.writerow(headers)
                    for sample in performance_samples:
                        writer.writerow([sample.get(h, "") for h in headers])
                print(f"Performance data saved to: {perf_filepath}")
            except IOError as e:
                print(f"Error saving performance data CSV: {e}")

        archive_content = {
            "metadata": metadata,
            "scenario_definition": self.scenario_data,
            "ownship_trajectory": self.ownship_trajectory,
            "simulation_results": self.recorded_data,
            "simulation_geopos": self.recorded_geopos,
        }

        filename = f"{base_filename}.json"
        filepath = os.path.join(self.ARCHIVE_FOLDER, filename)

        try:
            with open(filepath, "w", encoding="utf-8") as f:
                json.dump(archive_content, f, indent=4)
            print(f"Simulation archive saved to: {filepath}")
            return filepath
        except IOError as e:
            print(f"Error saving simulation archive: {e}")
            return ""