add storyboard and summary

This commit is contained in:
VALLONGOL 2025-07-10 10:07:17 +02:00
parent 3081bea918
commit b6e54a7331
3 changed files with 447 additions and 228 deletions

View File

@@ -19,7 +19,10 @@ import ctypes
import threading
import shutil
import time

try:
    import pandas as pd
except ImportError:
    pd = None

# Required for timeline generation from the summary CSV
try:
@@ -136,6 +139,11 @@ class AppController:
        self.last_generated_out_file: Optional[Path] = None
        self.current_flight_folder_path: Optional[Path] = None
        self.last_flight_summary_df: Optional["pd.DataFrame"] = None
        self.total_files_for_analysis: int = 0

    def bind_view(self, view):
        self.view = view
        self._load_initial_config()
@@ -215,45 +223,218 @@ class AppController:
                [("Recorder Data", "*.rec"), ("All files", "*.*")],
            )

    def _make_columns_unique(self, columns: List[str]) -> List[str]:
        """
        Takes a list of column names and makes them unique by appending '.n'
        to duplicates. Example: ['Mode', 'Batch', 'Mode'] -> ['Mode', 'Batch', 'Mode.2']
        """
        # Simple, robust renaming: the first occurrence keeps its name,
        # later duplicates get a '.n' suffix.
        final_cols = []
        counts = {}
        for col in columns:
            if col in counts:
                counts[col] += 1
                final_cols.append(f"{col}.{counts[col]}")
            else:
                counts[col] = 1
                final_cols.append(col)
        return final_cols
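A quick illustration of the renaming rule (the column list below is hypothetical; the helper keeps no state, so it can be exercised standalone as a sketch):

    raw = ['file', 'Mode', 'Batch', 'TTAG', 'Mode', 'Scal', 'Scal', 'WF', 'WF']
    print(AppController._make_columns_unique(None, raw))
    # -> ['file', 'Mode', 'Batch', 'TTAG', 'Mode.2', 'Scal', 'Scal.2', 'WF', 'WF.2']

The suffixed names ('Mode.2', 'Scal.2', 'WF.2') are exactly the ones _create_and_save_summary relies on below.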
    def _parse_and_save_storyboard(self, txt_path: Path, output_dir: Path) -> Optional["pd.DataFrame"]:
        """
        Parses the raw, semicolon-delimited text file into a complete DataFrame.
        This version uses a manual pre-processing step for maximum compatibility
        and handles duplicate column names.
        """
        if pd is None:
            log.error("Pandas library is not installed.")
            return None
        try:
            with open(txt_path, 'r', encoding='utf-8', errors='ignore') as f:
                # 1. Read the header and clean it up
                header_line = f.readline()
                raw_columns = [h.strip() for h in header_line.strip().split(';') if h.strip()]
                # 2. Make the column names unique
                unique_column_names = self._make_columns_unique(raw_columns)
                num_columns = len(unique_column_names)
                # 3. Pre-process the rows to ensure each has the expected number of columns
                clean_data = []
                reader = csv.reader(f, delimiter=';')
                for row in reader:
                    if len(row) >= num_columns:
                        clean_row = [field.strip() for field in row[:num_columns]]
                        clean_data.append(clean_row)
            if not clean_data:
                log.warning(f"No valid data rows could be parsed from {txt_path.name}")
                return None
            # 4. Build the DataFrame from the pre-cleaned data and unique header
            storyboard_df = pd.DataFrame(clean_data, columns=unique_column_names)
            # Convert the important numeric columns
            storyboard_df['Batch'] = pd.to_numeric(storyboard_df['Batch'], errors='coerce')
            storyboard_df['TTAG'] = pd.to_numeric(storyboard_df['TTAG'], errors='coerce')
            storyboard_df.dropna(subset=['Batch', 'TTAG'], inplace=True)
            storyboard_df['Batch'] = storyboard_df['Batch'].astype(int)
            storyboard_df['TTAG'] = storyboard_df['TTAG'].astype(int)
        except Exception as e:
            log.error(f"Failed to read or process summary file {txt_path.name}: {e}")
            return None

        if storyboard_df.empty:
            log.warning(f"DataFrame is empty after cleaning {txt_path.name}")
            return None

        # Save the complete storyboard
        csv_path = output_dir / "flight_storyboard.csv"
        json_path = output_dir / "flight_storyboard.json"
        log.info(f"Saving full storyboard to {csv_path}")
        storyboard_df.to_csv(csv_path, index=False)
        log.info(f"Saving full storyboard to {json_path}")
        storyboard_df.to_json(json_path, orient="records", indent=4)
        return storyboard_df
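For orientation, the parser assumes only a semicolon-separated header (possibly with repeated names) followed by rows carrying at least that many fields. A minimal, made-up input of that shape (real pp-*.txt files have far more columns) would be:

    file;Batch;TTAG;Mode;Mode
    radar_25-07-10-09-15-42.rec;12;32491231;TRK;A

after which _make_columns_unique turns the header into ['file', 'Batch', 'TTAG', 'Mode', 'Mode.2'].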
    def _create_and_save_summary(self, storyboard_df: "pd.DataFrame", output_dir: Path) -> "pd.DataFrame":
        """
        Aggregates the full storyboard DataFrame into a human-readable summary.
        """
        df = storyboard_df.copy()
        # Use the unique column names created during parsing
        df['status'] = (
            df['Mode'].astype(str) + '-' + df['Mode.2'].astype(str) + ' | ' +
            df['Scal.2'].astype(str) + ' | ' + 'wf_' + df['WF'].astype(str) +
            '-' + df['WF.2'].astype(str)
        )
        df['status_changed'] = df['status'].ne(df['status'].shift())
        change_indices = df[df['status_changed']].index.tolist()
        if not change_indices or change_indices[0] != 0:
            change_indices.insert(0, 0)
        if df.index[-1] + 1 not in change_indices:
            change_indices.append(df.index[-1] + 1)

        TICK_DURATION_S = 64e-9
        summary_records = []
        for i in range(len(change_indices) - 1):
            start_loc = change_indices[i]
            end_loc = change_indices[i + 1] - 1
            segment = df.loc[start_loc:end_loc]
            if segment.empty:
                continue
            summary_records.append({
                'Segment (Mode | Scale | WF)': segment['status'].iloc[0],
                'Start Batch': segment['Batch'].iloc[0],
                'End Batch': segment['Batch'].iloc[-1],
                'Batch Count': segment['Batch'].iloc[-1] - segment['Batch'].iloc[0] + 1,
                'Duration (s)': (segment['TTAG'].iloc[-1] - segment['TTAG'].iloc[0]) * TICK_DURATION_S,
                'Start File': segment['file'].iloc[0],
                'End File': segment['file'].iloc[-1],
                '# Files': segment['file'].nunique()
            })
        summary_df = pd.DataFrame(summary_records)

        # Save the summary
        csv_path = output_dir / "flight_summary.csv"
        json_path = output_dir / "flight_summary.json"
        log.info(f"Saving aggregated summary to {csv_path}")
        summary_df.to_csv(csv_path, index=False)
        log.info(f"Saving aggregated summary to {json_path}")
        summary_df.to_json(json_path, orient="records", indent=4)
        return summary_df
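A quick sanity check on the duration column, under the 64 ns/tick assumption hard-coded above (the tick values here are made up):

    TICK_DURATION_S = 64e-9                            # 64 ns per timetag tick
    ttag_start, ttag_end = 32_491_231, 48_116_231
    print((ttag_end - ttag_start) * TICK_DURATION_S)   # 15_625_000 ticks -> 1.0 s

So 'Duration (s)' is simply the segment's TTAG span scaled by the tick period.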
    def _parse_summary_txt_to_dataframe(self, txt_path: Path) -> Optional["pd.DataFrame"]:
        """
        Parses the text file and returns a DataFrame, or None on failure.
        """
        if pd is None:
            log.error("Pandas library not installed. Cannot process summary.")
            return None

        # Column indices in the semicolon-delimited summary
        COL_FILENAME, COL_BATCH, COL_TTAG = 0, 2, 3
        COL_MODE_1, COL_MODE_2 = 4, 6
        COL_SCALE_STR = 14
        COL_WF_NUM, COL_WF_STR = 17, 18

        records = []
        with open(txt_path, 'r', encoding='utf-8', errors='ignore') as f:
            reader = csv.reader(f, delimiter=';')
            try:
                next(reader)  # Skip the header
            except StopIteration:
                return None

            for row in reader:
                if len(row) > COL_WF_STR:
                    try:
                        filename, batch_id_str, ttag_str = row[COL_FILENAME].strip(), row[COL_BATCH].strip(), row[COL_TTAG].strip()
                        mode1, mode2 = row[COL_MODE_1].strip(), row[COL_MODE_2].strip()
                        scale = row[COL_SCALE_STR].strip()
                        wf_num, wf_str = row[COL_WF_NUM].strip(), row[COL_WF_STR].strip()

                        if not all([filename, batch_id_str, ttag_str, mode1, mode2, scale, wf_num, wf_str]):
                            continue

                        wf_combined = f"wf_{wf_num}-{wf_str}"
                        status_str = f"{mode1}-{mode2} | {scale} | {wf_combined}"
                        records.append({
                            "batch_id": int(batch_id_str), "ttag": int(ttag_str),
                            "status": status_str, "filename": filename
                        })
                    except (ValueError, IndexError):
                        continue

        if not records:
            log.warning(f"No valid data rows parsed from {txt_path.name}")
            return None
        return pd.DataFrame(records)

    def open_current_flight_folder(self):
        """Opens the folder for the last analyzed flight."""
        if self.current_flight_folder_path and self.current_flight_folder_path.is_dir():
            self.open_folder_from_path(str(self.current_flight_folder_path))
        else:
            log.warning("No flight folder available. Please run an analysis first.")
            messagebox.showinfo("No Folder", "No flight folder has been created yet. Please run an analysis first.", parent=self.view)

    def _prepare_out_processor_files(self) -> bool:
        self.output_file_handles.clear()
@@ -586,186 +767,199 @@ class AppController:
    def select_and_analyze_flight_folder(self):
        """
        Opens a dialog to select a folder, checks for existing analysis,
        and then starts the analysis or loads previous results.
        """
        initial_dir = self.config_manager.get("last_flight_folder")
        if new_dir := filedialog.askdirectory(initialdir=initial_dir, title="Select Folder with Flight Recordings"):
            self.config_manager.set("last_flight_folder", new_dir)
            self.config_manager.save_config()
            self.view.analyzer_rec_folder_var.set(new_dir)

            flight_name = self._generate_flight_name(Path(new_dir))
            self.view.analyzer_flight_name_var.set(flight_name)

            # Enable the analysis button right away
            self.view.start_analysis_button.config(state=tk.NORMAL)

            workspace_dir = Path.cwd() / "flight_workspace"
            flight_dir = workspace_dir / flight_name
            summary_csv = flight_dir / "flight_summary.csv"
            if summary_csv.is_file():
                response = messagebox.askyesno(
                    "Previous Analysis Found",
                    "An analysis for this flight already exists.\n\n"
                    "Do you want to load the previous results?",
                    parent=self.view
                )
                if response:
                    log.info(f"Loading previous analysis from {flight_dir}")
                    self.current_flight_folder_path = flight_dir
                    self._load_previous_analysis(summary_csv)
                    return

            # If the user answers "No", or there is no previous analysis, get ready
            self.view.analyzer_info_var.set("Ready to start new analysis.")
    def _analyze_folder_worker(self, dir_path_str: str):
        """Pre-analysis worker: counts files and enables the start button."""
        try:
            folder_path = Path(dir_path_str)
            rec_files = sorted([f for f in folder_path.glob("*.rec")])
            if not rec_files:
                self.view.analyzer_info_var.set("No .rec files found.")
                return

            # Remember the total number of files so progress can be reported
            self.total_files_for_analysis = len(rec_files)
            total_size_mb = sum(f.stat().st_size for f in rec_files) / (1024 * 1024)
            info_text = (
                f"Found {self.total_files_for_analysis} .rec files, "
                f"Total size: {total_size_mb:.2f} MB. Ready for analysis."
            )
            self.view.analyzer_info_var.set(info_text)
            self.view.start_analysis_button.config(state=tk.NORMAL)
        except Exception as e:
            log.error(f"Error in folder pre-analysis worker: {e}", exc_info=True)
            self.view.analyzer_info_var.set(f"Error: {e}")
            self.view.start_analysis_button.config(state=tk.DISABLED)
    def start_flight_analysis(self):
        """
        Starts the main flight analysis process, asking for confirmation
        if a previous analysis exists.
        """
        if self.is_processing:
            log.warning("Analysis already in progress.")
            return
        flight_name = self.view.analyzer_flight_name_var.get().strip()
        rec_folder = self.view.analyzer_rec_folder_var.get()
        if not flight_name or not rec_folder:
            messagebox.showerror("Setup Incomplete", "Select a folder and provide a flight name.", parent=self.view)
            return

        # If an analysis already exists, ask for confirmation before overwriting it
        workspace_dir = Path.cwd() / "flight_workspace"
        flight_dir = workspace_dir / flight_name
        if flight_dir.exists():
            response = messagebox.askyesno(
                "Confirm Re-analysis",
                "This will delete the previous analysis results for this flight and start a new one.\n"
                "This process might take a long time.\n\n"
                "Are you sure you want to continue?",
                parent=self.view,
                icon='warning'
            )
            if not response:  # The user clicked "No"
                log.info("User cancelled re-analysis.")
                return
            # Clean up the previous directory before starting over
            log.info(f"Removing previous analysis directory: {flight_dir}")
            shutil.rmtree(flight_dir)

        # Reset the UI and launch the worker
        self.is_processing = True
        self.view.start_processing_ui()
        self.view.analyzer_progress_var.set(0)
        self.view.analyzer_progress_text_var.set("Starting...")
        self.view.analyzer_info_var.set(f"Starting new analysis for flight: {flight_name}...")
        self.view.update_idletasks()

        analysis_thread = threading.Thread(target=self._flight_analysis_worker, args=(rec_folder, flight_name), daemon=True)
        analysis_thread.start()
    def _generate_flight_name(self, folder_path: Path) -> str:
        """Generates a default flight name based on the folder or first file timestamp."""
        rec_files = sorted([f for f in folder_path.glob("*.rec")])
        if not rec_files:
            return f"{folder_path.name}_Flight"
        first_file_name = rec_files[0].stem
        match = re.search(r"(\d{2})-(\d{2})-(\d{2})-(\d{2})-(\d{2})-(\d{2})", first_file_name)
        if match:
            yy, mo, dd, hh, mi, ss = match.groups()
            return f"{yy}{mo}{dd}_{hh}{mi}{ss}_Flight"
        return f"{folder_path.name}_Flight"
    def _load_previous_analysis(self, summary_csv_path: Path):
        """Loads a previously saved summary CSV and populates the GUI."""
        if pd is None:
            log.error("Cannot load previous analysis: Pandas is not installed.")
            return
        try:
            summary_df = pd.read_csv(summary_csv_path)
            self._populate_timeline_from_dataframe(summary_df)
            self.view.analyzer_info_var.set("Successfully loaded previous analysis.")
            self.view.open_flight_folder_button.config(state=tk.NORMAL)
            self.view.export_segment_button.config(state=tk.NORMAL)
        except Exception as e:
            log.error(f"Failed to load previous analysis from {summary_csv_path}: {e}")
            messagebox.showerror("Load Error", f"Could not load the summary file:\n{e}", parent=self.view)
    def _flight_analysis_worker(self, rec_folder_str: str, flight_name: str):
        """
        Worker thread that performs the full flight summary analysis.
        Generates both a raw storyboard and an aggregated summary.
        """
        self.current_flight_folder_path = None
        self.view.open_flight_folder_button.config(state=tk.DISABLED)
        try:
            workspace_dir = Path.cwd() / "flight_workspace"
            flight_dir = workspace_dir / flight_name
            flight_dir.mkdir(parents=True, exist_ok=True)
            self.current_flight_folder_path = flight_dir
            log.info(f"Workspace for flight '{flight_name}' created at: {flight_dir}")

            # Invoke g_reconverter (call logic unchanged from the previous version)
            rec_files = sorted(Path(rec_folder_str).glob("*.rec"))
            if not rec_files:
                raise FileNotFoundError("No .rec files found.")
            first_rec_file = str(rec_files[0])
            num_rec_files = len(rec_files)
            cpp_config = self.config_manager.get_cpp_converter_config()
            exe_path = cpp_config.get("cpp_executable_path")
            if not exe_path or not Path(exe_path).is_file():
                raise ValueError("g_reconverter.exe not found.")
            command_list = [exe_path, first_rec_file, f"/n={num_rec_files}", "/p=1", "/a"]
            log.info(f"Running g_reconverter: {' '.join(command_list)}")
            self._launch_worker(run_cpp_converter, (command_list, self.result_queue, str(flight_dir)))
            self.worker_process.join()
            log.info("g_reconverter process finished.")
            time.sleep(0.5)

            # g_reconverter writes its text post-processing output into flight_dir;
            # pick the main summary, skipping any 'aesa' variants
            all_txt_files = list(flight_dir.glob("pp-*.txt"))
            summary_files = [f for f in all_txt_files if 'aesa' not in f.name.lower()]
            if not summary_files:
                raise FileNotFoundError("Main summary file not found.")
            summary_txt_path = summary_files[0]
            log.info(f"Found main summary file: {summary_txt_path.name}")

            # 1. Parse the text file into a complete DataFrame and save the storyboard
            self.view.analyzer_info_var.set("Parsing log and saving storyboard...")
            storyboard_df = self._parse_and_save_storyboard(summary_txt_path, flight_dir)
            if storyboard_df is None or storyboard_df.empty:
                raise ValueError("Parsing storyboard failed or resulted in empty data.")

            # 2. Create and save the aggregated summary
            self.view.analyzer_info_var.set("Aggregating data for summary...")
            summary_df = self._create_and_save_summary(storyboard_df, flight_dir)

            # 3. Update the GUI with the summary data
            self._populate_timeline_from_dataframe(summary_df)
            log.info("Flight analysis complete. Storyboard and summary saved.")
            self.view.analyzer_info_var.set("Analysis complete. Files saved.")
            self.view.export_segment_button.config(state=tk.NORMAL)
            self.view.open_flight_folder_button.config(state=tk.NORMAL)
        except Exception as e:
            log.error(f"Flight analysis worker failed: {e}", exc_info=True)
@@ -774,6 +968,7 @@ class AppController:
            self.is_processing = False
            self.view.master.after(100, lambda: self.view.update_ui_for_processing_state(False))

    def _create_default_summary_profile(self) -> ExportProfile:
        """Creates a hardcoded default profile for flight summary generation."""
        log.debug("Creating default profile for flight summary.")
@@ -813,51 +1008,26 @@ class AppController:
            log.error(f"Failed to open summary CSV file for writing: {e}")
            raise

    def _populate_timeline_from_dataframe(self, summary_df: "pd.DataFrame"):
        """Populates the GUI timeline from the aggregated summary DataFrame."""
        if summary_df.empty:
            log.warning("Timeline population skipped: summary dataframe is empty.")
            return

        # Clear any existing rows, then insert one row per summary segment
        self.view.flight_timeline_tree.delete(*self.view.flight_timeline_tree.get_children())
        for _, row in summary_df.iterrows():
            duration_str = f"{row['Duration (s)']:.2f}"
            self.view.flight_timeline_tree.insert(
                "", "end",
                values=(
                    row['Segment (Mode | Scale | WF)'],
                    row['Start Batch'],
                    row['End Batch'],
                    row['Batch Count'],
                    duration_str,
                    row['Start File'],
                    row['End File'],
                    row['# Files']
                )
            )

View File

@@ -7,11 +7,14 @@ Handles subprocess creation, output streaming to a queue, and error management.
import subprocess
import os
import re
import multiprocessing as mp

from ..utils import logger

log = logger.get_logger(__name__)

FILE_PROGRESS_REGEX = re.compile(r"g_reconvert-I\[_.*?\.rec\]: \[(\d+)\]")
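A hypothetical line of the shape this pattern targets (the exact g_reconverter log format is inferred from the regex itself):

    m = FILE_PROGRESS_REGEX.search("g_reconvert-I[_radar_0001.rec]: [17]")
    print(m.group(1))  # '17' -> currently processing file number 17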
def run_cpp_converter(
    command_list: list[str], result_queue: mp.Queue, output_directory: str
@@ -55,8 +58,8 @@ def run_cpp_converter(
            encoding="utf-8",
            errors="replace",
            bufsize=1,
            creationflags=subprocess.CREATE_NO_WINDOW if os.name == "nt" else 0,
            cwd=output_directory,  # Run the tool inside the flight workspace
        )

        log.info(
@@ -66,19 +69,33 @@ def run_cpp_converter(
        if process.stdout:
            for line in iter(process.stdout.readline, ""):
                line_lower = line.lower().strip()

                # Report per-file progress to the GUI
                progress_match = FILE_PROGRESS_REGEX.search(line)
                if progress_match:
                    file_number = int(progress_match.group(1))
                    result_queue.put(
                        {"type": "file_progress", "file_number": file_number}
                    )

                # Drop noisy g_reconverter warnings from the log
                if "g_reconvert-w" in line_lower or "changed: a-err" in line_lower:
                    continue

                msg_type = "INFO"
                if (
                    "error" in line_lower
                    or "fail" in line_lower
                    or "exception" in line_lower
                ):
                    msg_type = "ERROR"
                elif "warn" in line_lower:  # Still useful for other kinds of warning
                    msg_type = "WARNING"
                elif "success" in line_lower or "finish" in line_lower:
                    msg_type = "SUCCESS"

                # Send the line (if not filtered out) to the queue
                result_queue.put(
                    {"type": "log", "level": msg_type, "message": line.strip()}
                )
@@ -102,7 +119,6 @@ def run_cpp_converter(
        )
    except FileNotFoundError:
        # Already handled by the explicit os.path.exists check above
        pass
    except PermissionError:
        error_msg = (

View File

@@ -45,7 +45,7 @@ class MainWindow(tk.Frame):
        self.master.title(
            f"Radar Data Reader & Processor - {WRAPPER_APP_VERSION_STRING}"
        )
        self.master.geometry("1280x800")

        self._create_widgets()
        self._setup_gui_logging(logging_config)
@@ -80,6 +80,9 @@ class MainWindow(tk.Frame):
        self.analyzer_flight_name_var = tk.StringVar()
        self.analyzer_info_var = tk.StringVar(value="Please select a folder and a flight name.")
        self.analyzer_progress_var = tk.DoubleVar(value=0)
        self.analyzer_progress_text_var = tk.StringVar(value="N/A")

    def _create_widgets(self):
        menu_bar = tk.Menu(self.master)
        self.master.config(menu=menu_bar)
@@ -130,29 +133,20 @@ class MainWindow(tk.Frame):
    def _create_flight_analyzer_tab(self, parent):
        """Creates the widgets for the new Flight Analyzer wizard tab."""
        parent.columnconfigure(0, weight=1)
        # The timeline row (now row 3) must expand vertically
        parent.rowconfigure(3, weight=1)

        # --- Frame 1: Flight input and setup ---
        setup_frame = ttk.LabelFrame(parent, text="Flight Setup")
        setup_frame.grid(row=0, column=0, sticky="ew", padx=5, pady=5)
        setup_frame.columnconfigure(1, weight=1)

        ttk.Label(setup_frame, text="Recordings Folder:").grid(row=0, column=0, padx=5, pady=5, sticky="w")
        rec_folder_entry = ttk.Entry(setup_frame, textvariable=self.analyzer_rec_folder_var, state="readonly")
        rec_folder_entry.grid(row=0, column=1, sticky="ew", padx=5)
        ttk.Button(setup_frame, text="Browse...", command=self.controller.select_and_analyze_flight_folder).grid(row=0, column=2, padx=5)

        ttk.Label(setup_frame, text="Flight Name:").grid(row=1, column=0, padx=5, pady=5, sticky="w")
        flight_name_entry = ttk.Entry(setup_frame, textvariable=self.analyzer_flight_name_var)
        flight_name_entry.grid(row=1, column=1, columnspan=2, sticky="ew", padx=5)
@@ -164,41 +158,73 @@ class MainWindow(tk.Frame):
            action_frame, text="Start Flight Analysis", command=self.controller.start_flight_analysis,
            state=tk.DISABLED
        )
        self.start_analysis_button.pack(side=tk.LEFT, padx=(0, 5))

        info_label = ttk.Label(action_frame, textvariable=self.analyzer_info_var)
        info_label.pack(side=tk.LEFT, padx=20)

        # --- Frame 3: Progress bar ---
        progress_frame = ttk.Frame(parent)
        progress_frame.grid(row=2, column=0, sticky="ew", padx=5, pady=5)
        progress_frame.columnconfigure(1, weight=1)
        ttk.Label(progress_frame, text="Analysis Progress:").grid(row=0, column=0, sticky="w")
        self.analyzer_progressbar = ttk.Progressbar(progress_frame, variable=self.analyzer_progress_var)
        self.analyzer_progressbar.grid(row=0, column=1, sticky="ew", padx=5)
        ttk.Label(progress_frame, textvariable=self.analyzer_progress_text_var).grid(row=0, column=2, sticky="w")

        # --- Frame 4: Results and actions on the results ---
        results_frame = ttk.LabelFrame(parent, text="Flight Summary & Segments")
        results_frame.grid(row=3, column=0, sticky="nsew", padx=5, pady=5)
        results_frame.columnconfigure(0, weight=1)
        results_frame.rowconfigure(0, weight=1)

        self.flight_timeline_tree = ttk.Treeview(
            results_frame,
            columns=("status", "start_batch", "end_batch", "batch_count", "duration", "start_file", "end_file", "file_count"),
            show="headings"
        )
        self.flight_timeline_tree.heading("status", text="Segment (Mode | Scale | WF)")
        self.flight_timeline_tree.heading("start_batch", text="Start Batch")
        self.flight_timeline_tree.heading("end_batch", text="End Batch")
        self.flight_timeline_tree.heading("batch_count", text="Batch Count")
        self.flight_timeline_tree.heading("duration", text="Duration (s)")
        self.flight_timeline_tree.heading("start_file", text="Start File")
        self.flight_timeline_tree.heading("end_file", text="End File")
        self.flight_timeline_tree.heading("file_count", text="# Files")

        self.flight_timeline_tree.column("status", width=250, stretch=True)
        self.flight_timeline_tree.column("start_batch", width=90, anchor="center")
        self.flight_timeline_tree.column("end_batch", width=90, anchor="center")
        self.flight_timeline_tree.column("batch_count", width=90, anchor="center")
        self.flight_timeline_tree.column("duration", width=90, anchor="center")
        self.flight_timeline_tree.column("start_file", width=200, stretch=True)
        self.flight_timeline_tree.column("end_file", width=200, stretch=True)
        self.flight_timeline_tree.column("file_count", width=60, anchor="center")
        self.flight_timeline_tree.grid(row=0, column=0, columnspan=2, sticky="nsew")

        tree_scrollbar = ttk.Scrollbar(results_frame, orient="vertical", command=self.flight_timeline_tree.yview)
        self.flight_timeline_tree.configure(yscrollcommand=tree_scrollbar.set)
        tree_scrollbar.grid(row=0, column=2, sticky="ns")

        # New frame for the bottom buttons, left-aligned
        bottom_action_frame = ttk.Frame(results_frame)
        bottom_action_frame.grid(row=1, column=0, columnspan=2, sticky="w", pady=5)

        self.export_segment_button = ttk.Button(
            bottom_action_frame, text="Export Selected Segment(s)", state=tk.DISABLED,
            command=lambda: print("TODO: Export Segment")  # TODO
        )
        self.export_segment_button.pack(side=tk.LEFT, padx=(0, 5))

        self.open_flight_folder_button = ttk.Button(
            bottom_action_frame, text="Open Flight Folder", command=self.controller.open_current_flight_folder,
            state=tk.DISABLED
        )
        self.open_flight_folder_button.pack(side=tk.LEFT, padx=5)

    def _create_out_processor_tab(self, parent):
        parent.columnconfigure(1, weight=1)
@@ -526,6 +552,13 @@ class MainWindow(tk.Frame):
            )
        elif msg_type == "data_batch":
            self.controller.handle_data_batch(msg.get("data"))
        elif msg_type == "file_progress":
            file_num = msg.get("file_number", 0)
            total_files = self.controller.total_files_for_analysis
            if total_files > 0:
                progress = (file_num / total_files) * 100
                self.analyzer_progress_var.set(progress)
                self.analyzer_progress_text_var.set(f"File {file_num} / {total_files}")
        elif msg_type in ("success", "complete", "error"):
            if msg_type == "error":
                log.error(f"Received error from worker: {msg.get('message')}")