update

parent 45380c86e6
commit 17e2cabffb
@@ -18,6 +18,7 @@ import time
 from datetime import datetime
 import queue
 from collections import OrderedDict
+from .file_analyzer import FileStructureAnalyzer
 
 try:
     import pandas as pd
@@ -85,6 +86,20 @@ def _convert_ctypes_for_json(obj: Any) -> Any:
     if isinstance(obj, ctypes.Array): return [_convert_ctypes_for_json(item) for item in obj]
     return obj
 
+def run_analysis_worker(filepath: Path, config: Dict[str, Any], result_queue: mp.Queue):
+    """
+    Worker function to run the FileStructureAnalyzer in a separate process.
+    """
+    try:
+        analyzer = FileStructureAnalyzer(filepath, config)
+        report = analyzer.analyze()
+        stats = analyzer.stats
+        # Send a single message with the complete report and the statistics
+        result_queue.put({"type": "analysis_report", "report": report, "stats": stats})
+    except Exception as e:
+        log.error(f"Analysis worker failed: {e}", exc_info=True)
+        result_queue.put({"type": "error", "message": f"Analysis worker failed: {e}"})
+
 
 class AppController:
     """The main controller of the application."""
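The worker above communicates solely through the queue: one "analysis_report" message on success, one "error" message on failure. A minimal standalone sketch of driving that contract outside the GUI (illustrative only, not part of this commit; the import path and the .out filename are assumptions):

# demo: exercising run_analysis_worker outside the GUI (not part of this commit)
import multiprocessing as mp
from pathlib import Path
from radar_data_reader.core.app_controller import run_analysis_worker  # assumed module path

if __name__ == "__main__":
    result_queue = mp.Queue()
    worker = mp.Process(
        target=run_analysis_worker,
        args=(Path("capture.out"), {}, result_queue),  # placeholder file, empty config
    )
    worker.start()
    msg = result_queue.get()  # drain the queue before join() to avoid a deadlock
    worker.join()
    if msg["type"] == "analysis_report":
        print(f"{len(msg['report'])} report entries; stats: {msg['stats']}")
    else:
        print("worker failed:", msg["message"])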
@@ -243,34 +258,58 @@ class AppController:
         return False
 
     def start_out_processing(self):
-        if self.is_processing: log.warning("Processing already in progress."); return
-        if not all([self.view.out_filepath_var.get(), self.view.out_output_dir_var.get(), self.view.out_basename_var.get()]):
-            log.error("Please set input file, output directory, and base filename."); return
-        if not any([self.view.out_output_csv_var.get(), self.view.out_output_json_var.get()]):
-            log.error("Please select at least one output format (CSV or JSON)."); return
-        if not self._prepare_out_processor_files(): return
+        if self.is_processing:
+            log.warning("Processing already in progress.")
+            return
+
+        filepath_str = self.view.out_filepath_var.get()
+        output_dir_str = self.view.out_output_dir_var.get()
+        basename = self.view.out_basename_var.get()
+
+        if not all([filepath_str, output_dir_str, basename]):
+            log.error("Please set input file, output directory, and base filename.")
+            return
+
         self.is_processing = True
         self.view.start_processing_ui()
 
-        filepath_str = self.view.out_filepath_var.get()
-        self.config_manager.set("last_opened_out_file", filepath_str)
-        self.config_manager.set("last_out_output_dir", self.view.out_output_dir_var.get())
-        self.config_manager.set("active_out_export_profile_name", self.view.out_csv_profile_var.get())
-        self.config_manager.save_config()
-
-        active_profile = self.active_export_profiles.get("csv") or self.active_export_profiles.get("json")
-        if not active_profile:
-            log.error("No active export profile found for processing.")
-            self.is_processing = False; self.view.update_ui_for_processing_state(False)
-            return
-
-        cpp_config = self.config_manager.get_cpp_converter_config()
-        enable_profiling = cpp_config.get("enable_python_worker_profiling", False)
-
-        self.out_processor.start_processing(Path(filepath_str), active_profile, enable_profiling)
-        self.worker_process = self.out_processor.get_worker()
-        self.view.poll_result_queue()
+        # --- New logic for choosing the processing mode ---
+        if self.view.out_analysis_only_var.get():
+            log.info("Starting file structure analysis...")
+            analysis_config = self.config_manager.get("file_analyzer_config", {})
+            worker_args = (Path(filepath_str), analysis_config, self.result_queue)
+            self._launch_worker(run_analysis_worker, worker_args)
+        else:
+            # Standard data-export workflow
+            if not any([self.view.out_output_csv_var.get(), self.view.out_output_json_var.get()]):
+                log.error("Please select at least one output format (CSV or JSON).")
+                self.is_processing = False
+                self.view.update_ui_for_processing_state(False)
+                return
+            if not self._prepare_out_processor_files():
+                self.is_processing = False
+                self.view.update_ui_for_processing_state(False)
+                return
+
+            self.config_manager.set("last_opened_out_file", filepath_str)
+            self.config_manager.set("last_out_output_dir", output_dir_str)
+            self.config_manager.set("active_out_export_profile_name", self.view.out_csv_profile_var.get())
+            self.config_manager.save_config()
+
+            active_profile = self.active_export_profiles.get("csv") or self.active_export_profiles.get("json")
+            if not active_profile:
+                log.error("No active export profile found for processing.")
+                self.is_processing = False
+                self.view.update_ui_for_processing_state(False)
+                return
+
+            cpp_config = self.config_manager.get_cpp_converter_config()
+            enable_profiling = cpp_config.get("enable_python_worker_profiling", False)
+
+            self.out_processor.start_processing(Path(filepath_str), active_profile, enable_profiling)
+            # The worker process is retrieved inside out_processor
+            self.worker_process = self.out_processor.get_worker()
+            self.view.poll_result_queue()
 
     def _build_cpp_command_list(self) -> List[str]:
         config = self.config_manager.get_cpp_converter_config()
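The analysis branch calls self._launch_worker(run_analysis_worker, worker_args), a helper that does not appear in this diff. A minimal sketch of the shape such a helper could take, assuming it simply wraps multiprocessing.Process (hypothetical, not the commit's actual implementation):

# Hypothetical sketch of the _launch_worker helper (not shown in this diff)
def _launch_worker(self, target, args):
    self.worker_process = mp.Process(target=target, args=args, daemon=True)
    self.worker_process.start()
    self.view.poll_result_queue()  # the view then drains self.result_queue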
@@ -778,4 +817,61 @@ class AppController:
         self.segment_processor = SegmentProcessor(config, self.result_queue, self.command_queue)
         self.segment_processor.start()
 
         self.view.poll_result_queue()
+
+    def _save_analysis_report(self, report_data: Dict[str, Any]):
+        """Saves the analysis report to a text file."""
+        report_list = report_data.get("report", [])
+        stats = report_data.get("stats", {})
+        block_counts = stats.get("block_type_counts", {})
+
+        output_dir = Path(self.view.out_output_dir_var.get())
+        basename = self.view.out_basename_var.get()
+        report_path = output_dir / f"{basename}_analysis_report.txt"
+
+        log.info(f"Saving analysis report to: {report_path}")
+        try:
+            output_dir.mkdir(parents=True, exist_ok=True)
+            with open(report_path, "w", encoding="utf-8") as f:
+                f.write(f"--- Analysis Report for file: {self.view.out_filepath_var.get()} ---\n\n")
+
+                f.write("--- Summary ---\n")
+                # Write the general statistics
+                for key, value in stats.items():
+                    if key != "block_type_counts":
+                        f.write(f"{key.replace('_', ' ').title()}: {value}\n")
+                f.write("\n")
+
+                # --- New section for the block type summary ---
+                if block_counts:
+                    f.write("--- Block Type Summary ---\n")
+                    sorted_blocks = sorted(block_counts.items(), key=lambda item: item[1], reverse=True)
+                    for name, count in sorted_blocks:
+                        f.write(f"{name:<15}: {count}\n")
+                    f.write("\n")
+
+                f.write("--- Block Sequence Log ---\n")
+                for entry in report_list:
+                    offset = entry.get('offset', 'N/A')
+                    msg_type = entry.get('type', 'INFO').upper()
+
+                    if msg_type == "BLOCK":
+                        name = entry.get('name', 'N/A')
+                        # Read the correct key: declared_payload_bytes
+                        size = entry.get('declared_payload_bytes', 'N/A')
+                        f.write(f"[{offset}] {msg_type:<8} | Name: {name:<15} | Declared Payload: {size} bytes\n")
+                    else:
+                        message = entry.get('message', '')
+                        f.write(f"[{offset}] {msg_type:<8} | {message}\n")
+            log.info("Analysis report saved successfully. You can open it from the output directory.")
+        except IOError as e:
+            log.error(f"Failed to save analysis report: {e}")
+
+    def handle_analysis_report(self, msg: Dict[str, Any]):
+        """Handles the completion of the analysis worker."""
+        log.info("--- File Structure Analysis Complete. ---")
+        self._save_analysis_report(msg)
+
+        self.is_processing = False
+        self.worker_process = None
+        self.view.update_ui_for_processing_state(False)
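For orientation, the writer above produces a plain-text report of roughly this shape (all values below are invented for illustration; only the layout follows the format strings in the code):

--- Analysis Report for file: /data/capture.out ---

--- Summary ---
Total Blocks Found: 1582
Size Mismatches: 3

--- Block Type Summary ---
DSPHDROUT      : 412
SUM            : 398

--- Block Sequence Log ---
[0x0] BLOCK    | Name: DSPHDROUT       | Declared Payload: 2048 bytes
[0x8a0] ERROR    | Marker found, but header is invalid or unreadable.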
radar_data_reader/core/file_analyzer.py (new file, 174 lines)
@@ -0,0 +1,174 @@
+# radar_data_reader/core/file_analyzer.py
+
+"""
+Provides tools for analyzing the structural integrity of .out files.
+This module implements a sequential, marker-to-marker reading logic with
+validation of declared block sizes.
+"""
+
+from pathlib import Path
+from typing import List, Dict, Any, Optional
+import numpy as np
+from collections import defaultdict
+
+from ..utils import logger
+from .data_structures import BLOCK_TYPE_MAP, LEGACY_BLOCK_MARKER, FW_BLOCK_MARKER
+
+log = logger.get_logger(__name__)
+
+# Constants for header offsets (in 32-bit words)
+LEGACY_NAME_OFFSET = 17
+LEGACY_SIZE_OFFSET = 5
+FW_NAME_OFFSET = 2
+FW_SIZE_OFFSET = 5
+FW_HEADER_WORDS = 8
+LEGACY_HEADER_WORDS_APPROX = 36  # Used for payload calculation
+
+
+class FileStructureAnalyzer:
+    """
+    Analyzes the block structure of a .out file by jumping from marker to marker
+    and validating the space between them against the declared block size.
+    """
+
+    def __init__(self, file_path: Path, config: Dict[str, Any]):
+        self.file_path = file_path
+        self.config = config
+        self.data_vector: Optional[np.ndarray] = None
+        self.report: List[Dict[str, Any]] = []
+        self.stats: Dict[str, Any] = {
+            "total_blocks_found": 0,
+            "size_mismatches": 0,
+            "block_type_counts": defaultdict(int),
+        }
+        self.all_marker_positions: Optional[np.ndarray] = None
+
+    def analyze(self) -> List[Dict[str, Any]]:
+        if not self._load_and_find_all_markers():
+            return self.report
+
+        self._scan_marker_to_marker()
+        # Convert defaultdict to dict for JSON serialization
+        self.stats["block_type_counts"] = dict(self.stats["block_type_counts"])
+        return self.report
+
+    def _load_and_find_all_markers(self) -> bool:
+        """Loads the file and pre-scans for all potential block markers."""
+        try:
+            log.info(f"[Analyzer] Loading data from {self.file_path}...")
+            self.data_vector = np.fromfile(str(self.file_path), dtype="<u4")
+            log.info(f"[Analyzer] Loaded {self.data_vector.size} 32-bit words.")
+
+            legacy_indices = np.where(self.data_vector == LEGACY_BLOCK_MARKER)[0]
+            fw_indices = np.where(self.data_vector == FW_BLOCK_MARKER)[0]
+
+            confirmed_legacy = legacy_indices[np.where(np.diff(legacy_indices) == 1)[0]]
+            confirmed_fw = fw_indices[np.where(np.diff(fw_indices) == 1)[0]]
+
+            self.all_marker_positions = np.sort(np.concatenate((confirmed_legacy, confirmed_fw)))
+
+            if self.all_marker_positions.size == 0:
+                log.error("[Analyzer] No valid block markers found in the file.")
+                self.report.append({"type": "critical", "message": "No valid block markers found."})
+                return False
+
+            log.info(f"[Analyzer] Found {self.all_marker_positions.size} block start markers.")
+            return True
+        except Exception as e:
+            log.error(f"[Analyzer] Failed to load file: {e}", exc_info=True)
+            self.report.append({"type": "error", "message": f"Failed to load file: {e}"})
+            return False
+
+    def _scan_marker_to_marker(self):
+        """
+        Iterates through the pre-found markers, validating the space between them.
+        """
+        if self.all_marker_positions is None or self.data_vector is None: return
+
+        num_markers = self.all_marker_positions.size
+        for i in range(num_markers):
+            current_pos = self.all_marker_positions[i]
+
+            header_info = self._read_block_header(current_pos)
+            if not header_info:
+                self.report.append({
+                    "type": "error", "offset": hex(current_pos * 4),
+                    "message": "Marker found, but header is invalid or unreadable."
+                })
+                continue
+
+            block_name = header_info['name']
+            self.stats["total_blocks_found"] += 1
+            self.stats["block_type_counts"][block_name] += 1
+
+            next_marker_pos = self.all_marker_positions[i+1] if i + 1 < num_markers else self.data_vector.size
+            real_block_size_bytes = (next_marker_pos - current_pos) * 4
+            declared_payload_bytes = header_info['size_bytes']
+
+            entry = {
+                "type": "block",
+                "offset": hex(current_pos * 4),
+                "name": block_name,
+                "declared_payload_bytes": declared_payload_bytes,
+                "real_block_size_bytes": real_block_size_bytes,
+                "mismatch": False
+            }
+
+            # For legacy blocks, the real size should be close to
+            # header + payload. For FW blocks, the declared size is unreliable.
+            if header_info['marker_type'] == LEGACY_BLOCK_MARKER:
+                # Compute the real payload size.
+                # The legacy header is variable, but the payload cannot be larger than the whole block.
+                if declared_payload_bytes > real_block_size_bytes:
+                    entry["mismatch"] = True
+                    entry["message"] = f"Declared payload ({declared_payload_bytes} B) is larger than the entire block on disk ({real_block_size_bytes} B)."
+                    self.stats["size_mismatches"] += 1
+
+            self.report.append(entry)
+
+    def _read_block_header(self, pos: int) -> Optional[Dict[str, Any]]:
+        """Reads block name and size from its header at a given position."""
+        if self.data_vector is None or pos + LEGACY_NAME_OFFSET + 1 >= self.data_vector.size:
+            return None
+
+        try:
+            marker_type = self.data_vector[pos]
+            block_name = "UNKNOWN"
+
+            if marker_type == LEGACY_BLOCK_MARKER:
+                name_id = self.data_vector[pos + LEGACY_NAME_OFFSET]
+                discriminator = self.data_vector[pos + LEGACY_NAME_OFFSET + 1]
+
+                size_bytes = self.data_vector[pos + LEGACY_SIZE_OFFSET]
+
+                # --- Hierarchical classification logic ---
+                # This logic now maps the behavior observed in the logs.
+
+                # 1. Identify the Tracking blocks (STT/MTT), which take precedence
+                if name_id == 1599362131 and discriminator == 4276556:
+                    block_name = "STT"  # Unified name
+                elif name_id == 1599362125 and discriminator == 4276556:
+                    block_name = "MTT"  # Unified name
+
+                # 2. If it is not tracking, check whether it is a DSPHDROUT
+                elif name_id == 1213223748 and discriminator == 1431261764:
+                    block_name = "DSPHDROUT"
+
+                # 3. Otherwise, use the standard mapping based only on the name_id
+                else:
+                    block_name = BLOCK_TYPE_MAP.get(name_id, f"UNKNOWN_ID_{name_id}")
+
+            elif marker_type == FW_BLOCK_MARKER:
+                name_id = self.data_vector[pos + FW_NAME_OFFSET]
+                size_bytes = self.data_vector[pos + FW_SIZE_OFFSET]
+                block_name = BLOCK_TYPE_MAP.get(name_id, f"UNKNOWN_ID_{name_id}")
+            else:
+                return None
+
+            return {
+                "name": block_name,
+                "size_bytes": int(size_bytes),
+                "marker_type": marker_type,
+            }
+        except IndexError:
+            return None
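The pre-scan in _load_and_find_all_markers only accepts a marker when the same value also fills the following 32-bit word, so a lone 0x5A5A5A5A that happens to occur inside payload data is discarded. A small self-contained check of that numpy idiom (toy data, not a real .out stream):

import numpy as np

LEGACY_BLOCK_MARKER = 0x5A5A5A5A

# A lone marker at index 1 (noise) and a doubled marker at indices 4-5.
data = np.array([7, LEGACY_BLOCK_MARKER, 9, 3,
                 LEGACY_BLOCK_MARKER, LEGACY_BLOCK_MARKER, 11], dtype="<u4")

indices = np.where(data == LEGACY_BLOCK_MARKER)[0]       # [1, 4, 5]
confirmed = indices[np.where(np.diff(indices) == 1)[0]]  # [4]
print(confirmed)  # only the doubled marker survives as a block start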
@@ -41,7 +41,7 @@ class BaseBlock:
 BLOCK_TYPE_MAP = {
     # Legacy Blocks (marker 0x5A5A5A5A)
     1213223748: "DSPHDRIN",
-    1431261764: "DSPHDROUT",
+    1431261764: "DSPHDROUT",  # Used as a discriminator
     5068115: "SUM",
     1380013383: "GUARD",
     5914948: "DAZ",
@@ -51,8 +51,10 @@ BLOCK_TYPE_MAP = {
     1397769283: "CDPSTS",
     1095976257: "AESA",
     1397773124: "DSPS",
-    1599362131: "STT_LAA_NAME",
-    1599362125: "MTT_LAA_NAME",
+    # Primary IDs for the tracking blocks. The final name (STT/MTT)
+    # is decided by the parser, which also uses a discriminator.
+    1599362131: "STT",
+    1599362125: "MTT",
     # Firmware Blocks (marker 0x7A7A7A7A)
     5265477: "EXP",
     17232: "PC",
@@ -64,4 +66,6 @@ BLOCK_TYPE_MAP = {
     1280596037: "SOFTDFE",
 }
 
+LEGACY_BLOCK_MARKER = 0x5A5A5A5A
+FW_BLOCK_MARKER = 0x7A7A7A7A
 SIGNAL_DATA_MARKER = 1313304915
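The numeric keys in BLOCK_TYPE_MAP appear to be 4-byte ASCII tags read as little-endian 32-bit words, which makes the magic numbers in file_analyzer.py easy to sanity-check (an observation for orientation; the commit itself does not state this):

import struct

def tag(word: int) -> bytes:
    # Recover the 4 raw bytes a little-endian u32 name_id was read from
    return struct.pack("<I", word)

print(tag(1213223748))  # b'DSPH'   -> the DSPHDRIN / DSPHDROUT family
print(tag(1431261764))  # b'DROU'   -> used as the DSPHDROUT discriminator
print(tag(1599362131))  # b'STT_'   -> STT tracking blocks
print(tag(1599362125))  # b'MTT_'   -> MTT tracking blocks
print(tag(4276556))     # b'LAA\x00' -> the STT/MTT discriminator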
@@ -90,6 +90,25 @@ class MainWindow(tk.Frame):
 
         self.aggregate_by_scale_var = tk.BooleanVar(value=True)
         self.aggregate_by_waveform_var = tk.BooleanVar(value=True)
 
+        self.out_analysis_only_var = tk.BooleanVar(value=False)
+
+    def _on_analysis_only_toggle(self):
+        is_analysis_only = self.out_analysis_only_var.get()
+        state = tk.DISABLED if is_analysis_only else tk.NORMAL
+
+        # Disables standard output options when analysis only is selected
+        self.out_csv_check.config(state=state)
+        self.out_json_check.config(state=state)
+        self.out_csv_profile_combobox.config(state=state)
+        self.out_json_profile_combobox.config(state=state)
+
+        # Re-enables comboboxes only if their corresponding checkbox was already active
+        if not is_analysis_only:
+            if self.out_output_csv_var.get():
+                self.out_csv_profile_combobox.config(state="readonly")
+            if self.out_output_json_var.get():
+                self.out_json_profile_combobox.config(state="readonly")
+
     def _create_widgets(self):
         menu_bar = tk.Menu(self.master)
@@ -261,6 +280,8 @@ class MainWindow(tk.Frame):
 
     def _create_out_processor_tab(self, parent):
         parent.columnconfigure(1, weight=1)
+
+        # --- Input Frame ---
         input_frame = ttk.LabelFrame(parent, text="Input .out File")
         input_frame.grid(row=0, column=0, columnspan=3, sticky="ew", padx=5, pady=5)
         input_frame.columnconfigure(1, weight=1)
@@ -276,6 +297,8 @@ class MainWindow(tk.Frame):
         )
         self.out_browse_button.grid(row=0, column=2, padx=5, pady=5)
         self.out_filepath_var.trace_add("write", self.controller.on_out_config_changed)
+
+        # --- Output Frame ---
         output_frame = ttk.LabelFrame(parent, text="Output Configuration")
         output_frame.grid(row=1, column=0, columnspan=3, sticky="ew", padx=5, pady=5)
         output_frame.columnconfigure(1, weight=1)
@@ -304,31 +327,49 @@ class MainWindow(tk.Frame):
         ttk.Entry(output_frame, textvariable=self.out_basename_var).grid(
             row=1, column=1, columnspan=2, sticky="ew", padx=5
         )
+
+        # --- Formats & Options Frame ---
         formats_frame = ttk.LabelFrame(parent, text="Output Formats & Options")
         formats_frame.grid(row=2, column=0, columnspan=3, sticky="ew", padx=5, pady=5)
         formats_frame.columnconfigure(1, weight=1)
-        ttk.Checkbutton(
+
+        analysis_check = ttk.Checkbutton(
+            formats_frame,
+            text="Generate Structure Analysis Report Only",
+            variable=self.out_analysis_only_var,
+            command=self._on_analysis_only_toggle
+        )
+        analysis_check.grid(row=0, column=0, columnspan=3, sticky="w", padx=5, pady=(5, 10))
+
+        separator = ttk.Separator(formats_frame, orient='horizontal')
+        separator.grid(row=1, column=0, columnspan=3, sticky='ew', padx=5, pady=5)
+
+        self.out_csv_check = ttk.Checkbutton(
             formats_frame, text="Generate .csv file", variable=self.out_output_csv_var
-        ).grid(row=0, column=0, sticky="w", padx=5, pady=2)
+        )
+        self.out_csv_check.grid(row=2, column=0, sticky="w", padx=5, pady=2)
         self.out_csv_profile_combobox = ttk.Combobox(
             formats_frame,
             textvariable=self.out_csv_profile_var,
             state="readonly",
             width=25,
         )
-        self.out_csv_profile_combobox.grid(row=0, column=1, sticky="w", padx=5)
-        ttk.Checkbutton(
+        self.out_csv_profile_combobox.grid(row=2, column=1, sticky="w", padx=5)
+
+        self.out_json_check = ttk.Checkbutton(
             formats_frame, text="Generate .json file", variable=self.out_output_json_var
-        ).grid(row=1, column=0, sticky="w", padx=5, pady=2)
+        )
+        self.out_json_check.grid(row=3, column=0, sticky="w", padx=5, pady=2)
         self.out_json_profile_combobox = ttk.Combobox(
             formats_frame,
             textvariable=self.out_json_profile_var,
             state="readonly",
            width=25,
         )
-        self.out_json_profile_combobox.grid(row=1, column=1, sticky="w", padx=5)
+        self.out_json_profile_combobox.grid(row=3, column=1, sticky="w", padx=5)
 
         options_subframe = ttk.Frame(formats_frame)
-        options_subframe.grid(row=0, column=2, rowspan=2, sticky="w", padx=(20, 5))
+        options_subframe.grid(row=2, column=2, rowspan=2, sticky="w", padx=(20, 5))
         ttk.Checkbutton(
             options_subframe,
             text="Use Tab Separator (CSV)",
@@ -339,6 +380,8 @@ class MainWindow(tk.Frame):
             text="Use Full Path for Headers",
             variable=self.out_use_full_path_var,
         ).pack(anchor="w")
+
+        # --- Action & Live Data Frames ---
         action_frame = ttk.Frame(parent)
         action_frame.grid(row=3, column=0, columnspan=3, pady=(10, 0))
         self.out_process_button = ttk.Button(
@@ -582,17 +625,19 @@ class MainWindow(tk.Frame):
             if msg_type == "log":
                 level_str = msg.get("level", "INFO").upper()
                 level_map = {
-                    "ERROR": logging.ERROR,
-                    "WARNING": logging.WARNING,
-                    "SUCCESS": logging.INFO,
-                    "DEBUG": logging.DEBUG,
+                    "ERROR": logging.ERROR, "WARNING": logging.WARNING,
+                    "SUCCESS": logging.INFO, "DEBUG": logging.DEBUG,
                 }
                 log_level = level_map.get(level_str, logging.INFO)
                 log.log(log_level, f"[C++ Runner] {msg.get('message')}")
 
+            elif msg_type == "export_log":
+                log.info(f"[C++ Export] {msg.get('message')}")
+
             elif msg_type == "start":
                 self.total_blocks_for_progress = msg.get("total", 0)
                 self.progress_bar_var.set(0)
                 self.progress_text_var.set("Starting...")
 
             elif msg_type == "data_batch_fragment":
                 blocks_done = msg.get("blocks_done", 0)
@@ -606,6 +651,12 @@ class MainWindow(tk.Frame):
 
                 self.batches_found_var.set(f"{self.controller.total_batches_found_count}")
 
+            # --- New branch to handle the analysis report ---
+            elif msg_type == "analysis_report":
+                self.progress_bar_var.set(100)
+                self.progress_text_var.set("Analysis complete. Saving report...")
+                self.controller.handle_analysis_report(msg)
+
             elif msg_type == "file_progress":
                 file_num = msg.get("file_number", 0)
                 total_files = self.controller.total_files_for_analysis
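Taken together with run_analysis_worker, the single queue message this branch consumes has the following shape (field names are the ones used in the diff; the values are illustrative):

# Illustrative "analysis_report" message (invented values, real field names)
msg = {
    "type": "analysis_report",
    "report": [
        {"type": "block", "offset": "0x0", "name": "DSPHDROUT",
         "declared_payload_bytes": 2048, "real_block_size_bytes": 2208,
         "mismatch": False},
        {"type": "error", "offset": "0x8a0",
         "message": "Marker found, but header is invalid or unreadable."},
    ],
    "stats": {
        "total_blocks_found": 1,
        "size_mismatches": 0,
        "block_type_counts": {"DSPHDROUT": 1},
    },
}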
@@ -615,6 +666,7 @@ class MainWindow(tk.Frame):
                     self.analyzer_progress_text_var.set(
                         f"Analyzing file {file_num} / {total_files}"
                     )
+
 
             elif msg_type == "segment_progress":
                 self.segments_done_count += 1
                 total_segs = self.controller.total_segments_for_export
@@ -624,6 +676,7 @@ class MainWindow(tk.Frame):
                     self.analyzer_progress_text_var.set(
                         f"Exported {self.segments_done_count} / {total_segs}"
                     )
+
 
             elif msg_type == "batch_progress":
                 current, total = msg.get("current", 0), msg.get("total", 0)
                 if total > 0:
@@ -632,18 +685,23 @@ class MainWindow(tk.Frame):
                     self.segment_processor_tab.progress_text_var.set(
                         f"Processing segment {current} of {total}: {msg.get('segment_name', '')}"
                     )
 
             elif msg_type == "cpp_complete":
                 self.controller.handle_final_analysis_steps()
 
             elif msg_type == "analysis_summary_data":
                 self.controller.handle_analysis_summary_data(msg)
 
             elif msg_type in ("success", "complete", "error"):
-                self.progress_bar_var.set(100)
-                self.analyzer_progress_var.set(100)
-                self.analyzer_progress_text_var.set("Done")
-                if hasattr(self, "segment_processor_tab"):
-                    self.segment_processor_tab.progress_var.set(100)
-                    self.segment_processor_tab.progress_text_var.set("Finished.")
-                self.controller.handle_worker_completion(msg)
+                # Generic end-of-process handling for the standard workflows
+                if msg.get("type") != "analysis_report":
+                    self.progress_bar_var.set(100)
+                    self.analyzer_progress_var.set(100)
+                    self.analyzer_progress_text_var.set("Done")
+                    if hasattr(self, "segment_processor_tab"):
+                        self.segment_processor_tab.progress_var.set(100)
+                        self.segment_processor_tab.progress_text_var.set("Finished.")
+                    self.controller.handle_worker_completion(msg)
 
         except queue.Empty:
             pass
@@ -655,6 +713,13 @@ class MainWindow(tk.Frame):
             )
 
+        if self.controller.is_processing:
+            # For analysis mode, give generic visual feedback
+            if self.out_analysis_only_var.get():
+                current_progress = self.progress_bar_var.get()
+                if current_progress < 95:  # Avoid hitting 100% before the end
+                    self.progress_bar_var.set(current_progress + 1)
+                    self.progress_text_var.set("Analyzing structure...")
+
         self.after(100, self.poll_result_queue)
 
     def populate_timeline_from_dataframe(self, summary_df: "pd.DataFrame"):