Move save-file handling into the dumper

VALLONGOL 2025-05-28 10:57:19 +02:00
parent 42401b115f
commit 6fa0ce0fea
5 changed files with 2134 additions and 2791 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -10,7 +10,7 @@ from typing import Dict, Any, Optional, Callable, List, Tuple
from .gdb_controller import GDBSession
from .config_manager import AppSettings
from .output_formatter import save_to_json as save_data_to_json_file
from .output_formatter import save_to_csv as save_data_to_csv_file
from .output_formatter import save_to_csv as save_data_to_csv_file # used below for the JSON-to-CSV conversion step
logger = logging.getLogger(__name__)
@@ -32,7 +32,7 @@ class ProfileExecutor:
app_settings: AppSettings,
status_update_callback: Optional[Callable[[str], None]] = None,
gdb_output_callback: Optional[Callable[[str], None]] = None,
json_output_callback: Optional[Callable[[Any], None]] = None,
json_output_callback: Optional[Callable[[Any], None]] = None, # now receives the dumper's status JSON
execution_log_callback: Optional[Callable[[ExecutionLogEntry], None]] = None,
):
self.profile = profile_data
@@ -58,6 +58,7 @@ class ProfileExecutor:
self.gdb_output_writer = (
gdb_output_callback if gdb_output_callback else self._default_gdb_output
)
# json_data_handler will now receive the status payload from the dumper
self.json_data_handler = (
json_output_callback if json_output_callback else self._default_json_data
)
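# Example of the status payload this handler now receives (shape inferred
# from the dump-handling code further down; the exact keys are produced by
# the GDB dumper script):
#   {"status": "success", "filepath_written": "<run_dir>/bp_var_ts.json"}
#   {"status": "error", "message": "...", "details": "..."}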
@@ -77,8 +78,8 @@
def _default_gdb_output(self, msg: str):
logger.debug(f"GDB Output: {msg}")
def _default_json_data(self, data: Any):
logger.debug(f"JSON Data: {str(data)[:200]}")
def _default_json_data(self, data: Any): # data is now status payload
logger.debug(f"Dumper Status/JSON Data: {str(data)[:200]}")
def _default_execution_log(self, entry: ExecutionLogEntry):
logger.info(f"Execution Log: {entry}")
@@ -95,11 +96,12 @@ class ProfileExecutor:
self,
breakpoint_loc_spec: str,
variable_name: str,
file_path: str,
status: str,
final_file_path: str, # Path of the final file (JSON or CSV)
status: str, # e.g. "Success", "GDB Dump Failed", "CSV Conversion Failed" (matching the values assigned below)
gdb_bp_num: Optional[int] = None,
address: Optional[str] = None,
details: str = "",
original_json_path: Optional[str] = None # Path to the JSON written by GDB
) -> None:
entry: ExecutionLogEntry = {
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
@@ -107,20 +109,23 @@
"gdb_bp_num": str(gdb_bp_num) if gdb_bp_num is not None else "N/A",
"address": address if address else "N/A",
"variable": variable_name,
"file_produced": os.path.basename(file_path) if file_path else "N/A",
"full_path": file_path if file_path else "N/A",
"file_produced": os.path.basename(final_file_path) if final_file_path else "N/A",
"full_path": final_file_path if final_file_path else "N/A",
"status": status,
"details": details,
"raw_json_path_by_gdb": original_json_path if original_json_path else (final_file_path if status=="Success" and final_file_path and final_file_path.endswith(".json") else "N/A")
}
self.produced_files_log.append(entry)
self.execution_log_adder(entry)
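# Illustrative entry (invented values; fields as assembled above):
# {"timestamp": "2025-05-28 10:57:19", "gdb_bp_num": "1", "address": "0x401136",
#  "variable": "cfg", "file_produced": "main_c_42_cfg_20250528_105719_123.csv",
#  "full_path": "<run_dir>/main_c_42_cfg_20250528_105719_123.csv",
#  "status": "Success", "details": "",
#  "raw_json_path_by_gdb": "<run_dir>/main_c_42_cfg_20250528_105719_123.json"}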
def _get_setting(
self, category: str, key: str, default: Optional[Any] = None
) -> Any:
return self.app_settings.get_setting(category, key, default)
def _get_dumper_options(self) -> Dict[str, Any]:
# These are general options, not the per-dump file path
return self.app_settings.get_category_settings("dumper_options", {})
def _generate_output_filename(
@@ -129,7 +134,9 @@
profile_name: str,
bp_loc_spec: str,
var_name: str,
file_format: str,
# file_format_extension_without_dot is the *final* desired format (json or csv);
# the dumper always creates .json, so this only controls the final filename.
file_format_extension_without_dot: str,
) -> str:
timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
placeholders = {
@@ -140,14 +147,20 @@
"{breakpoint}": sanitize_filename_component(bp_loc_spec),
"{variable}": sanitize_filename_component(var_name),
"{timestamp}": timestamp_str,
"{format}": file_format.lower(),
# {format} will be replaced by the actual extension needed
"{format}": file_format_extension_without_dot.lower(),
}
filename = pattern
for ph, val in placeholders.items():
filename = filename.replace(ph, val)
if not filename.lower().endswith(f".{file_format.lower()}"):
filename += f".{file_format.lower()}"
return filename
# Ensure the final filename has the correct extension based on file_format_extension_without_dot
# Remove any existing extension and add the correct one.
name_part, _ = os.path.splitext(filename)
final_filename = f"{name_part}.{file_format_extension_without_dot.lower()}"
return final_filename
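# Worked example (hypothetical values): with the default pattern
# "{breakpoint}_{variable}_{timestamp}.{format}", bp_loc_spec="main.c:42",
# var_name="cfg" and file_format_extension_without_dot="csv", this returns
# something like "main_c_42_cfg_20250528_105719_123.csv"; even a pattern that
# expanded to ".json" is renamed to ".csv" by the splitext() step above.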
def _prepare_output_directory(
self, base_output_dir_from_action: str, profile_name: str
@@ -176,69 +189,30 @@
)
return None
def _parse_gdb_set_breakpoint_output(
self, gdb_output: str
) -> Optional[Tuple[int, str]]:
if not gdb_output:
return None
match = re.search(
r"Breakpoint\s+(\d+)\s+at\s+(0x[0-9a-fA-F]+)", gdb_output, re.IGNORECASE
)
if match:
bp_num = int(match.group(1))
address = match.group(2).lower()
return bp_num, address
match_pending = re.search(
r"Breakpoint\s+(\d+)\s+pending", gdb_output, re.IGNORECASE
)
if match_pending:
bp_num = int(match_pending.group(1))
return bp_num, "pending"
logger.warning(
f"Could not parse GDB breakpoint number and address from set_breakpoint output: '{gdb_output[:200]}'"
)
def _parse_gdb_set_breakpoint_output(self, gdb_output: str) -> Optional[Tuple[int, str]]:
if not gdb_output: return None
match = re.search(r"Breakpoint\s+(\d+)\s+at\s+(0x[0-9a-fA-F]+)", gdb_output, re.IGNORECASE)
if match: return int(match.group(1)), match.group(2).lower()
match_pending = re.search(r"Breakpoint\s+(\d+)\s+pending", gdb_output, re.IGNORECASE)
if match_pending: return int(match_pending.group(1)), "pending"
logger.warning(f"Could not parse GDB BP num and addr from output: '{gdb_output[:200]}'")
return None
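# Output shapes these regexes match (typical GDB phrasing):
#   "Breakpoint 1 at 0x401136: file main.c, line 42."  -> (1, "0x401136")
#   "Breakpoint 2 pending."                            -> (2, "pending")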
def _parse_breakpoint_hit_output(self, gdb_output: str) -> Optional[int]:
if not gdb_output:
return None
match = re.search(
r"Thread\s+\S+\s+hit\s+Breakpoint\s+(\d+)", gdb_output, re.IGNORECASE
)
if match:
return int(match.group(1))
match = re.search(
r"Breakpoint\s+(\d+)[,\s]", gdb_output
)
if match:
return int(match.group(1))
logger.debug(
f"Could not parse GDB breakpoint number from hit output: '{gdb_output[:200]}...'"
)
if not gdb_output: return None
match_thread_hit = re.search(r"Thread\s+\S+\s+hit\s+Breakpoint\s+(\d+)", gdb_output, re.IGNORECASE)
if match_thread_hit: return int(match_thread_hit.group(1))
match_simple_hit = re.search(r"Breakpoint\s+(\d+)[,\s]", gdb_output) # Simpler match as fallback
if match_simple_hit: return int(match_simple_hit.group(1))
logger.debug(f"Could not parse GDB BP num from hit output: '{gdb_output[:200]}...'")
return None
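# Stop outputs matched above (illustrative):
#   "Thread 1 hit Breakpoint 3, main () at main.c:42"  -> 3  (thread form)
#   "Breakpoint 3, main () at main.c:42"               -> 3  (fallback form)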
def _check_program_exited_from_output(self, gdb_output: str) -> bool:
"""
Checks GDB output for signs that the entire program/inferior has exited.
More specific than just "exited with code", which can also apply to threads.
"""
# Pattern for GDB indicating the inferior process itself exited
# Example: "[Inferior 1 (process 1234) exited normally]"
# Example: "[Inferior 1 (process 1234) exited with code 01]"
# Example: "Program exited normally." (often seen when GDB quits the debugged program)
# Example: "Program terminated with signal SIGINT, Interrupt."
# Example: "Remote communication error. Target disconnected.: Connection reset by peer." (if remote debugging)
# Regex for inferior exit messages
inferior_exit_pattern = r"\[Inferior\s+\d+\s+\(process\s+\d+\)\s+exited"
# General program exit messages from GDB
program_exit_patterns = [
r"Program exited normally\.",
r"Program exited with code .*\.",
r"Program terminated with signal .*\.",
r"Remote communication error\." # For remote debugging scenarios
r"Program exited normally\.", r"Program exited with code .*\.",
r"Program terminated with signal .*\.", r"Remote communication error\."
]
if re.search(inferior_exit_pattern, gdb_output, re.IGNORECASE):
logger.info("Detected inferior exit from GDB output.")
return True
@@ -246,44 +220,24 @@
if re.search(pattern, gdb_output, re.IGNORECASE):
logger.info(f"Detected program exit via pattern: '{pattern}'")
return True
# A bare (gdb) prompt after 'run'/'continue', with no stop/signal output, can
# also mean the program finished without an explicit "Program exited" message.
# That subtler case is not handled here; for now we rely on explicit messages.
return False
def run(self) -> None:
profile_name = self.profile.get("profile_name", "Unnamed Profile")
self._log_event(f"Starting profile: '{profile_name}'...", True)
self.is_running = True
self._stop_requested = False
self.produced_files_log.clear()
self.execution_event_log.clear()
self.gdb_bp_num_to_details_map.clear()
self.address_to_action_indices_map.clear()
self.is_running = True; self._stop_requested = False
self.produced_files_log.clear(); self.execution_event_log.clear()
self.gdb_bp_num_to_details_map.clear(); self.address_to_action_indices_map.clear()
self.profile_execution_summary = {
"profile_name": profile_name,
"target_executable": self.profile.get("target_executable"),
"program_parameters": self.profile.get("program_parameters"),
"start_time": datetime.now().isoformat(),
"end_time": None,
"status": "Initialized",
"actions_summary": [
{"action_index": i,
"breakpoint_spec": action.get("breakpoint_location", "N/A"),
"gdb_bp_num_assigned": None,
"address_resolved": None,
"variables_dumped_count": 0,
"hit_count": 0,
"status": "Pending"}
for i, action in enumerate(self.profile.get("actions", []))
],
"execution_log": [],
"files_produced_detailed": []
"profile_name": profile_name, "target_executable": self.profile.get("target_executable"),
"program_parameters": self.profile.get("program_parameters"), "start_time": datetime.now().isoformat(),
"end_time": None, "status": "Initialized",
"actions_summary": [{"action_index": i, "breakpoint_spec": action.get("breakpoint_location", "N/A"),
"gdb_bp_num_assigned": None, "address_resolved": None,
"variables_dumped_count": 0, "hit_count": 0, "status": "Pending"}
for i, action in enumerate(self.profile.get("actions", []))],
"execution_log": [], "files_produced_detailed": []
}
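# A minimal profile shape this executor consumes (keys as read elsewhere in
# this file; values are illustrative):
# {"profile_name": "demo", "target_executable": "/path/to/app",
#  "program_parameters": "--verbose",
#  "actions": [{"breakpoint_location": "main.c:42", "output_directory": "./dumps",
#               "variables_to_dump": ["cfg", "state"],
#               "filename_pattern": "{breakpoint}_{variable}_{timestamp}.{format}",
#               "output_format": "csv", "dump_on_every_hit": True,
#               "continue_after_dump": True}]}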
gdb_exe = self._get_setting("general", "gdb_executable_path")
@@ -291,48 +245,34 @@
gdb_script_path = self._get_setting("general", "gdb_dumper_script_path")
if not target_exe or not os.path.exists(target_exe):
msg = f"Error: Target executable '{target_exe}' not found for profile '{profile_name}'."
self._log_event(msg, True)
self.profile_execution_summary["status"] = "Error: Target not found"
self.is_running = False
self._finalize_summary_report(None)
return
msg = f"Error: Target executable '{target_exe}' not found for profile '{profile_name}'."; self._log_event(msg, True)
self.profile_execution_summary["status"] = "Error: Target not found"; self.is_running = False
self._finalize_summary_report(None); return
actions = self.profile.get("actions", [])
if not actions:
self._log_event(f"Profile '{profile_name}' has no actions defined. Stopping.", True)
self.profile_execution_summary["status"] = "Error: No actions"
self.is_running = False
self._finalize_summary_report(None)
return
self._log_event(f"Profile '{profile_name}' has no actions. Stopping.", True)
self.profile_execution_summary["status"] = "Error: No actions"; self.is_running = False
self._finalize_summary_report(None); return
base_output_dir = "."
if actions and "output_directory" in actions[0]: # Use first action's output dir as base
base_output_dir = actions[0].get("output_directory", ".")
base_output_dir = actions[0].get("output_directory", ".") if actions else "."
self.current_run_output_path = self._prepare_output_directory(base_output_dir, profile_name)
if not self.current_run_output_path:
self.profile_execution_summary["status"] = "Error: Cannot create output directory"
self.profile_execution_summary["status"] = "Error: Output dir creation failed"
self.profile_execution_summary["end_time"] = datetime.now().isoformat()
self._finalize_summary_report(self.current_run_output_path)
self.is_running = False
return
self._finalize_summary_report(self.current_run_output_path); self.is_running = False; return
try:
self.gdb_session = GDBSession(
gdb_path=gdb_exe, executable_path=target_exe,
gdb_script_full_path=gdb_script_path, dumper_options=self._get_dumper_options()
)
self.gdb_session = GDBSession(gdb_path=gdb_exe, executable_path=target_exe,
gdb_script_full_path=gdb_script_path,
dumper_options=self._get_dumper_options())
startup_timeout = self._get_setting("timeouts", "gdb_start", 30)
self._log_event(f"Spawning GDB for '{os.path.basename(target_exe)}'...", True)
self.gdb_session.start(timeout=startup_timeout)
if not self.gdb_session.symbols_found:
msg = (f"Error for profile '{profile_name}': No debugging symbols found in "
f"'{os.path.basename(target_exe)}'. Profile execution aborted.")
self._log_event(msg, True)
self.profile_execution_summary["status"] = "Error: No Debug Symbols"
return
msg = f"Error: No debugging symbols in '{os.path.basename(target_exe)}'. Profile aborted."
self._log_event(msg, True); self.profile_execution_summary["status"] = "Error: No Debug Symbols"; return
self._log_event("GDB session started.", False)
if gdb_script_path and self.gdb_session.gdb_script_sourced_successfully:
@@ -346,281 +286,214 @@
if self._stop_requested: break
bp_spec = action_config.get("breakpoint_location")
action_summary = self.profile_execution_summary["actions_summary"][action_idx]
if not bp_spec:
self._log_event(f"Action {action_idx + 1}: No breakpoint location. Skipping.", False)
action_summary["status"] = "Skipped (No BP Spec)"
continue
self._log_event(f"Action {action_idx + 1}: No BP. Skipping.", False)
action_summary["status"] = "Skipped (No BP Spec)"; continue
self._log_event(f"Setting BP for Action {action_idx + 1} ('{bp_spec}')...", False)
bp_set_output = self.gdb_session.set_breakpoint(bp_spec, timeout=cmd_timeout)
self.gdb_output_writer(bp_set_output)
parsed_bp_info = self._parse_gdb_set_breakpoint_output(bp_set_output)
if parsed_bp_info:
gdb_bp_num, address_str = parsed_bp_info
action_summary["gdb_bp_num_assigned"] = gdb_bp_num
action_summary["address_resolved"] = address_str
self.gdb_bp_num_to_details_map[gdb_bp_num] = {
"address": address_str,
"action_index": action_idx,
"bp_spec": bp_spec
}
action_summary["gdb_bp_num_assigned"] = gdb_bp_num; action_summary["address_resolved"] = address_str
self.gdb_bp_num_to_details_map[gdb_bp_num] = {"address": address_str, "action_index": action_idx, "bp_spec": bp_spec }
if address_str != "pending":
if address_str not in self.address_to_action_indices_map:
self.address_to_action_indices_map[address_str] = []
if action_idx not in self.address_to_action_indices_map[address_str]:
self.address_to_action_indices_map[address_str].append(action_idx)
self._log_event(f"Action {action_idx+1} ('{bp_spec}'): GDB BP {gdb_bp_num} at {address_str}.", False)
num_successfully_mapped_breakpoints +=1
if address_str not in self.address_to_action_indices_map: self.address_to_action_indices_map[address_str] = []
if action_idx not in self.address_to_action_indices_map[address_str]: self.address_to_action_indices_map[address_str].append(action_idx)
self._log_event(f"Action {action_idx+1} ('{bp_spec}'): GDB BP {gdb_bp_num} at {address_str}.", False); num_successfully_mapped_breakpoints +=1
else:
self._log_event(f"Action {action_idx+1} ('{bp_spec}'): GDB BP {gdb_bp_num} is PENDING. Will not trigger until resolved.", False)
action_summary["status"] = "Pending in GDB"
self._log_event(f"Action {action_idx+1} ('{bp_spec}'): GDB BP {gdb_bp_num} PENDING.", False); action_summary["status"] = "Pending in GDB"
else:
self._log_event(f"Error: Action {action_idx + 1}: Failed to parse GDB BP info for '{bp_spec}'. Output: {bp_set_output[:100]}", True)
action_summary["status"] = "Error (BP Set/Parse)"
self._log_event(f"Error: Action {action_idx + 1}: Failed GDB BP parse for '{bp_spec}'.", True); action_summary["status"] = "Error (BP Set/Parse)"
if self._stop_requested: raise InterruptedError("User requested stop during BP setup.")
if num_successfully_mapped_breakpoints == 0:
self._log_event("No non-pending breakpoints successfully mapped. Aborting profile.", True)
self.profile_execution_summary["status"] = "Error: No BPs Mapped"
return
self._log_event("No non-pending BPs mapped. Aborting.", True)
self.profile_execution_summary["status"] = "Error: No BPs Mapped"; return
program_params = self.profile.get("program_parameters", "")
self._log_event(f"Running program '{os.path.basename(target_exe)} {program_params}'...", True)
run_timeout = self._get_setting("timeouts", "program_run_continue", 120)
gdb_output = self.gdb_session.run_program(program_params, timeout=run_timeout)
self.gdb_output_writer(gdb_output)
program_has_exited = self._check_program_exited_from_output(gdb_output) # MODIFIED
if program_has_exited:
self._log_event(f"Program exited on initial run. Output: {gdb_output[:250]}", True) # Increased log length
program_has_exited = self._check_program_exited_from_output(gdb_output)
if program_has_exited: self._log_event(f"Program exited on initial run. Output: {gdb_output[:250]}", True)
while self.gdb_session.is_alive() and not program_has_exited and not self._stop_requested:
hit_gdb_bp_num = self._parse_breakpoint_hit_output(gdb_output)
current_pc_address: Optional[str] = None
# Only query PC if we actually hit a breakpoint or stopped for some reason
# and are not about to exit the loop due to program_has_exited.
if self.gdb_session and self.gdb_session.is_alive() and not program_has_exited and hit_gdb_bp_num:
try:
pc_out = self.gdb_session.send_cmd("p/x $pc", expect_prompt=True, timeout=cmd_timeout)
self.gdb_output_writer(f"$pc query: {pc_out}\n")
pc_match = re.search(r"=\s*(0x[0-9a-fA-F]+)", pc_out)
if pc_match:
current_pc_address = pc_match.group(1).lower()
self._log_event(f"Current PC: {current_pc_address}", False)
except Exception as e_pc:
self._log_event(f"Could not get current PC: {e_pc}", False)
if pc_match: current_pc_address = pc_match.group(1).lower(); self._log_event(f"Current PC: {current_pc_address}", False)
except Exception as e_pc: self._log_event(f"Could not get PC: {e_pc}", False)
actions_to_process_at_this_stop: List[int] = []
hit_bp_details_for_log = "N/A"
if hit_gdb_bp_num is not None and hit_gdb_bp_num in self.gdb_bp_num_to_details_map:
bp_details = self.gdb_bp_num_to_details_map[hit_gdb_bp_num]
address_of_hit = bp_details["address"] # This should be the resolved address
address_of_hit = bp_details["address"]
hit_bp_details_for_log = f"GDB BP {hit_gdb_bp_num} ('{bp_details['bp_spec']}') at {address_of_hit}"
# Ensure current_pc_address matches the breakpoint's resolved address if possible, or use resolved address
effective_address_for_action_lookup = current_pc_address if current_pc_address else address_of_hit
if effective_address_for_action_lookup != "pending" and \
effective_address_for_action_lookup in self.address_to_action_indices_map:
actions_to_process_at_this_stop.extend(self.address_to_action_indices_map[effective_address_for_action_lookup])
elif current_pc_address and current_pc_address in self.address_to_action_indices_map:
# This case handles if we stopped for a reason other than a numbered BP but PC matches a mapped BP address
actions_to_process_at_this_stop.extend(self.address_to_action_indices_map[current_pc_address])
hit_bp_details_for_log = f"PC {current_pc_address} (mapped to actions)"
hit_bp_details_for_log = f"PC {current_pc_address} (mapped actions)"
if actions_to_process_at_this_stop:
self._log_event(f"Processing stop at {hit_bp_details_for_log}.", True)
unique_action_indices_to_process = sorted(list(set(actions_to_process_at_this_stop)))
should_continue_after_all_these_actions = True # Default to continue
should_continue_after_all_these_actions = True
for action_idx in unique_action_indices_to_process:
if self._stop_requested: break
current_action_config = actions[action_idx]
action_summary = self.profile_execution_summary["actions_summary"][action_idx]
dump_on_every_hit = current_action_config.get("dump_on_every_hit", True)
action_already_completed_once = action_summary["status"].startswith("Completed")
if action_already_completed_once and not dump_on_every_hit:
self._log_event(f"Action {action_idx + 1} ('{current_action_config.get('breakpoint_location')}') previously completed and dump_on_every_hit is False. Skipping.", False)
if not current_action_config.get("continue_after_dump", True):
should_continue_after_all_these_actions = False
if action_summary["status"].startswith("Completed") and not current_action_config.get("dump_on_every_hit", True):
self._log_event(f"Action {action_idx + 1} skipped (completed, !dump_on_every_hit).", False)
if not current_action_config.get("continue_after_dump", True): should_continue_after_all_these_actions = False
continue
self._log_event(f"Executing Action {action_idx + 1} ('{current_action_config.get('breakpoint_location')}')...", False)
action_summary["status"] = "Processing Dumps"
action_summary["hit_count"] += 1
vars_to_dump_for_action = current_action_config.get("variables_to_dump", [])
filename_pattern = current_action_config.get("filename_pattern", "{breakpoint}_{variable}_{timestamp}.{format}")
output_format_for_action = current_action_config.get("output_format", "json").lower()
action_summary["status"] = "Processing Dumps"; action_summary["hit_count"] += 1
vars_to_dump = current_action_config.get("variables_to_dump", [])
filename_pattern_cfg = current_action_config.get("filename_pattern", "{breakpoint}_{variable}_{timestamp}.{format}")
output_format_cfg = current_action_config.get("output_format", "json").lower()
bp_spec_for_file = current_action_config.get("breakpoint_location", "unknown_bp")
current_dump_success_count_for_this_hit = 0
for var_name in vars_to_dump_for_action:
current_dump_success_count = 0
for var_name in vars_to_dump:
if self._stop_requested: break
dump_timeout = self._get_setting("timeouts", "dump_variable", 60)
dumped_data = None; file_save_path = ""; dump_status_msg = "Failed"; dump_details_msg = ""
if not self.gdb_session.gdb_script_sourced_successfully and output_format_for_action == "json":
msg = f"Dumper script unavailable for '{var_name}' (JSON)."
self._log_event(msg, False); dump_details_msg = msg
self.json_data_handler({"_profile_executor_error": msg, "variable": var_name})
else:
dumped_data = self.gdb_session.dump_variable_to_json(var_name, timeout=dump_timeout)
self.json_data_handler(dumped_data)
if isinstance(dumped_data, dict) and "_gdb_tool_error" in dumped_data:
err_detail = dumped_data.get("details", dumped_data["_gdb_tool_error"])
self._log_event(f"Error dumping '{var_name}': {err_detail}", False); dump_details_msg = f"GDB Tool Error: {err_detail}"
if "raw_gdb_output" in dumped_data:
self.gdb_output_writer(f"--- Raw GDB output for failed dump of '{var_name}' ---\n{dumped_data['raw_gdb_output']}\n--- End ---\n")
elif dumped_data is not None:
output_filename = self._generate_output_filename(filename_pattern, profile_name, bp_spec_for_file, var_name, output_format_for_action)
file_save_path = os.path.join(self.current_run_output_path, output_filename)
try:
if output_format_for_action == "json": save_data_to_json_file(dumped_data, file_save_path)
elif output_format_for_action == "csv":
data_for_csv = dumped_data
if isinstance(data_for_csv, dict) and not isinstance(data_for_csv, list): data_for_csv = [data_for_csv]
elif not isinstance(data_for_csv, list): data_for_csv = [{"value": data_for_csv}]
elif isinstance(data_for_csv, list) and data_for_csv and not all(isinstance(item, dict) for item in data_for_csv): data_for_csv = [{"value": item} for item in data_for_csv]
save_data_to_csv_file(data_for_csv, file_save_path)
else: raise ValueError(f"Unsupported format: {output_format_for_action}")
self._log_event(f"Saved '{var_name}' to '{output_filename}'.", False); dump_status_msg = "Success"; current_dump_success_count_for_this_hit += 1
except Exception as save_e:
self._log_event(f"Error saving dump of '{var_name}': {save_e}", False); dump_details_msg = f"Save Error: {save_e}"
else:
self._log_event(f"Dump of '{var_name}' returned no data.", False); dump_details_msg = "Dump returned no data"
self._add_produced_file_entry(bp_spec_for_file, var_name, file_save_path, dump_status_msg,
gdb_bp_num=hit_gdb_bp_num, address=current_pc_address, details=dump_details_msg)
action_summary["variables_dumped_count"] += current_dump_success_count_for_this_hit
if current_dump_success_count_for_this_hit == len(vars_to_dump_for_action) and vars_to_dump_for_action:
action_summary["status"] = "Completed"
elif not vars_to_dump_for_action:
action_summary["status"] = "Completed (No Vars)"
else:
action_summary["status"] = "Completed with Errors"
# Path where GDB dumper will write the JSON file
# GDB dumper always writes JSON, conversion to CSV is done after by ProfileExecutor
gdb_dumper_json_filename = self._generate_output_filename(filename_pattern_cfg, profile_name, bp_spec_for_file, var_name, "json")
gdb_dumper_target_json_filepath = os.path.join(self.current_run_output_path, gdb_dumper_json_filename)
if not current_action_config.get("continue_after_dump", True):
should_continue_after_all_these_actions = False # If any action says not to continue, we stop
dump_status_payload = self.gdb_session.dump_variable_to_json(
var_name,
timeout=dump_timeout,
target_output_filepath=gdb_dumper_target_json_filepath, # Pass path to GDB
target_output_format=output_format_cfg # Pass final desired format
)
self.json_data_handler(dump_status_payload) # Show status payload in GUI
final_file_path_for_log = ""
log_status_msg = "Failed"; log_details_msg = ""; original_json_path_for_log = None
if dump_status_payload.get("status") == "success":
original_json_path_for_log = dump_status_payload.get("filepath_written")
if not original_json_path_for_log: # Should not happen if status is success
log_status_msg = "Error"; log_details_msg = "Dumper success but no filepath in status."; self._log_event(f"Dumper reported success for '{var_name}' but no filepath_written in status.", True)
elif output_format_cfg == "json":
final_file_path_for_log = original_json_path_for_log
log_status_msg = "Success"; current_dump_success_count += 1
self._log_event(f"Saved '{var_name}' to '{os.path.basename(final_file_path_for_log)}' (JSON by GDB).", False)
elif output_format_cfg == "csv":
csv_filename = self._generate_output_filename(filename_pattern_cfg, profile_name, bp_spec_for_file, var_name, "csv")
csv_filepath = os.path.join(self.current_run_output_path, csv_filename)
final_file_path_for_log = csv_filepath
try:
with open(original_json_path_for_log, 'r', encoding='utf-8') as f_json_in:
json_data_for_csv = json.load(f_json_in)
data_for_csv_list = json_data_for_csv
if isinstance(json_data_for_csv, dict) and not isinstance(json_data_for_csv, list): data_for_csv_list = [json_data_for_csv]
elif not isinstance(json_data_for_csv, list): data_for_csv_list = [{"value": json_data_for_csv}]
elif isinstance(json_data_for_csv, list) and json_data_for_csv and not all(isinstance(item, dict) for item in json_data_for_csv):
data_for_csv_list = [{"value": item} for item in json_data_for_csv]
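# Normalization above, illustrated with hypothetical inputs:
#   {"a": 1}             -> [{"a": 1}]                    (single dict wrapped)
#   42                   -> [{"value": 42}]               (scalar wrapped)
#   [1, 2]               -> [{"value": 1}, {"value": 2}]  (non-dict items wrapped)
#   [{"a": 1}, {"b": 2}] -> unchanged (already a list of dicts)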
save_data_to_csv_file(data_for_csv_list, csv_filepath)
log_status_msg = "Success"; current_dump_success_count += 1
self._log_event(f"Converted and saved '{var_name}' to '{os.path.basename(csv_filepath)}' (CSV).", False)
except Exception as csv_e:
log_status_msg = "CSV Conversion Failed"; log_details_msg = f"CSV Error: {csv_e}"
self._log_event(f"Error converting/saving CSV for '{var_name}': {csv_e}", True)
else: # Unknown format, should not happen with combobox
log_status_msg = "Error"; log_details_msg = f"Unsupported format '{output_format_cfg}' for '{var_name}'."
self._log_event(log_details_msg, True)
else: # Dump status was 'error'
err_detail_from_payload = dump_status_payload.get("details", dump_status_payload.get("message", "GDB dumper script reported an error."))
log_status_msg = "GDB Dump Failed"; log_details_msg = f"Dumper Error: {err_detail_from_payload}"
self._log_event(f"Error dumping '{var_name}': {err_detail_from_payload}", True)
if "raw_gdb_output" in dump_status_payload: # This key might not exist with new dumper logic
self.gdb_output_writer(f"--- Raw GDB output for failed dump of '{var_name}' ---\n{dump_status_payload['raw_gdb_output']}\n--- End ---\n")
self._add_produced_file_entry(bp_spec_for_file, var_name, final_file_path_for_log, log_status_msg,
gdb_bp_num=hit_gdb_bp_num, address=current_pc_address, details=log_details_msg,
original_json_path=original_json_path_for_log)
action_summary["variables_dumped_count"] += current_dump_success_count
if current_dump_success_count == len(vars_to_dump) and vars_to_dump: action_summary["status"] = "Completed"
elif not vars_to_dump: action_summary["status"] = "Completed (No Vars)"
else: action_summary["status"] = "Completed with Errors"
if not current_action_config.get("continue_after_dump", True): should_continue_after_all_these_actions = False
if self._stop_requested: break # Break from main while loop if stop requested during action processing
if self._stop_requested: break
if should_continue_after_all_these_actions:
self._log_event(f"Continuing after processing actions at {hit_bp_details_for_log}...", True)
gdb_output = self.gdb_session.continue_execution(timeout=run_timeout)
self.gdb_output_writer(gdb_output)
program_has_exited = self._check_program_exited_from_output(gdb_output) # MODIFIED
if program_has_exited:
self._log_event(f"Program exited after continue. Output: {gdb_output[:250]}", True)
program_has_exited = self._check_program_exited_from_output(gdb_output)
if program_has_exited: self._log_event(f"Program exited after continue. Output: {gdb_output[:250]}", True)
else:
self._log_event(f"Execution halted after processing actions at {hit_bp_details_for_log} as per profile.", True)
program_has_exited = True # Treat as program exit for the loop
elif self._check_program_exited_from_output(gdb_output): # MODIFIED: Check if GDB indicated program exit
program_has_exited = True
self._log_event(f"Program exited. Output: {gdb_output[:250]}", True)
elif "received signal" in gdb_output.lower() and "SIGINT" not in gdb_output.upper(): # Ignore SIGINT from manual ctrl-c in GDB console
program_has_exited = True
self._log_event(f"Program received signal. Output: {gdb_output[:250]}", True)
self._log_event(f"Execution halted after actions at {hit_bp_details_for_log} as per profile.", True)
program_has_exited = True
elif self._check_program_exited_from_output(gdb_output):
program_has_exited = True; self._log_event(f"Program exited. Output: {gdb_output[:250]}", True)
elif "received signal" in gdb_output.lower() and "SIGINT" not in gdb_output.upper():
program_has_exited = True; self._log_event(f"Program received signal. Output: {gdb_output[:250]}", True)
self.profile_execution_summary["status"] = "Completed (Program Signalled/Crashed)"
elif not hit_gdb_bp_num and not self._check_program_exited_from_output(gdb_output) and not self._stop_requested:
# Program stopped without a recognized breakpoint or exit (e.g. a signal or
# an unexpected GDB state). The loop relies on continue_execution's output
# to set program_has_exited or yield a BP number; if neither appears and GDB
# is alive, we keep looping and expect the next continue to hit a breakpoint,
# exit, or time out in send_cmd.
logger.warning(f"GDB output after 'continue' did not indicate a breakpoint or program exit. Raw output: {gdb_output[:300]}")
pass # No action and no exit; let the next continue handle it.
logger.warning(f"GDB output after 'continue' did not indicate BP or exit. Raw: {gdb_output[:300]}")
if program_has_exited: break
final_status = "Completed"
if program_has_exited and not self._stop_requested:
if any(s["status"] == "Pending" or s["status"] == "Pending in GDB" for s in self.profile_execution_summary["actions_summary"]):
final_status = "Completed (Program Exited Prematurely)"
if self.profile_execution_summary["status"] not in ["Initialized", "Error: No BPs Mapped", "Error: No Debug Symbols"]:
if "Crashed" in self.profile_execution_summary["status"] or "Signalled" in self.profile_execution_summary["status"]:
pass # Keep the more specific status
else:
self.profile_execution_summary["status"] = final_status
elif self._stop_requested:
self.profile_execution_summary["status"] = "Completed (User Stopped)"
elif not (self.gdb_session and self.gdb_session.is_alive()) and not program_has_exited: # GDB died
self.profile_execution_summary["status"] = "Error: GDB Died Unexpectedly"
self._log_event("Error: GDB session died unexpectedly during execution.", True)
else: # Loop finished, GDB alive, not exited, not stopped by user -> implies all actions processed as per logic
if any(s["status"] == "Pending" for s in self.profile_execution_summary["actions_summary"]):
self.profile_execution_summary["status"] = "Completed (Some Actions Pending/Not Hit)"
else:
self.profile_execution_summary["status"] = "Completed (All Triggered Actions Processed)"
if any(s["status"] == "Pending" or s["status"] == "Pending in GDB" for s in self.profile_execution_summary["actions_summary"]): final_status = "Completed (Program Exited Prematurely)"
if self.profile_execution_summary["status"] not in ["Initialized", "Error: No BPs Mapped", "Error: No Debug Symbols"] and \
not ("Crashed" in self.profile_execution_summary["status"] or "Signalled" in self.profile_execution_summary["status"]):
self.profile_execution_summary["status"] = final_status
elif self._stop_requested: self.profile_execution_summary["status"] = "Completed (User Stopped)"
elif not (self.gdb_session and self.gdb_session.is_alive()) and not program_has_exited:
self.profile_execution_summary["status"] = "Error: GDB Died Unexpectedly"; self._log_event("Error: GDB session died unexpectedly.", True)
else:
if any(s["status"] == "Pending" for s in self.profile_execution_summary["actions_summary"]): self.profile_execution_summary["status"] = "Completed (Some Actions Pending/Not Hit)"
else: self.profile_execution_summary["status"] = "Completed (All Triggered Actions Processed)"
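# Terminal statuses assigned above include, for example: "Completed",
# "Completed (Program Exited Prematurely)", "Completed (User Stopped)",
# "Completed (Some Actions Pending/Not Hit)",
# "Completed (All Triggered Actions Processed)" and "Error: GDB Died Unexpectedly".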
except InterruptedError as ie:
self.profile_execution_summary["status"] = "Interrupted (User Stop)"
self._log_event(str(ie), True)
self.profile_execution_summary["status"] = "Interrupted (User Stop)"; self._log_event(str(ie), True)
except FileNotFoundError as fnf_e:
msg = f"Error running profile '{profile_name}': File not found - {fnf_e}"
self._log_event(msg, True); self.profile_execution_summary["status"] = f"Error: {fnf_e}"
msg = f"Error running profile '{profile_name}': File not found - {fnf_e}"; self._log_event(msg, True); self.profile_execution_summary["status"] = f"Error: {fnf_e}"
except (ConnectionError, TimeoutError) as session_e:
msg = f"Session error running profile '{profile_name}': {type(session_e).__name__} - {session_e}"
self._log_event(msg, True); self.profile_execution_summary["status"] = f"Error: {session_e}"
msg = f"Session error for profile '{profile_name}': {type(session_e).__name__} - {session_e}"; self._log_event(msg, True); self.profile_execution_summary["status"] = f"Error: {session_e}"
except Exception as e:
msg = f"Unexpected error running profile '{profile_name}': {type(e).__name__} - {e}"
self._log_event(msg, True); logger.critical(msg, exc_info=True)
self.profile_execution_summary["status"] = f"Critical Error: {e}"
msg = f"Unexpected error for profile '{profile_name}': {type(e).__name__} - {e}"; self._log_event(msg, True); logger.critical(msg, exc_info=True); self.profile_execution_summary["status"] = f"Critical Error: {e}"
finally:
self.profile_execution_summary["end_time"] = datetime.now().isoformat()
self.profile_execution_summary["execution_log"] = self.execution_event_log
self.profile_execution_summary["files_produced_detailed"] = self.produced_files_log
self._cleanup_session()
summary_file_path = self._finalize_summary_report(self.current_run_output_path)
final_gui_message = (f"Profile '{profile_name}' execution cycle finished. "
f"Status: {self.profile_execution_summary.get('status', 'Unknown')}. "
f"Summary report attempt at: {summary_file_path if summary_file_path else 'N/A (see logs)'}.")
self._log_event(final_gui_message, True)
self.is_running = False
final_gui_message = (f"Profile '{profile_name}' cycle finished. Status: {self.profile_execution_summary.get('status', 'Unknown')}. "
f"Summary: {summary_file_path if summary_file_path else 'N/A (see logs)'}.")
self._log_event(final_gui_message, True); self.is_running = False
def _finalize_summary_report(self, run_output_path: Optional[str]) -> Optional[str]:
if not run_output_path:
logger.warning("No run output path available, cannot save summary report to specific location.")
logger.info(f"Execution Summary for '{self.profile.get('profile_name')}':\n{json.dumps(self.profile_execution_summary, indent=2)}")
return None
logger.warning("No run output path, cannot save summary report."); logger.info(f"Exec Summary '{self.profile.get('profile_name')}':\n{json.dumps(self.profile_execution_summary, indent=2)}"); return None
sane_profile_name = sanitize_filename_component(self.profile.get("profile_name", "profile_run"))
summary_filename = f"_{sane_profile_name}_summary_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
summary_filepath = os.path.join(run_output_path, summary_filename)
try:
with open(summary_filepath, 'w', encoding='utf-8') as f_summary:
json.dump(self.profile_execution_summary, f_summary, indent=2, ensure_ascii=False)
logger.info(f"Execution summary report saved to: {summary_filepath}")
return summary_filepath
logger.info(f"Execution summary report saved: {summary_filepath}"); return summary_filepath
except Exception as e:
logger.error(f"Failed to save execution summary report to '{summary_filepath}': {e}")
return None
logger.error(f"Failed to save summary report to '{summary_filepath}': {e}"); return None
def request_stop(self) -> None:
self._log_event("Stop requested for current profile execution...", True)
@@ -631,14 +504,9 @@ class ProfileExecutor:
self._log_event("Cleaning up GDB session...", False)
quit_timeout = self._get_setting("timeouts", "gdb_quit", 10)
try:
# We no longer send kill_program here explicitly, so the debuggee can keep
# running; quitting GDB will kill the inferior anyway if it is still alive.
self.gdb_session.quit(timeout=quit_timeout)
self.gdb_output_writer("GDB session quit during cleanup.\n")
except Exception as e_quit:
logger.error(f"Exception during GDB quit in cleanup: {e_quit}")
finally:
self.gdb_session = None
elif self.gdb_session:
self.gdb_session = None
except Exception as e_quit: logger.error(f"Exception during GDB quit in cleanup: {e_quit}")
finally: self.gdb_session = None
elif self.gdb_session: self.gdb_session = None
logger.info("ProfileExecutor GDB session resources attempted cleanup.")

File diff suppressed because it is too large