fix array, struct and simple var

This commit is contained in:
VALLONGOL 2025-05-22 07:22:53 +02:00
parent e66ce1683a
commit 386d85cc27
6 changed files with 542 additions and 380 deletions

View File

@@ -4,55 +4,86 @@
import logging
import sys # For stream handler to output to console
import os # For path manipulation and log file deletion
# Use relative import to get the GDBGui class from the gui subpackage
from .gui.main_window import GDBGui
# --- Application Constants (Optional) ---
# --- Application Constants ---
APP_NAME = "Cpp-Python GDB Debug Helper"
APP_VERSION = "1.1.0" # Example version
# --- DUMPER LOG FILE PATH ---
# This path must be consistent with how GDB_DUMPER_LOG_PATH is defined in gdb_dumper.py
GDB_DUMPER_LOG_TO_DELETE = None # Initialize to None
try:
# If __file__ is defined for __main__.py (it usually is)
if '__file__' in globals():
# Assuming the structure:
# cpp_python_debug/
# |-- __main__.py (this file; os.path.dirname(__file__) is cpp_python_debug/)
# |-- core/
# |-- gdb_dumper.py (and gdb_dumper_debug.log is created here)
# |-- ...
# |-- gui/
# |-- ...
main_script_dir = os.path.dirname(os.path.abspath(__file__))
gdb_dumper_script_dir = os.path.join(main_script_dir, "core")
GDB_DUMPER_LOG_TO_DELETE = os.path.join(gdb_dumper_script_dir, "gdb_dumper_debug.log")
else:
# Fallback if __file__ is not defined (rare for __main__.py)
# In that case, assume gdb_dumper.py writes to the user's home directory
# as its own fallback.
GDB_DUMPER_LOG_TO_DELETE = os.path.join(os.path.expanduser("~"), "gdb_dumper_debug.log")
except Exception:
# If the path cannot be determined, do not attempt deletion.
# GDB_DUMPER_LOG_TO_DELETE will remain None.
pass
def setup_global_logging():
"""
Configures basic logging for the application.
- Logs to console (stdout).
- GUI will have its own handler to display logs within the application window.
"""
# Define the root logger level. DEBUG will capture everything.
# Specific handlers can have their own, more restrictive levels.
# Define log_level here so it is in the correct scope
log_level = logging.DEBUG # Or logging.INFO for less verbosity in console
# Basic configuration for the root logger
# This will set up a default StreamHandler to sys.stderr if no handlers are added.
# We will add our own StreamHandler to sys.stdout for better control.
logging.basicConfig(level=log_level,
format='%(asctime)s - %(name)-25s - %(levelname)-8s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# If you want to ensure console output goes to stdout instead of stderr (default for basicConfig warnings/errors)
# and have more control over the console handler:
# 1. Get the root logger
root_logger = logging.getLogger()
# 2. Remove any default handlers basicConfig might have added (if any, depends on Python version behavior)
# For safety, clear existing handlers if you want to be sure only yours are present.
# Optional: finer-grained control over the console handler (e.g., stdout instead of stderr)
# root_logger = logging.getLogger()
# # Remove default handlers, if present, when only your own are wanted
# for handler in root_logger.handlers[:]:
# root_logger.removeHandler(handler)
# 3. Add your custom console StreamHandler
# console_handler = logging.StreamHandler(sys.stdout)
# console_formatter = logging.Formatter('%(asctime)s - %(name)-25s - %(levelname)-8s - %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
# console_handler.setFormatter(console_formatter)
# console_handler.setLevel(log_level) # Set level for this specific handler
# console_handler.setLevel(log_level)
# root_logger.addHandler(console_handler)
# The ScrolledTextLogHandler in main_window.py will be added to the root logger
# when the GDBGui instance is created, so logs will also go to the GUI.
logger = logging.getLogger(__name__) # Get a logger for this module
logger = logging.getLogger(__name__) # Logger for this module (__main__)
logger.info(f"Global logging initialized. Console log level: {logging.getLevelName(log_level)}.")
logger.info(f"Starting {APP_NAME} v{APP_VERSION}")
# --- DELETE THE OLD DUMPER LOG ---
if GDB_DUMPER_LOG_TO_DELETE: # Check whether the path was determined
if os.path.exists(GDB_DUMPER_LOG_TO_DELETE):
try:
os.remove(GDB_DUMPER_LOG_TO_DELETE)
logger.info(f"Previous GDB dumper log deleted: {GDB_DUMPER_LOG_TO_DELETE}")
except Exception as e_remove:
logger.warning(f"Could not delete previous GDB dumper log ('{GDB_DUMPER_LOG_TO_DELETE}'): {e_remove}")
else:
logger.info(f"GDB dumper log not found (no previous log to delete): {GDB_DUMPER_LOG_TO_DELETE}")
else:
logger.warning("Path for GDB dumper log to delete could not be determined.")
# --- END DELETION ---
def main():
"""
@@ -60,43 +91,35 @@ def main():
"""
setup_global_logging() # Initialize logging first
# Get a logger for the main execution scope
# (using __name__ here would give 'cpp_python_debug.__main__')
app_logger = logging.getLogger(APP_NAME) # Or use a more generic name
app_logger = logging.getLogger(APP_NAME) # Logger for the application as a whole
try:
app_logger.info("Creating GDBGui instance...")
app = GDBGui() # Create the main application window
app = GDBGui()
app_logger.info("Starting Tkinter main event loop.")
app.mainloop() # Start the Tkinter event loop
app.mainloop()
app_logger.info("Tkinter main event loop finished.")
except Exception as e:
# Catch any unhandled exceptions during GUI initialization or runtime
# that might not be caught by Tkinter's own error handling.
app_logger.critical(f"An unhandled critical error occurred: {e}", exc_info=True)
# Optionally, show a simple error dialog if Tkinter is still somewhat functional
# or if it fails before Tkinter is even up.
# For console-based launch, the log to console is key.
try:
# Try to show a Tkinter error dialog if possible
import tkinter as tk
from tkinter import messagebox
# Check if a root window exists or can be created
# This is a bit of a hack; ideally, errors are caught within the GUI logic.
root_for_error = tk.Tk()
root_for_error.withdraw() # Hide the empty root window
root_for_error.withdraw()
messagebox.showerror("Critical Application Error",
f"A critical error occurred:\n{e}\n\nPlease check the logs for more details.")
root_for_error.destroy()
except Exception:
pass # If Tkinter itself is broken, can't show a Tkinter messagebox
# If Tkinter itself is broken, there is little to do beyond logging
pass
finally:
app_logger.info(f"{APP_NAME} is shutting down.")
logging.shutdown() # Cleanly close all logging handlers
logging.shutdown() # Close the logging handlers
if __name__ == "__main__":
# This allows the script to be run directly (e.g., python __main__.py)
# as well as via `python -m cpp_python_debug`.
# Allows execution as a direct script or as a module
main()

View File

@@ -6,6 +6,7 @@ import re
import wexpect
import logging
import json # For parsing JSON output from the GDB script
import time # <<<<---- IMPORTANT: ADDED FOR DEBUGGING quit()
logger = logging.getLogger(__name__)
@@ -15,16 +16,6 @@ class GDBSession:
set breakpoints, run the target, and dump variables using a custom GDB Python script.
"""
def __init__(self, gdb_path: str, executable_path: str, gdb_script_full_path: str = None, timeout: int = 30):
"""
Initializes the GDBSession.
Args:
gdb_path: Path to the GDB executable.
executable_path: Path to the target executable to debug.
gdb_script_full_path: Absolute path to the GDB Python dumper script (e.g., gdb_dumper.py).
If None, JSON dumping capabilities will be limited.
timeout: Default timeout for GDB command expectations.
"""
if not os.path.exists(gdb_path):
msg = f"GDB executable not found at: {gdb_path}"
logger.error(msg)
@@ -34,227 +25,149 @@ class GDBSession:
logger.error(msg)
raise FileNotFoundError(msg)
if gdb_script_full_path and not os.path.exists(gdb_script_full_path):
# Log a warning instead of raising an error, to allow basic GDB usage
# if the script is missing for some reason. The GUI should inform the user.
logger.warning(f"GDB Python dumper script not found at: {gdb_script_full_path}. Advanced JSON dumping will be unavailable.")
gdb_script_full_path = None # Treat as if not provided
gdb_script_full_path = None
self.gdb_path = gdb_path
self.executable_path = executable_path
self.gdb_script_path = gdb_script_full_path # Store the path to our dumper script
self.gdb_script_path = gdb_script_full_path
self.timeout = timeout
self.child = None # The wexpect child process
self.gdb_prompt = "(gdb) " # Standard GDB prompt
self.child = None
self.gdb_prompt = "(gdb) "
self.gdb_script_sourced_successfully = False
logger.info(f"GDBSession initialized. GDB: '{gdb_path}', Executable: '{executable_path}', DumperScript: '{gdb_script_full_path}'")
def start(self) -> None:
"""
Starts the GDB subprocess and waits for the initial prompt.
Also attempts to source the custom GDB Python script if provided.
"""
command = f'"{self.gdb_path}" --nx --quiet "{self.executable_path}"'
# --nx: Do not execute commands from any .gdbinit files.
# --quiet: Do not print the introductory and copyright messages.
logger.info(f"Spawning GDB process: {command}")
try:
self.child = wexpect.spawn(command, timeout=self.timeout, encoding='utf-8', errors='replace')
self.child.expect_exact(self.gdb_prompt, timeout=max(self.timeout, 10)) # Longer timeout for initial startup
self.child.expect_exact(self.gdb_prompt, timeout=max(self.timeout, 15)) # Increased timeout slightly
logger.info("GDB started successfully and prompt received.")
# Attempt to source the custom GDB Python script if a path was provided
logger.info("Disabling GDB pagination ('set pagination off').")
# Send the command and expect another prompt.
# Use a shorter timeout for this setup command.
self.send_cmd("set pagination off", expect_prompt=True, timeout=max(5, self.timeout // 2))
logger.info("GDB pagination disabled.")
if self.gdb_script_path:
self._source_gdb_dumper_script()
else:
logger.info("No GDB dumper script path provided; skipping sourcing.")
except wexpect.TIMEOUT:
error_msg = "Timeout waiting for GDB prompt on start."
logger.error(error_msg)
# Try to get some output for diagnostics
try:
debug_output = self.child.read_nonblocking(size=2048, timeout=1)
logger.error(f"GDB output before timeout: {debug_output}")
except Exception:
pass
raise TimeoutError(error_msg) # Use standard Python TimeoutError
except Exception: pass
raise TimeoutError(error_msg)
except Exception as e:
logger.error(f"Failed to spawn or initialize GDB: {e}", exc_info=True)
raise ConnectionError(f"Failed to spawn or initialize GDB: {e}") # Use standard ConnectionError
raise ConnectionError(f"Failed to spawn or initialize GDB: {e}")
def _source_gdb_dumper_script(self) -> None:
"""
Internal method to send the 'source' command to GDB for the custom dumper script.
"""
if not self.gdb_script_path or not self.child:
return
if not self.gdb_script_path or not self.child: return
normalized_script_path = self.gdb_script_path.replace('\\', '/')
logger.info(f"Sourcing GDB Python script: {normalized_script_path}")
try:
output_before_prompt = self.send_cmd(f'source "{normalized_script_path}"')
source_command = f'source {normalized_script_path}' # No quotes as per previous findings
logger.debug(f"Constructed source command: [{source_command}]")
output_before_prompt = self.send_cmd(source_command)
# --- IMPROVED ERROR-CHECKING LOGIC ---
error_detected = False
# Check for standard Python errors
if "Traceback (most recent call last):" in output_before_prompt or \
"SyntaxError:" in output_before_prompt:
logger.error(f"Python error detected while sourcing GDB script '{normalized_script_path}':\n{output_before_prompt}")
error_detected = True
# Check for common GDB errors such as "No such file" or "Error:" at the start of a line.
# GDB output can span multiple lines, so we check each line for certain patterns
# (or the first meaningful line).
# The "No such file or directory." output is often preceded by the path itself.
# Example: '"path/to/script.py": No such file or directory.'
# We use a regex for this.
# GDB's error message has the form "filename: error message."
# e.g.: "C:/path/to/file.py: No such file or directory."
# e.g.: "Error: an error message from gdb."
# e.g.: "Script Error: some error message from a sourced python script." (unlikely with "source")
# Pattern per "filename: No such file or directory."
# \S+ matches one or more non-whitespace characters (for the filename part)
no_such_file_pattern = re.compile(r"""
^ # Start of the line (with re.MULTILINE) or start of the string
"? # Optional opening quotes
\S+ # The file name/path
"? # Optional closing quotes
:\s* # Colon followed by zero or more spaces
No\s+such\s+file\s+or\s+directory # The error message
""", re.VERBOSE | re.MULTILINE) # re.MULTILINE makes ^ match at the start of each line
# Pattern for "Error: " at the start of a line
no_such_file_pattern = re.compile(r"""^"?\S+"?:\s*No\s+such\s+file\s+or\s+directory""", re.VERBOSE | re.MULTILINE)
gdb_error_pattern = re.compile(r"^Error:", re.MULTILINE)
if no_such_file_pattern.search(output_before_prompt):
logger.error(f"GDB error 'No such file or directory' for script '{normalized_script_path}':\n{output_before_prompt}")
error_detected = True
elif gdb_error_pattern.search(output_before_prompt):
# Catch a generic "Error: " if it is not a Python error or "No such file".
# This may be too generic, but it is an attempt;
# it may be better to check only the first line of output_before_prompt.
first_line_output = output_before_prompt.splitlines()[0] if output_before_prompt.splitlines() else ""
if first_line_output.strip().startswith("Error:"):
logger.error(f"GDB 'Error:' detected while sourcing script '{normalized_script_path}':\n{output_before_prompt}")
error_detected = True
if error_detected:
self.gdb_script_sourced_successfully = False
else:
logger.info(f"GDB script '{normalized_script_path}' sourced (no critical errors detected in output).")
self.gdb_script_sourced_successfully = True
# --- END IMPROVED ERROR-CHECKING LOGIC ---
except Exception as e:
logger.error(f"Exception during 'source' command for GDB script '{normalized_script_path}': {e}", exc_info=True)
self.gdb_script_sourced_successfully = False
def send_cmd(self, command: str, expect_prompt: bool = True, timeout: int = None) -> str:
"""
Sends a command to the GDB subprocess and returns its output.
# ... (docstring as before) ...
"""
if not self.child or not self.child.isalive():
logger.error("GDB session not started or is dead. Cannot send command.")
raise ConnectionError("GDB session not active.")
effective_timeout = timeout if timeout is not None else self.timeout
logger.debug(f"Sending GDB command: '{command}' with timeout: {effective_timeout}s")
try:
self.child.sendline(command)
if expect_prompt:
index = self.child.expect_exact([self.gdb_prompt, wexpect.EOF, wexpect.TIMEOUT], timeout=effective_timeout)
output_before = self.child.before
if index == 0: # Matched GDB prompt
# --- FIX HERE ---
if index == 0:
logger.debug(f"GDB output for '{command}':\n{output_before.rstrip('\r\n')}")
# --- END FIX ---
return output_before
elif index == 1: # Matched EOF
logger.error(f"GDB exited unexpectedly (EOF) after command: {command}. Output: {output_before.rstrip('\r\n')}") # Aggiunto rstrip anche qui per pulizia log
elif index == 1:
logger.error(f"GDB exited unexpectedly (EOF) after command: {command}. Output: {output_before.rstrip('\r\n')}")
self.child.close()
raise wexpect.EOF(f"GDB exited unexpectedly after command: {command}")
elif index == 2: # Matched TIMEOUT
logger.error(f"Timeout ({effective_timeout}s) executing GDB command: {command}. Output so far: {output_before.rstrip ('\r\n')}") # Aggiunto rstrip anche qui
elif index == 2:
logger.error(f"Timeout ({effective_timeout}s) executing GDB command: {command}. Output so far: {output_before.rstrip ('\r\n')}")
current_output = output_before
try:
current_output += self.child.read_nonblocking(size=4096, timeout=1)
except Exception:
pass
raise TimeoutError(f"Timeout executing GDB command: {command}. Partial output: {current_output.rstrip('\r\n')}") # Aggiunto rstrip anche qui
try: current_output += self.child.read_nonblocking(size=4096, timeout=0.5) # Shorter timeout for nonblocking read
except Exception: pass
raise TimeoutError(f"Timeout executing GDB command: {command}. Partial output: {current_output.rstrip('\r\n')}")
return ""
except (wexpect.TIMEOUT, TimeoutError) as e:
logger.error(f"Timeout during GDB command: {command} -> {e}", exc_info=True)
raise TimeoutError(f"Timeout during GDB command: {command}") from e
except wexpect.EOF as e:
logger.error(f"GDB EOF during command: {command} -> {e}", exc_info=True)
self.child.close()
if self.child and self.child.isalive(): self.child.close() # Ensure close if possible
raise
except Exception as e:
logger.error(f"Generic error during GDB command: {command} -> {e}", exc_info=True)
raise ConnectionError(f"Error during GDB command '{command}': {e}") from e
def set_breakpoint(self, location: str) -> str:
"""Sets a breakpoint in GDB."""
logger.info(f"Setting breakpoint at: {location}")
return self.send_cmd(f"break {location}")
def run_program(self, params: str = "") -> str:
"""Runs the program in GDB with optional parameters."""
run_command = "run"
if params:
run_command += f" {params.strip()}"
if params: run_command += f" {params.strip()}"
logger.info(f"Running program in GDB: '{run_command}'")
# 'run' can take a long time and output varies (hit breakpoint, exit, crash)
# We expect the (gdb) prompt if it hits a breakpoint or after it exits normally.
# Timeout should be generous.
return self.send_cmd(run_command, timeout=max(self.timeout, 120))
def continue_execution(self) -> str:
"""Continues program execution in GDB."""
logger.info("Continuing program execution in GDB.")
return self.send_cmd("continue", timeout=max(self.timeout, 120))
def dump_variable_to_json(self, var_name: str) -> dict:
"""
Dumps a variable using the 'dump_json' command (from the sourced GDB script)
and parses the JSON output.
Args:
var_name: The name of the variable or expression to dump.
Returns:
A Python dictionary parsed from the JSON output.
If an error occurs, returns a dictionary containing error information.
"""
if not self.gdb_script_sourced_successfully:
logger.warning(f"GDB dumper script was not sourced successfully. Cannot dump '{var_name}' to JSON.")
return {"_gdb_tool_error": "GDB dumper script not available or failed to load."}
logger.info(f"Dumping variable '{var_name}' to JSON using 'dump_json' GDB command.")
try:
# The 'dump_json' command is expected to print delimiters START_JSON_OUTPUT and END_JSON_OUTPUT
# The timeout here might need to be quite generous for complex variables.
raw_gdb_output = self.send_cmd(f"dump_json {var_name}", expect_prompt=True, timeout=max(self.timeout, 60))
# Extract the JSON string between the delimiters
# Using re.DOTALL to make '.' match newlines
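# Expected shape of raw_gdb_output (a sketch, based on the delimiters the
# dumper script prints; compare the transcript in todo.md):
#   START_JSON_OUTPUT
#   987
#   END_JSON_OUTPUT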
match = re.search(r"START_JSON_OUTPUT\s*([\s\S]*?)\s*END_JSON_OUTPUT", raw_gdb_output, re.DOTALL)
if match:
json_str = match.group(1).strip()
logger.debug(f"JSON string received from GDB 'dump_json': {json_str[:500]}...") # Log start of JSON
logger.debug(f"JSON string received from GDB 'dump_json': {json_str[:500]}...")
try:
parsed_data = json.loads(json_str)
# Check if the parsed data itself indicates an error from the GDB script
if isinstance(parsed_data, dict) and "gdb_script_error" in parsed_data:
logger.error(f"Error reported by GDB dumper script for '{var_name}': {parsed_data['gdb_script_error']}")
return parsed_data
@@ -264,30 +177,25 @@ class GDBSession:
else:
logger.error(f"Delimiters START_JSON_OUTPUT/END_JSON_OUTPUT not found in 'dump_json' output for '{var_name}'.")
logger.debug(f"Full GDB output for 'dump_json {var_name}':\n{raw_gdb_output}")
# Check if the raw output itself contains a GDB or Python script error message
if "Traceback (most recent call last):" in raw_gdb_output or \
"gdb.error:" in raw_gdb_output or "Error:" in raw_gdb_output.splitlines()[0]:
"gdb.error:" in raw_gdb_output or ("Error:" in raw_gdb_output.splitlines()[0] if raw_gdb_output.splitlines() else False) :
return {"_gdb_tool_error": "Error during GDB 'dump_json' script execution", "raw_gdb_output": raw_gdb_output}
return {"_gdb_tool_error": "JSON delimiters not found in GDB script output", "raw_gdb_output": raw_gdb_output}
except TimeoutError: # Catch timeout specific to this command
except TimeoutError:
logger.error(f"Timeout dumping variable '{var_name}' with 'dump_json'.")
return {"_gdb_tool_error": f"Timeout during GDB 'dump_json {var_name}' command"}
except Exception as e: # Catch other errors from send_cmd or this method
except Exception as e:
logger.error(f"Generic exception dumping variable '{var_name}' with 'dump_json': {e}", exc_info=True)
return {"_gdb_tool_error": f"Generic exception during 'dump_json {var_name}': {str(e)}"}
def kill_program(self) -> str:
"""Kills the currently running program in GDB, if any."""
logger.info("Sending 'kill' command to GDB.")
try:
# 'kill' might not always return the prompt immediately if the program
# wasn't running or if GDB needs time.
# A short timeout is usually sufficient.
return self.send_cmd("kill", expect_prompt=True, timeout=10)
# Increased timeout for kill, as it might take time for GDB to process
return self.send_cmd("kill", expect_prompt=True, timeout=max(self.timeout, 20))
except (TimeoutError, wexpect.EOF) as e:
logger.warning(f"Exception during 'kill' (might be normal if program not running or GDB exits): {e}")
return f"<kill_command_exception: {e}>" # Return some info
return f"<kill_command_exception: {e}>"
except ConnectionError:
logger.warning("Cannot send 'kill', GDB session not active.")
return "<kill_command_error_no_session>"
@@ -295,51 +203,65 @@ class GDBSession:
logger.error(f"Unexpected error during 'kill': {e}", exc_info=True)
return f"<kill_command_unexpected_error: {e}>"
# --- DEBUG VERSION of quit() ---
def quit(self) -> None:
"""Quits the GDB session gracefully."""
if self.child and self.child.isalive():
logger.info("Quitting GDB session.")
logger.info("DEBUG: Attempting simplified GDB quit.")
try:
self.child.sendline("quit")
# GDB might ask "Quit anyway? (y or n)" if a program is running or has state.
# It might also just quit if nothing is active.
# It might also timeout if it's stuck.
patterns = [
r"Quit anyway\?\s*\(y or n\)\s*", # GDB confirmation prompt
self.gdb_prompt, # GDB prompt (if it didn't ask for confirmation but didn't quit yet)
wexpect.EOF, # GDB exited
wexpect.TIMEOUT # Timeout
]
index = self.child.expect(patterns, timeout=10) # Generous timeout for quit
logger.info("DEBUG: Sent 'quit'. Waiting to see GDB's reaction.")
if index == 0: # Matched "Quit anyway?"
logger.info("GDB asked for quit confirmation. Sending 'y'.")
time.sleep(0.5) # Give GDB a moment to respond
gdb_response_after_quit = ""
try:
gdb_response_after_quit = self.child.read_nonblocking(size=2048, timeout=2)
logger.info(f"DEBUG: GDB response after 'quit' command: {gdb_response_after_quit!r}")
except wexpect.TIMEOUT:
logger.info("DEBUG: Timeout reading GDB response after 'quit' command.")
except Exception as e_read_quit:
logger.info(f"DEBUG: Error reading GDB response after 'quit': {e_read_quit}")
if "Quit anyway?" in gdb_response_after_quit: # Check if confirmation is needed
logger.info("DEBUG: GDB asked for quit confirmation. Sending 'y'.")
self.child.sendline("y")
# Expect EOF or another prompt briefly
self.child.expect([wexpect.EOF, self.gdb_prompt, wexpect.TIMEOUT], timeout=5)
elif index == 1: # Matched GDB prompt (shouldn't happen if quit is effective)
logger.warning("GDB prompt received after 'quit' command. Forcing close.")
elif index == 2: # Matched EOF
logger.info("GDB exited after 'quit' command (EOF).")
elif index == 3: # Matched TIMEOUT
logger.warning("Timeout waiting for GDB to quit. Forcing close.")
time.sleep(0.5) # Give GDB a moment for 'y'
gdb_response_after_y = ""
try:
gdb_response_after_y = self.child.read_nonblocking(size=2048, timeout=2)
logger.info(f"DEBUG: GDB response after 'y': {gdb_response_after_y!r}")
except wexpect.TIMEOUT:
logger.info("DEBUG: Timeout reading GDB response after 'y'.")
except Exception as e_read_y:
logger.info(f"DEBUG: Error reading GDB response after 'y': {e_read_y}")
except (wexpect.TIMEOUT, wexpect.EOF, TimeoutError) as e: # Catch our TimeoutError too
logger.warning(f"Exception during GDB quit sequence (EOF or Timeout is often normal here): {e}")
except Exception as e: # Other unexpected errors
logger.error(f"Error during GDB quit sequence: {e}", exc_info=True)
# Check if GDB is still alive after attempting to quit
time.sleep(1) # Wait a bit more to ensure GDB has time to exit
if self.child.isalive():
logger.warning("DEBUG: GDB is still alive after quit sequence. Attempting child.close().")
try:
self.child.close() # No 'force' argument
if self.child.isalive():
logger.error("DEBUG: GDB still alive even after child.close(). Process may be orphaned.")
else:
logger.info("DEBUG: child.close() seems to have terminated GDB.")
except Exception as e_close:
logger.error(f"DEBUG: Exception during child.close(): {e_close}", exc_info=True)
else:
logger.info("DEBUG: GDB process appears to have exited successfully after quit sequence.")
except Exception as e_quit_main:
logger.error(f"DEBUG: Exception in simplified quit's main try block: {e_quit_main}", exc_info=True)
if self.child and self.child.isalive(): # Final fallback
try: self.child.close()
except Exception: pass # Ignore errors on this final close
finally:
if self.child and self.child.isalive():
logger.info("GDB still alive after quit sequence; forcing close.")
self.child.close(force=True)
self.child = None # Mark as no longer having a child process
self.gdb_script_sourced_successfully = False
else:
logger.info("GDB process terminated or already closed.")
self.child = None
self.gdb_script_sourced_successfully = False # Reset flag
else:
logger.info("GDB session quit called, but no active child process.")
logger.info("GDB session resources released.")
logger.info("DEBUG: GDB session quit called, but no active child process or child already None.")
logger.info("DEBUG: GDB session resources (controller-side) released.")
def is_alive(self) -> bool:
"""Checks if the GDB child process is alive."""
return self.child is not None and self.child.isalive()

View File

@@ -1,236 +1,414 @@
# File: cpp_python_debug/core/gdb_dumper.py
# This script is intended to be sourced by GDB.
# It provides a command to dump GDB values as JSON.
import gdb
import json
import traceback # For detailed error logging within the script
import traceback
import os # For the log file path
# --- Configuration ---
# Maximum number of elements to serialize for an array/list-like structure
# to prevent excessively large JSON outputs or infinite loops with cyclic structures.
# --- File Logging Configuration for gdb_dumper.py ---
# Determine a path for the log file.
# It could be in the user's temp directory or next to the script itself.
# For simplicity, we use the script's directory (falling back to home). Be aware of permissions.
try:
# Try to use the directory of the GDB script itself (if it has write permissions)
# or a standard known directory.
# __file__ might not be defined if the script is pasted into GDB in unusual ways,
# but for normal sourcing it should work.
if '__file__' in globals():
GDB_DUMPER_LOG_DIR = os.path.dirname(os.path.abspath(__file__))
else: # Fall back to a known directory if __file__ is not defined
GDB_DUMPER_LOG_DIR = os.path.expanduser("~") # User's home directory
GDB_DUMPER_LOG_PATH = os.path.join(GDB_DUMPER_LOG_DIR, "gdb_dumper_debug.log")
# Helper function to write to the dedicated log file
def _dumper_log_write(message):
try:
# Open in append mode ('a'); the file is deleted when the main app starts
with open(GDB_DUMPER_LOG_PATH, "a", encoding='utf-8') as f_log:
f_log.write(str(message) + "\n")
except Exception:
pass # Do not let the main script fail if logging fails
# Initial message written to the log when the script is loaded/sourced
_dumper_log_write("--- GDB Dumper Script Initialized/Sourced ---")
except Exception as e_log_setup:
# If log setup fails, file logging will not work,
# but the main script should still try to run.
# We can print a single error to the GDB console,
# hoping one message does not interfere too much.
gdb.write("ERROR: gdb_dumper.py failed to setup file logging: {}\n".format(e_log_setup))
def _dumper_log_write(message): # No-op function if setup fails
pass
# --- End Logging Configuration ---
# --- Configuration (MAX_*, etc., as before) ---
MAX_ARRAY_ELEMENTS = 100
# Maximum depth for recursive serialization (e.g., nested structs)
MAX_RECURSION_DEPTH = 10
# Maximum length for strings read from memory
MAX_STRING_LENGTH = 2048
class EnhancedJsonEncoder(json.JSONEncoder):
"""
A custom JSON encoder to handle gdb.Value objects and other GDB-specific types.
It tries to serialize GDB values into corresponding Python types that can be
represented in JSON.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.current_depth = 0 # For tracking recursion depth
self.current_depth = 0
self.visited_values = {}
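# visited_values maps "<address>_<type>" keys (built by _get_value_unique_key)
# to the recursion depth at which a value was first visited, so cyclic or
# shared references are emitted as markers instead of recursed into forever.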
def _get_value_unique_key(self, gdb_val):
# ... (as before)
if not gdb_val: return None
try:
address_str = "no_address"
if hasattr(gdb_val, 'address') and gdb_val.address is not None:
address_str = str(gdb_val.address)
original_type_str = str(gdb_val.type)
return f"{address_str}_{original_type_str}"
except gdb.error: return f"error_getting_key_for_{str(gdb_val)[:50]}"
def _is_visitable_value(self, gdb_val):
# ... (as before)
if not gdb_val: return False
try:
type_code = gdb_val.type.strip_typedefs().code
return type_code in [gdb.TYPE_CODE_PTR, gdb.TYPE_CODE_ARRAY,
gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION]
except gdb.error: return False
# Inside the EnhancedJsonEncoder class:
def _serialize_value(self, gdb_val):
"""
Recursive helper to serialize a gdb.Value, tracking depth.
Recursively serializes a gdb.Value, tracking depth and visited objects.
"""
# _dumper_log_write(f"TRACE: _serialize_value - Depth: {self.current_depth}, Type: {gdb_val.type}")
if self.current_depth > MAX_RECURSION_DEPTH:
return f"<max_recursion_depth_reached_{MAX_RECURSION_DEPTH}>"
return "<max_recursion_depth_reached_{}>".format(MAX_RECURSION_DEPTH)
unique_key = None
is_visitable = self._is_visitable_value(gdb_val)
if is_visitable:
unique_key = self._get_value_unique_key(gdb_val)
if unique_key and unique_key in self.visited_values:
if self.current_depth >= self.visited_values[unique_key]:
type_in_key = unique_key.split('_', 1)[1] if '_' in unique_key else "unknown_type"
addr_in_key = unique_key.split('_', 1)[0]
return "<cyclic_or_shared_ref_to_type_{}_at_{}>".format(type_in_key, addr_in_key)
self.current_depth += 1
if is_visitable and unique_key: self.visited_values[unique_key] = self.current_depth
serialized_val = None
try:
# Handle cases where the value might have been optimized out by the compiler
if gdb_val.is_optimized_out:
serialized_val = "<optimized_out>"
else:
val_type = gdb_val.type.strip_typedefs() # Get the underlying type
val_type = gdb_val.type.strip_typedefs()
type_code = val_type.code
original_type_str = str(gdb_val.type)
type_name_str = str(val_type.name) if val_type.name else ""
if type_code == gdb.TYPE_CODE_PTR:
is_potentially_string = "string" in original_type_str.lower() or \
("basic_string" in original_type_str.lower() and "<" in original_type_str) or \
"string" in type_name_str.lower() or \
("basic_string" in type_name_str.lower() and "<" in type_name_str)
if is_potentially_string:
_dumper_log_write(f"DEBUG_STRING_TRACE: _serialize_value processing possible string. Original Type: '{original_type_str}', Stripped Type Name: '{type_name_str}', Type Code: {type_code}")
iterator_children_result = self._get_iterator_children(gdb_val)
if iterator_children_result is not None:
serialized_val = iterator_children_result
else:
condition_is_std_string = False
if "::basic_string<" in type_name_str:
condition_is_std_string = True
elif type_name_str == "std::string" or type_name_str == "std::__cxx11::string":
condition_is_std_string = True
elif not condition_is_std_string and type_name_str.endswith("::string"):
condition_is_std_string = True
if is_potentially_string:
_dumper_log_write(f"DEBUG_STRING_CONDITION_REVISED: For type_name_str: '{type_name_str}', condition_is_std_string result: {condition_is_std_string}")
if condition_is_std_string:
_dumper_log_write(f"DEBUG_STRING_HANDLER: Entered 'std::string' block for: '{type_name_str}' (Attempting internal pointer access)")
_M_p_val = None
try:
# Try to access _M_dataplus['_M_p']
if '_M_dataplus' in (f.name for f in gdb_val.type.fields()):
_M_dataplus_val = gdb_val['_M_dataplus']
if '_M_p' in (f.name for f in _M_dataplus_val.type.fields()):
_M_p_val = _M_dataplus_val['_M_p']
else:
_dumper_log_write(f"DEBUG_STRING_HANDLER: Field '_M_p' not found in _M_dataplus for '{type_name_str}'.")
# Try to access _M_p directly (for some implementations / SSO)
elif '_M_p' in (f.name for f in gdb_val.type.fields()):
_M_p_val = gdb_val['_M_p']
else:
_dumper_log_write(f"DEBUG_STRING_HANDLER: Internal string pointer fields ('_M_dataplus' or direct '_M_p') not found for '{type_name_str}'.")
if _M_p_val is not None:
_dumper_log_write(f"DEBUG_STRING_HANDLER: Found _M_p for '{type_name_str}'. Type of _M_p: {str(_M_p_val.type)}")
_M_p_type_stripped = _M_p_val.type.strip_typedefs()
if _M_p_type_stripped.code == gdb.TYPE_CODE_PTR:
_M_p_target_type = _M_p_type_stripped.target().strip_typedefs()
if _M_p_target_type.code == gdb.TYPE_CODE_INT and \
("char" in str(_M_p_target_type.name) if _M_p_target_type.name else False):
serialized_val = _M_p_val.string(encoding='utf-8', errors='replace') #, length=MAX_STRING_LENGTH)
_dumper_log_write(f"DEBUG_STRING_HANDLER: _M_p.string() successful, value (first 100 chars): {str(serialized_val)[:100]}")
else:
_dumper_log_write(f"DEBUG_STRING_HANDLER: _M_p for '{type_name_str}' is not a char pointer as expected. Target Type: {str(_M_p_target_type)}. Falling back to str(gdb_val).")
serialized_val = str(gdb_val)
else:
_dumper_log_write(f"DEBUG_STRING_HANDLER: _M_p for '{type_name_str}' is not a pointer. Type: {str(_M_p_val.type)}. Falling back to str(gdb_val).")
serialized_val = str(gdb_val)
else: # _M_p_val is None (not found)
_dumper_log_write(f"DEBUG_STRING_HANDLER: Could not access _M_p for '{type_name_str}'. This might be due to Small String Optimization (SSO). Falling back to str(gdb_val).")
serialized_val = str(gdb_val) # Fallback for SSO or when _M_p is not accessible
except Exception as e_str_internal:
_dumper_log_write(f"DEBUG_STRING_HANDLER: Error accessing internal string pointer for '{type_name_str}': {e_str_internal}\n{traceback.format_exc()}")
try: # Last-resort fallback
serialized_val = str(gdb_val)
_dumper_log_write(f"DEBUG_STRING_HANDLER: Fallback to str(gdb_val) after internal pointer error, value: {str(serialized_val)[:100]}")
except Exception as e_fallback_str:
_dumper_log_write(f"DEBUG_STRING_HANDLER: Fallback str(gdb_val) also failed: {e_fallback_str}")
serialized_val = "<error_reading_std_string_internals_and_fallback: {}>".format(str(e_str_internal))
elif type_code == gdb.TYPE_CODE_PTR:
serialized_val = self._handle_pointer(gdb_val, val_type)
elif type_code == gdb.TYPE_CODE_ARRAY:
serialized_val = self._handle_array(gdb_val, val_type)
serialized_val = self._handle_c_array(gdb_val, val_type)
elif type_code == gdb.TYPE_CODE_STRUCT or \
type_code == gdb.TYPE_CODE_UNION or \
(hasattr(val_type, 'fields') and callable(val_type.fields) and len(val_type.fields()) > 0): # Includes classes
serialized_val = self._handle_struct_or_class(gdb_val, val_type)
type_code == gdb.TYPE_CODE_UNION:
if is_potentially_string and not condition_is_std_string:
_dumper_log_write(f"DEBUG_STRING_ERROR_REVISED_CHECK: String type ('{original_type_str}') still reached STRUCT/UNION! type_name_str: '{type_name_str}', condition_was: {condition_is_std_string}")
serialized_val = self._handle_struct_or_class(gdb_val, val_type, original_type_str)
elif type_code == gdb.TYPE_CODE_ENUM:
serialized_val = str(gdb_val) # GDB usually gives the enum member name
serialized_val = str(gdb_val)
elif type_code == gdb.TYPE_CODE_INT:
serialized_val = int(gdb_val)
elif type_code == gdb.TYPE_CODE_FLT:
serialized_val = float(gdb_val)
elif type_code == gdb.TYPE_CODE_BOOL:
serialized_val = bool(gdb_val)
elif str(val_type) == "void": # Special case for void, gdb_val might be an address
elif str(val_type) == "void":
serialized_val = str(gdb_val)
else:
# Fallback for other types (e.g., function pointers, internal types)
# Try to convert to string; might be an address or symbolic representation
try:
serialized_val = str(gdb_val)
except Exception:
serialized_val = "<failed_to_str_unknown_type_{}>".format(str(val_type))
except gdb.error as e_gdb:
# Error accessing GDB value (e.g., invalid memory)
serialized_val = f"<gdb_error: {str(e_gdb)}>"
_dumper_log_write(f"ERROR: GDB error in _serialize_value for type {gdb_val.type}: {e_gdb}")
serialized_val = "<gdb_error_serializing: {} (type: {})>".format(str(e_gdb), str(gdb_val.type))
except Exception as e_py:
# Python error within this serialization logic
serialized_val = f"<python_script_error: {str(e_py)} - {traceback.format_exc(limit=2)}>"
_dumper_log_write(f"ERROR: Python Traceback in _serialize_value for type {gdb_val.type}:\n{traceback.format_exc()}")
serialized_val = "<python_script_error_serializing: {} (type: {})>".format(str(e_py), str(gdb_val.type))
self.current_depth -= 1
if is_visitable and unique_key and unique_key in self.visited_values:
if self.visited_values[unique_key] == self.current_depth + 1:
del self.visited_values[unique_key]
return serialized_val
def default(self, o):
if isinstance(o, gdb.Value):
if self.current_depth == 0: self.visited_values.clear()
return self._serialize_value(o)
# Let the base class default method raise the TypeError for unsupported types
return json.JSONEncoder.default(self, o)
def _handle_pointer(self, gdb_val, val_type):
"""Handles gdb.TYPE_CODE_PTR."""
if not gdb_val: # Null pointer
def _get_iterator_children(self, gdb_val_original):
type_name = "UnknownType"
try:
gdb_val_type_stripped = gdb_val_original.type.strip_typedefs()
type_name = str(gdb_val_type_stripped.name) if gdb_val_type_stripped.name else ""
if "std::basic_string" in type_name: return None
is_known_stl_vector = "std::vector" in type_name
is_known_stl_map = "std::map" in type_name or "std::unordered_map" in type_name
is_any_known_stl_container = is_known_stl_vector or is_known_stl_map or \
any(container_name == type_name or type_name.startswith(container_name + "<") for container_name in [
"std::list", "std::set", "std::deque", "std::forward_list",
"std::multimap", "std::multiset"
])
has_children_attr = hasattr(gdb_val_original, 'children')
is_children_callable = False
if has_children_attr:
try:
children_method_obj = gdb_val_original.children
is_children_callable = callable(children_method_obj)
except Exception: pass
has_children_method = has_children_attr and is_children_callable
elements = []
children_processed_successfully = False
if has_children_method:
try:
children_iter = gdb_val_original.children()
temp_children_list = [item for item in children_iter]
count = 0
for child_tuple_or_val in temp_children_list:
child_val_to_serialize = None; key_for_map_entry = None
if isinstance(child_tuple_or_val, tuple) and len(child_tuple_or_val) == 2:
key_obj, val_obj = child_tuple_or_val
if is_known_stl_map:
key_for_map_entry = self._serialize_value(key_obj) if isinstance(key_obj, gdb.Value) else str(key_obj)
child_val_to_serialize = val_obj
else: child_val_to_serialize = child_tuple_or_val
if count < MAX_ARRAY_ELEMENTS:
serialized_element = None
if isinstance(child_val_to_serialize, gdb.Value): serialized_element = self._serialize_value(child_val_to_serialize)
elif child_val_to_serialize is None or isinstance(child_val_to_serialize, (int, float, str, bool, list, dict)): serialized_element = child_val_to_serialize
else: serialized_element = "<child_item_not_gdb_value_or_simple_type: {}>".format(type(child_val_to_serialize))
if key_for_map_entry is not None: elements.append({"key": key_for_map_entry, "value": serialized_element})
else: elements.append(serialized_element)
else:
elements.append("<container_truncated_showing_{}_elements>".format(MAX_ARRAY_ELEMENTS)); break
count += 1
if elements or (is_any_known_stl_container and not temp_children_list): children_processed_successfully = True
except Exception: pass
if not children_processed_successfully and is_known_stl_vector:
try:
m_impl = gdb_val_original['_M_impl']
m_start_val = m_impl['_M_start']; m_finish_val = m_impl['_M_finish']
m_start_type_stripped = m_start_val.type.strip_typedefs()
m_finish_type_stripped = m_finish_val.type.strip_typedefs()
if m_start_type_stripped.code == gdb.TYPE_CODE_PTR and m_finish_type_stripped.code == gdb.TYPE_CODE_PTR:
element_type = m_start_type_stripped.target().strip_typedefs()
if element_type.sizeof == 0: elements = []; children_processed_successfully = True
else:
current_ptr_val = m_start_val; num_elements_manually = 0; manual_elements = []
max_manual_elements_to_check = MAX_ARRAY_ELEMENTS + 5
while current_ptr_val != m_finish_val and num_elements_manually < max_manual_elements_to_check :
if num_elements_manually < MAX_ARRAY_ELEMENTS:
try: manual_elements.append(self._serialize_value(current_ptr_val.dereference()))
except gdb.error: manual_elements.append("<error_dereferencing_vector_element>"); break
num_elements_manually += 1
try: current_ptr_val = current_ptr_val + 1
except gdb.error: break
if num_elements_manually >= MAX_ARRAY_ELEMENTS and current_ptr_val != m_finish_val :
manual_elements.append("<container_truncated_showing_{}_elements>".format(MAX_ARRAY_ELEMENTS))
elements = manual_elements; children_processed_successfully = True
except Exception: pass
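# Note: _M_impl._M_start/_M_finish are libstdc++'s internal std::vector
# begin/end pointers; this manual pointer walk is only a fallback for when
# the pretty-printer children() iteration above is unavailable or fails.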
if children_processed_successfully: return elements
return None
except Exception:
# _dumper_log_write(f"Outer Python error in _get_iterator_children for {type_name}:\n{traceback.format_exc()}")
return None
def _handle_pointer(self, gdb_val, val_type):
# ... (implementation as before, without gdb.write)
if not gdb_val: return None
target_type = val_type.target().strip_typedefs()
target_type_name_str = str(target_type.name) if target_type.name else ""
if target_type.code == gdb.TYPE_CODE_INT and ("char" in target_type_name_str or "wchar_t" in target_type_name_str):
try: return gdb_val.string(encoding='utf-8', errors='replace', length=MAX_STRING_LENGTH)
except gdb.error: return "<error_reading_string_at_{}>".format(str(gdb_val))
except UnicodeDecodeError: return "<unicode_decode_error_at_{}>".format(str(gdb_val))
if self.current_depth < MAX_RECURSION_DEPTH:
try: return self._serialize_value(gdb_val.dereference())
except gdb.error: return str(gdb_val)
else: return "<pointer_not_dereferenced_due_to_max_depth: {}>".format(str(gdb_val))
# Check for C-style strings (char*, const char*, etc.)
if target_type.code == gdb.TYPE_CODE_INT and \
target_type.name in ["char", "signed char", "unsigned char", "wchar_t"]: # Added wchar_t
try:
# lazy_string can prevent reading excessive amounts of memory
# For wchar_t, GDB might handle encoding, or you might need specific handling
# if GDB's default lazy_string encoding isn't suitable.
# Assuming UTF-8 for char* types is common.
encoding = 'utf-8'
if target_type.name == "wchar_t":
# wchar_t is tricky. GDB's lazy_string might guess,
# or it might use a system-dependent encoding like UTF-32 or UTF-16.
# For simplicity, we'll try 'utf-8', but this might need adjustment
# based on the target system and how wchar_t is used.
# A more robust solution for wchar_t might involve reading memory
# byte by byte and decoding based on wchar_t size.
# GDB Python API doesn't have a direct 'read_wide_string'.
pass # Keep utf-8, or try a common wchar_t encoding like 'utf-32' if issues.
return gdb_val.lazy_string(encoding=encoding, length=MAX_STRING_LENGTH)
except gdb.error:
return f"<error_reading_string_at_{str(gdb_val)}>"
except UnicodeDecodeError:
return f"<unicode_decode_error_at_{str(gdb_val)}>"
# For other pointer types, return the address as a string
return str(gdb_val)
def _handle_array(self, gdb_val, val_type):
"""Handles gdb.TYPE_CODE_ARRAY."""
def _handle_c_array(self, gdb_val, val_type):
# ... (implementation as before, without gdb.write)
arr_elements = []
try:
# GDB arrays usually have known bounds (for static arrays or VLA if supported)
# val_type.range() gives (lower_bound, upper_bound)
bounds = val_type.range()
if bounds[0] > bounds[1]: return []
num_elements_to_fetch = min(bounds[1] - bounds[0] + 1, MAX_ARRAY_ELEMENTS)
for i in range(num_elements_to_fetch):
arr_elements.append(gdb_val[bounds[0] + i]) # Recursively serialize elements
arr_elements.append(self._serialize_value(gdb_val[bounds[0] + i]))
if (bounds[1] - bounds[0] + 1) > MAX_ARRAY_ELEMENTS:
arr_elements.append(f"<array_truncated_showing_{MAX_ARRAY_ELEMENTS}_elements>")
arr_elements.append("<c_array_truncated_showing_{}_elements>".format(MAX_ARRAY_ELEMENTS))
return arr_elements
except gdb.error as e:
# This can happen if bounds are indeterminable or access fails
return f"<gdb_error_processing_array: {str(e)}>"
except Exception as e_py: # Python error during array processing
return f"<python_script_error_processing_array: {str(e_py)} - {traceback.format_exc(limit=2)}>"
except gdb.error as e: return "<gdb_error_processing_c_array: {} (type: {})>".format(str(e), str(val_type))
except Exception as e_py: return "<python_script_error_processing_c_array: {} (type: {})>".format(str(e_py), str(val_type))
def _handle_struct_or_class(self, gdb_val, val_type):
"""Handles gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION, and classes."""
obj_dict = {}
def _handle_struct_or_class(self, gdb_val, val_type, original_type_str=""):
# ... (implementation as before, without gdb.write except for string-specific errors)
obj_dict = {"_type": original_type_str if original_type_str else str(val_type)}
try:
fields = val_type.fields()
if not fields:
str_summary = str(gdb_val)
if str_summary != obj_dict["_type"]: obj_dict["_summary"] = str_summary
return obj_dict
for field in fields:
field_name = field.name
if field_name is None: # Skip unnamed fields (e.g., anonymous unions/structs padding)
continue
if field_name is None: continue
if field.artificial and not field.is_base_class: continue
if field.is_base_class:
# For base classes, recursively serialize its fields into the current dict.
# Prefix with base class name to avoid collisions and show hierarchy.
try:
base_val = gdb_val.cast(field.type)
base_obj_dict = self._serialize_value(base_val) # Recursive call
if isinstance(base_obj_dict, dict):
for k, v_base in base_obj_dict.items():
obj_dict[f"{field.type.name}::{k}"] = v_base
else: # If base class serialization failed or wasn't a dict
obj_dict[f"{field.type.name}"] = base_obj_dict
base_obj_dict_or_val = self._serialize_value(base_val)
if isinstance(base_obj_dict_or_val, dict):
base_type_name_prefix = (str(field.type.name) + "::") if field.type.name else "base::"
for k_base, v_base in base_obj_dict_or_val.items():
if k_base == "_type": continue
obj_dict[base_type_name_prefix + k_base] = v_base
else: obj_dict[str(field.type.name) if field.type.name else "base_class_value"] = base_obj_dict_or_val
except gdb.error as e_base_cast: obj_dict[str(field.type.name) if field.type.name else "base_class_error"] = "<gdb_error_casting_base: {}>".format(str(e_base_cast))
else:
try:
field_value = gdb_val[field_name]
obj_dict[field_name] = field_value # Will be serialized by default() on next pass
except gdb.error as e_field:
obj_dict[field_name] = f"<gdb_error_accessing_field: {str(e_field)}>"
except Exception as e_py_field:
obj_dict[field_name] = f"<python_script_error_accessing_field: {str(e_py_field)} - {traceback.format_exc(limit=2)}>"
field_value_obj = gdb_val[field_name]
obj_dict[field_name] = self._serialize_value(field_value_obj)
except gdb.error as e_field: obj_dict[field_name] = "<gdb_error_accessing_field: {} (field: {})>".format(str(e_field), field_name)
except Exception as e_py_field: obj_dict[field_name] = "<python_script_error_accessing_field: {} (field: {})>".format(str(e_py_field), field_name)
return obj_dict
except gdb.error as e:
return f"<gdb_error_processing_struct: {str(e)}>"
except Exception as e_py:
return f"<python_script_error_processing_struct: {str(e_py)} - {traceback.format_exc(limit=2)}>"
except gdb.error as e: return "<gdb_error_processing_struct_fields: {} (type: {})>".format(str(e), str(val_type))
except Exception as e_py: return "<python_script_error_processing_struct: {} (type: {})>".format(str(e_py), str(val_type))
class GDBDumpJsonCommand(gdb.Command):
"""
A GDB command to dump the value of a C/C++ expression to JSON format.
Usage: dump_json <expression>
The command prints the JSON output enclosed in delimiters:
START_JSON_OUTPUT
...JSON_string...
END_JSON_OUTPUT
"""
def __init__(self):
# COMMAND_DATA: For commands that inspect data.
# COMPLETE_SYMBOL: GDB will attempt symbol completion for the argument.
super(GDBDumpJsonCommand, self).__init__("dump_json",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
super(GDBDumpJsonCommand, self).__init__("dump_json", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
self.output_start_delimiter = "START_JSON_OUTPUT"
self.output_end_delimiter = "END_JSON_OUTPUT"
def invoke(self, arg_string, from_tty):
# 'arg_string' contains the expression to evaluate (e.g., variable name, expression)
# 'from_tty' is True if the command was typed by a user at the GDB prompt.
if not arg_string:
gdb.write("Error: No expression provided.\nUsage: dump_json <expression>\n")
return
gdb.write(f"{self.output_start_delimiter}\n")
_dumper_log_write(f"--- dump_json command invoked with arg: '{arg_string}' ---")
gdb.write("{}\n".format(self.output_start_delimiter))
try:
# Evaluate the expression string in the context of the current frame
if not arg_string:
raise ValueError("No expression provided.")
gdb_value = gdb.parse_and_eval(arg_string)
# Serialize the gdb.Value to JSON using the custom encoder
# indent=None and compact separators for minimal output size
json_output = json.dumps(gdb_value,
cls=EnhancedJsonEncoder,
indent=None,
separators=(',', ':'),
ensure_ascii=False) # Allow unicode characters directly
gdb.write(f"{json_output}\n")
except gdb.error as e_gdb:
# Handle errors from GDB during parse_and_eval or value access
error_json = json.dumps({"gdb_script_error": str(e_gdb),
_dumper_log_write(f"Successfully parsed expression. Type: {gdb_value.type}")
encoder = EnhancedJsonEncoder(indent=None, separators=(',', ':'), ensure_ascii=False)
json_output = encoder.encode(gdb_value)
gdb.write("{}\n".format(json_output))
_dumper_log_write("Successfully encoded to JSON.")
except gdb.error as e_gdb: # Error from GDB (e.g., symbol not found)
_dumper_log_write(f"GDB error during 'dump_json {arg_string}': {e_gdb}\n{traceback.format_exc()}")
error_payload = {"gdb_script_error": str(e_gdb), "expression": arg_string, "details": "GDB error during evaluation or value access. Check gdb_dumper_debug.log for GDB Python script trace."}
gdb.write("{}\n".format(json.dumps(error_payload)))
except ValueError as e_val: # Python error, e.g., a ValueError raised by us
_dumper_log_write(f"ValueError during 'dump_json {arg_string}': {e_val}\n{traceback.format_exc()}")
error_payload = {"gdb_script_error": str(e_val), "expression": arg_string, "details": "Input error for dump_json command. Check gdb_dumper_debug.log."}
gdb.write("{}\n".format(json.dumps(error_payload)))
except Exception as e_py: # Other Python errors in the script
_dumper_log_write(f"Unexpected Python error during 'dump_json {arg_string}': {e_py}\n{traceback.format_exc()}")
# For the client, a more generic message; the log has the details
error_payload = {"gdb_script_error": "Internal Python script error.",
"expression": arg_string,
"details": "Error evaluating expression or accessing GDB value."})
gdb.write(f"{error_json}\n")
except Exception as e_py:
# Handle other Python exceptions that might occur within this script's logic
error_json = json.dumps({"gdb_script_error": str(e_py),
"expression": arg_string,
"details": f"Python error in GDB dumper script: {traceback.format_exc()}"})
gdb.write(f"{error_json}\n")
"details": "Unexpected Python error in GDB dumper script. Check gdb_dumper_debug.log for details."}
gdb.write("{}\n".format(json.dumps(error_payload)))
finally:
# Ensure the end delimiter is always printed, even if an error occurred
gdb.write(f"{self.output_end_delimiter}\n")
gdb.write("{}\n".format(self.output_end_delimiter))
gdb.flush()
_dumper_log_write(f"--- dump_json command finished for arg: '{arg_string}' ---")
# Register the command with GDB when this script is sourced.
GDBDumpJsonCommand()
_dumper_log_write("--- GDB Dumper Script Fully Parsed and Command Registered ---")

todo.md
View File

@@ -20,3 +20,33 @@ dump_to_csv myVector
You will find output.json and output.csv in the project root.
////////////////////////////
Reading symbols from C:\src\____GitProjects\cpp_python_debug\ws_luna\test_cpp_python\Debug\test_cpp_python.exe...
(gdb) b 25
Breakpoint 1 at 0x401740: file ../src/test_cpp_python.cpp, line 25.
(gdb) run
Starting program: C:\src\____GitProjects\cpp_python_debug\ws_luna\test_cpp_python\Debug\test_cpp_python.exe
[New Thread 6004.0x2004]
[New Thread 6004.0x2f04]
[New Thread 6004.0x21b4]
Thread 1 hit Breakpoint 1, main () at ../src/test_cpp_python.cpp:25
25 std::cout << "Break here" << std::endl; // <-- stop point
(gdb) source C:\src\____GitProjects\cpp_python_debug\cpp_python_debug\core\gdb_dumper.py
(gdb) dump_json myInt
START_JSON_OUTPUT
987
END_JSON_OUTPUT
(gdb) dump_json myDouble
START_JSON_OUTPUT
123.456
END_JSON_OUTPUT
(gdb) dump_json myStruct
START_JSON_OUTPUT
DEBUG_STRING_TRACE: _serializ

ws_luna/test_cpp_python/.gitignore (new file)
View File

@@ -0,0 +1 @@
/Debug/

View File

@@ -10,7 +10,15 @@ struct MyData {
int main() {
std::vector<MyData> myVector;
for (int i = 0; i < 5; ++i) {
int myInt = 987;
double myDouble = 123.456;
MyData myStruct;
myStruct.id = 99;
myStruct.name = "prova testo 1";
myStruct.value = 98765.4321;
for (int i = 0; i < 10000; ++i) {
myVector.push_back({i, "Item" + std::to_string(i), i * 1.5});
}