""" Action handlers for PyUCC GUI. Separates business logic from GUI construction. """ import os import time import types from pathlib import Path from tkinter import messagebox import tkinter as tk from tkinter import ttk from ..core.scanner import find_source_files from ..core.differ import Differ, BaselineManager from ..config import settings as app_settings from .dialogs import DuplicatesDialog class ActionHandlers: """Handles all main actions (scan, countings, metrics, differ) for the GUI.""" def __init__(self, gui_app): """ Args: gui_app: Reference to the main GUI application instance """ self.app = gui_app self._configure_tree_tags() def handle_scan(self): """Execute scan action: find source files matching profile filters.""" self.app.log("Action: Scan started", level="INFO") self.app._set_phase("Scanning...") self.app._current_mode = "scan" self.app._current_action = "scan" # Disable action buttons during operation self.app._disable_action_buttons() # Reset UI self._reset_progress_ui() tooltips = {"name": "File name", "path": "Relative path from project root"} self.app._set_results_columns(("name", "path"), tooltips=tooltips) self._clear_results() self.app.update_idletasks() # Get paths and filters paths, allowed_exts, ignore_patterns, pr = ( self.app._resolve_profile_and_filters() ) if not paths: messagebox.showwarning( "Missing path", "Please select a folder or profile to analyze first." ) return # Store project root for report generation self.app._last_scan_root = paths[0] if paths else None # Submit scan task def _scan_task(paths): files = [] for p in paths: pth = Path(p) try: if pth.is_dir(): files.extend( find_source_files( pth, allowed_extensions=allowed_exts, ignore_patterns=ignore_patterns, ) ) elif pth.is_file(): files.append(pth) except Exception: continue # Deduplicate seen = set() unique = [] for f in files: s = str(f) if s not in seen: seen.add(s) unique.append(f) return unique self.app._current_task_id = self.app.worker.submit( _scan_task, paths, kind="thread", on_done=self.app._scan_done ) self.app.cancel_btn.config(state="normal") def handle_countings(self): """Execute countings action: analyze code lines (code/comment/blank).""" self.app.log("Action: Countings started", level="INFO") self.app._set_phase("Counting...") self.app._current_mode = "countings" self.app._current_action = "countings" # For report generation # Disable action buttons during operation self.app._disable_action_buttons() # Reset UI self._reset_progress_ui() tooltips = { "name": "File name", "path": "Relative path from project root", "code": "Code lines: executable lines excluding comments and blanks", "comment": "Comment lines: lines containing only comments", "blank": "Blank lines: empty lines", "total": "Total physical lines: sum of code + comment + blank", "language": "Detected programming language", # UCC extended metrics (shown for C/C++ files) "cmt_whole": "UCC: Whole comment lines (comments on their own line)", "cmt_embed": "UCC: Embedded comments (comments on same line as code)", "directive": "UCC: Compiler directives (#include, #define, etc.)", "data_decl": "UCC: Data declarations (variables, types, etc.)", "exec_inst": "UCC: Executable instructions (statements, calls, etc.)", "logical": "UCC: Logical SLOC (count of logical statements)", "physical": "UCC: Physical SLOC (non-blank, non-comment lines)", } self.app._set_results_columns( ( "name", "path", "code", "comment", "blank", "total", "language", "cmt_whole", "cmt_embed", "directive", "data_decl", "exec_inst", "logical", 
"physical", ), tooltips=tooltips, ) self._clear_results() self.app.update_idletasks() paths, allowed_exts, ignore_patterns, pr = ( self.app._resolve_profile_and_filters() ) if not paths: messagebox.showwarning( "Missing path", "Please select a folder or profile to analyze first." ) return # Store project root for report generation self.app._last_scan_root = paths[0] if paths else None # Gather files first, then analyze def _gather_files(paths): return self._gather_source_files(paths, allowed_exts, ignore_patterns) def _on_gather_done(results): files = results or [] if not files: messagebox.showinfo("Countings", "No files found to analyze.") self.app._set_phase("Idle") self.app._enable_action_buttons() return from ..core.countings_impl import analyze_file_counts self.app._total_files = len(files) self.app._processed_files = 0 try: self.app.progress["maximum"] = max(1, self.app._total_files) self.app.progress["value"] = 0 self.app._lbl_files.config(text=f"Files: 0/{self.app._total_files}") except Exception: pass self.app._set_phase(f"Analyzing {self.app._total_files} files...") self.app.update_idletasks() self.app._current_task_id = self.app.worker.map_iterable( func=analyze_file_counts, items=files, kind="thread", on_progress=self.app._on_count_progress, on_done=self.app._on_count_done, ) self.app._set_phase("Gathering files...") self.app.update_idletasks() self.app._current_task_id = self.app.worker.submit( _gather_files, paths, kind="thread", on_done=_on_gather_done ) self.app.cancel_btn.config(state="normal") def handle_metrics(self): """Execute metrics action: compute complexity metrics (CC, MI).""" self.app.log("Action: Metrics started", level="INFO") self.app._set_phase("Computing metrics...") self.app._current_mode = "metrics" self.app._current_action = "metrics" # Reset metrics view to "By File" self.app.metrics_view_mode.set("file") self.app.metrics_toggle_frame.grid_remove() # Will be shown when done # Disable action buttons during operation self.app._disable_action_buttons() # Reset UI self._reset_progress_ui() tooltips = { "name": "File Name", "path": "Full Path", "total_cc": "Total Cyclomatic Complexity: sum of complexity for all functions in file", "avg_cc": "Average Cyclomatic Complexity: average complexity per function", "risk": "Risk Level: Low (≤10) | Medium (≤20) | High (≤50) | Very High (>50)", "mi": "Maintainability Index (0-100): higher is better\n85-100 = Excellent | 65-84 = Good | 0-64 = Needs attention", } self.app._set_results_columns( ("name", "path", "total_cc", "avg_cc", "risk", "mi"), tooltips=tooltips, ) self._clear_results() self.app.update_idletasks() paths, allowed_exts, ignore_patterns, pr = ( self.app._resolve_profile_and_filters() ) if not paths: messagebox.showwarning( "Missing path", "Please select a folder or profile to analyze first." 
) return # Store project root for report generation self.app._last_scan_root = paths[0] if paths else None # Gather files first, then analyze def _gather_files(paths): return self._gather_source_files(paths, allowed_exts, ignore_patterns) def _on_gather_done(results): files = results or [] if not files: messagebox.showinfo("Metrics", "No files found to analyze.") self.app._set_phase("Idle") self.app._enable_action_buttons() return try: from ..core.metrics import analyze_file_metrics as analyzer except Exception: messagebox.showerror("Metrics", "Metrics analyzer not available") self.app._set_phase("Idle") self.app._enable_action_buttons() return self.app._total_files = len(files) self.app._processed_files = 0 try: self.app.progress["maximum"] = max(1, self.app._total_files) self.app.progress["value"] = 0 self.app._lbl_files.config(text=f"Files: 0/{self.app._total_files}") except Exception: pass self.app._set_phase( f"Computing metrics for {self.app._total_files} files..." ) self.app.update_idletasks() self.app._current_task_id = self.app.worker.map_iterable( func=analyzer, items=files, kind="thread", on_progress=self.app._on_metrics_progress, on_done=self.app._on_metrics_done, ) self.app._set_phase("Gathering files...") self.app.update_idletasks() self.app._current_task_id = self.app.worker.submit( _gather_files, paths, kind="thread", on_done=_on_gather_done ) self.app.cancel_btn.config(state="normal") def handle_differ(self): """Execute differ action: compare current code with baseline.""" self.app.log("Action: Differing started", level="INFO") self.app._set_phase("Differing...") self.app._current_action = "differ" # Show columns as Current (codebase) and Baseline tooltips = { "Current": "Current file path", "Baseline": "Baseline file path", "added": "Added files: present in current but not in baseline", "deleted": "Deleted files: present in baseline but not in current", "modified": "Modified files: content changed between baseline and current", "unmodified": "Unmodified files: identical in both versions", "Δ code_lines": "Delta Code Lines: change in executable lines (+positive / -negative)", "Δ comment_lines": "Delta Comment Lines: change in comment lines", "Δ blank_lines": "Delta Blank Lines: change in empty lines", "Δ func_count": "Current Function Count (delta vs baseline)\nShows absolute value with change in parentheses", "Δ avg_cc": "Current Avg Cyclomatic Complexity (delta vs baseline)\nShows absolute value with change in parentheses", "Δ mi": "Current Maintainability Index (delta vs baseline)\nShows absolute value with change in parentheses", # UCC extended delta columns "Δ cmt_whole": "Delta Whole Comments: change in whole-line comments", "Δ cmt_embed": "Delta Embedded Comments: change in embedded comments", "Δ directive": "Delta Compiler Directives: change in preprocessor directives", "Δ data_decl": "Delta Data Declarations: change in variable/type declarations", "Δ exec_inst": "Delta Exec Instructions: change in executable statements", "Δ logical": "Delta Logical SLOC: change in logical statement count", "Δ physical": "Delta Physical SLOC: change in physical non-blank/non-comment lines", } self.app._set_results_columns( ( "Current", "Baseline", "added", "deleted", "modified", "unmodified", "Δ code_lines", "Δ comment_lines", "Δ blank_lines", "Δ func_count", "Δ avg_cc", "Δ mi", "Δ cmt_whole", "Δ cmt_embed", "Δ directive", "Δ data_decl", "Δ exec_inst", "Δ logical", "Δ physical", ), tooltips=tooltips, ) self._clear_results() self.app._current_mode = "differ" # Disable action buttons 
during operation self.app._disable_action_buttons() try: self.app._lbl_files.config(text="Files: 0/0") self.app.progress["maximum"] = 1 self.app.progress["value"] = 0 except Exception: pass self.app.update_idletasks() paths, allowed_exts, ignore_patterns, pr = ( self.app._resolve_profile_and_filters() ) if not paths: messagebox.showwarning( "Missing path", "Please select a folder or profile to analyze first." ) return project = paths[0] self.app._set_phase("Loading baselines...") self.app.update_idletasks() bm = BaselineManager(project) all_baselines = bm.list_baselines() profile_name = pr.get("name") if pr else None max_keep = app_settings.get_max_keep() # Filter baselines by current profile baselines = [] for baseline_id in all_baselines: try: meta = bm.load_metadata(baseline_id) baseline_profile = meta.profile if hasattr(meta, "profile") else None # Match baselines with same profile (or both None) if baseline_profile == profile_name: baselines.append(baseline_id) except Exception: # Skip baselines that can't be loaded pass # Callback handlers def _on_create_done(baseline_id): try: self.app._set_phase("Idle") self.app._current_task_id = None self.app._enable_action_buttons() # After baseline is created, export the differ `result` (if available) to CSV and text report inside baseline folder try: from ..utils.csv_exporter import export_rows_to_csv from ..utils.report_generator import generate_differ_report bdir = bm._baseline_dir(baseline_id) csv_path = os.path.join(bdir, "diff.csv") report_path = os.path.join(bdir, "diff_report.txt") result = getattr(self.app, "_current_results", None) if result: headers = [c for c in self.app.results_tree["columns"]] # Build rows from result['pairs'] to ensure consistency with computed data def _row_from_pair(p): baseline_name = p.get("fileA") or "" current_name = p.get("fileB") or "" counts = p.get("counts") or {} cd = p.get("countings_delta") md = p.get("metrics_delta") # ensure simple deltas when missing if cd is None: base = p.get("baseline_countings") cur = p.get("current_countings") if base is None and cur is not None: cd = { "physical_lines": cur.get("physical_lines", 0), "code_lines": cur.get("code_lines", 0), "comment_lines": cur.get("comment_lines", 0), "blank_lines": cur.get("blank_lines", 0), } elif cur is None and base is not None: cd = { "physical_lines": 0 - base.get("physical_lines", 0), "code_lines": 0 - base.get("code_lines", 0), "comment_lines": 0 - base.get("comment_lines", 0), "blank_lines": 0 - base.get("blank_lines", 0), } # Format numeric values similarly to UI (no sign formatting in CSV values) return ( current_name, baseline_name, counts.get("added", 0), counts.get("deleted", 0), counts.get("modified", 0), counts.get("unmodified", 0), (cd.get("code_lines") if cd else ""), (cd.get("comment_lines") if cd else ""), (cd.get("blank_lines") if cd else ""), (md.get("func_count") if md else ""), (md.get("avg_cc") if md else ""), (md.get("mi") if md else ""), ) rows = (_row_from_pair(p) for p in result.get("pairs", [])) export_rows_to_csv(csv_path, headers, rows) # Generate text report profile_config = { "name": profile_name, "root": project, "paths": paths if paths else [project], "languages": allowed_exts if allowed_exts else [], "exclude_patterns": ignore_patterns, } generate_differ_report( result, profile_config, baseline_id, report_path ) self.app.log(f"Differ report saved to: {report_path}", level="INFO") except Exception as e: # non-fatal: continue even if export fails self.app.log( f"Failed to export differ results: {e}", 
level="WARNING" ) # Show summary dialog self._show_differ_summary_dialog(result, baseline_id, bdir) self.app.log(f"New baseline created: {baseline_id}", level="INFO") except Exception: self.app._set_phase("Idle") self.app._current_task_id = None self.app._enable_action_buttons() def _on_diff_done(result): try: total = result.get("total", {}) self.app.log( f"Differ finished: added={total.get('added',0)} deleted={total.get('deleted',0)} modified={total.get('modified',0)}", level="INFO", ) self.app.export_btn.config(state="normal") self.app._set_phase("Idle") self.app._current_task_id = None # Hide metrics toggle (not relevant for differ) self.app.metrics_toggle_frame.grid_remove() self.app._enable_action_buttons() # Show summary dialog first (non-blocking) self._show_differ_summary_with_baseline_prompt( result, bm, project, ignore_patterns, profile_name, max_keep, _on_create_done, ) except Exception as e: messagebox.showerror("Differ Error", str(e)) self.app._set_phase("Idle") self.app._enable_action_buttons() # If no baseline exists, create the first baseline without diffing if not baselines: self.app.log("No baseline found: creating first baseline", level="INFO") messagebox.showinfo( "Differing", "No baseline found for this project.\n\nCreating the first baseline now.\n\nAfter this, you can run Differ again to compare changes.", ) try: self.app._set_phase("Creating first baseline...") self.app._current_task_id = self.app.worker.submit( bm.create_baseline_from_dir, project, None, True, True, ignore_patterns, profile_name, max_keep, kind="thread", on_done=lambda baseline_id: self._on_first_baseline_created( baseline_id ), ) self.app.cancel_btn.config(state="normal") except Exception as e: messagebox.showerror( "Create Baseline", f"Failed to create baseline: {e}" ) self.app._set_phase("Idle") self.app._enable_action_buttons() return # Select baseline to compare with chosen = self._select_baseline_dialog(baselines) if not chosen: self.app._set_phase("Idle") self.app._enable_action_buttons() return try: meta = bm.load_metadata(chosen) self.app.log(f"Loaded baseline: {chosen}", level="INFO") except Exception as e: messagebox.showerror("Differing", f"Failed to load baseline metadata: {e}") self.app._set_phase("Idle") self.app._enable_action_buttons() return # Setup baseline and current roots for diff viewer self._setup_differ_roots(bm, chosen, meta, project) # Build file lists and start diff baseline_files_dir = bm.get_baseline_files_dir(chosen) differ = Differ( meta, project, ignore_patterns=ignore_patterns, baseline_files_dir=baseline_files_dir, ) self.app._set_phase("Scanning current files...") self.app.update_idletasks() try: current_files = differ.build_current_file_list() except Exception: messagebox.showerror("Differing", "Failed to scan current project files") self.app._set_phase("Idle") self.app._enable_action_buttons() return self.app._set_phase("Building pairs...") self.app.update_idletasks() try: pairs = differ.match_files(differ.baseline.files, current_files) except Exception: messagebox.showerror("Differing", "Failed to build file pairs") self.app._set_phase("Idle") self.app._enable_action_buttons() return if not pairs: messagebox.showinfo("Differing", "No files to compare") self.app._set_phase("Idle") self.app._enable_action_buttons() return # Process pairs in parallel self._run_differ_worker(differ, pairs, project, meta, _on_diff_done) def _on_first_baseline_created(self, baseline_id): """Callback after creating the very first baseline (no diff performed).""" try: 
self.app._set_phase("Idle") self.app._current_task_id = None self.app._enable_action_buttons() self.app.log(f"First baseline created: {baseline_id}", level="INFO") messagebox.showinfo( "Baseline Created", f"First baseline created successfully:\n\n{baseline_id}\n\nYou can now run Differ to see changes from this baseline.", ) except Exception: self.app._set_phase("Idle") self.app._current_task_id = None self.app._enable_action_buttons() def _run_differ_worker(self, differ, pairs, project, meta, on_diff_done): """Start background worker to process differ pairs.""" from ..core.countings_impl import analyze_file_counts from ..core.metrics import analyze_file_metrics self.app._processed_files = 0 try: self.app.progress["maximum"] = max(1, len(pairs)) self.app.progress["value"] = 0 self.app._lbl_files.config(text=f"Files: 0/{len(pairs)}") except Exception: pass self.app._set_phase(f"Comparing {len(pairs)} file pairs...") self.app.update_idletasks() # Process pair function def _process_pair(pair): a, b = pair res = { "fileA": a.path if a is not None else None, "fileB": b.path if b is not None else None, "counts": None, "file_meta": None, "baseline_countings": None, "current_countings": None, "countings_delta": None, "baseline_metrics": None, "current_metrics": None, "metrics_delta": None, } # Determine file status: added, deleted, or modified # Check if both files exist and have same SHA1 hash if a is None and b is not None: # File added res["counts"] = { "added": 1, "deleted": 0, "modified": 0, "unmodified": 0, } elif a is not None and b is None: # File deleted res["counts"] = { "added": 0, "deleted": 1, "modified": 0, "unmodified": 0, } elif a is not None and b is not None: # Both exist: check if modified (compare SHA1 hash) # Compute current file SHA1 if not already present if not hasattr(b, "sha1") or b.sha1 is None: try: import hashlib file_path = Path(os.path.join(project, b.path)) h = hashlib.sha1() with file_path.open("rb") as f: for chunk in iter(lambda: f.read(8192), b""): h.update(chunk) b.sha1 = h.hexdigest() except Exception: b.sha1 = None # Compare hashes if hasattr(a, "sha1") and a.sha1 and b.sha1 and a.sha1 == b.sha1: # Identical files res["counts"] = { "added": 0, "deleted": 0, "modified": 0, "unmodified": 1, } else: # Modified file res["counts"] = { "added": 0, "deleted": 0, "modified": 1, "unmodified": 0, } else: # Both None (shouldn't happen) res["counts"] = { "added": 0, "deleted": 0, "modified": 0, "unmodified": 0, } # Extract baseline countings and metrics if a is not None: if hasattr(a, "countings") and a.countings: res["baseline_countings"] = a.countings if hasattr(a, "metrics") and a.metrics: res["baseline_metrics"] = a.metrics # Extract current countings and metrics if b is not None: # Compute on-the-fly if missing need_countings = not (hasattr(b, "countings") and b.countings) need_metrics = not (hasattr(b, "metrics") and b.metrics) if need_countings: try: counts_result = analyze_file_counts( Path(os.path.join(project, b.path)) ) b.countings = { "physical_lines": counts_result.get("physical_lines", 0), "code_lines": counts_result.get("code_lines", 0), "comment_lines": counts_result.get("comment_lines", 0), "blank_lines": counts_result.get("blank_lines", 0), } except Exception: b.countings = None if need_metrics: try: metrics_result = analyze_file_metrics( Path(os.path.join(project, b.path)) ) b.metrics = { "func_count": metrics_result.get("func_count", 0), "avg_cc": metrics_result.get("avg_cc", 0.0), "max_cc": metrics_result.get("max_cc", 0), "mi": 
metrics_result.get("mi", 0.0), } except Exception: b.metrics = None file_entry = { "path": b.path, "size": b.size, "mtime": b.mtime, "sha1": b.sha1, } if hasattr(b, "countings") and b.countings: file_entry["countings"] = b.countings res["current_countings"] = b.countings else: file_entry["countings"] = None if hasattr(b, "metrics") and b.metrics: file_entry["metrics"] = b.metrics res["current_metrics"] = b.metrics else: file_entry["metrics"] = None res["file_meta"] = file_entry # Compute deltas if res["baseline_countings"] and res["current_countings"]: res["countings_delta"] = { "physical_lines": res["current_countings"]["physical_lines"] - res["baseline_countings"].get("physical_lines", 0), "code_lines": res["current_countings"]["code_lines"] - res["baseline_countings"].get("code_lines", 0), "comment_lines": res["current_countings"]["comment_lines"] - res["baseline_countings"].get("comment_lines", 0), "blank_lines": res["current_countings"]["blank_lines"] - res["baseline_countings"].get("blank_lines", 0), # UCC extended deltas "comment_whole": res["current_countings"].get("comment_whole", 0) - res["baseline_countings"].get("comment_whole", 0), "comment_embedded": res["current_countings"].get( "comment_embedded", 0 ) - res["baseline_countings"].get("comment_embedded", 0), "compiler_directives": res["current_countings"].get( "compiler_directives", 0 ) - res["baseline_countings"].get("compiler_directives", 0), "data_declarations": res["current_countings"].get( "data_declarations", 0 ) - res["baseline_countings"].get("data_declarations", 0), "exec_instructions": res["current_countings"].get( "exec_instructions", 0 ) - res["baseline_countings"].get("exec_instructions", 0), "logical_sloc": res["current_countings"].get("logical_sloc", 0) - res["baseline_countings"].get("logical_sloc", 0), "physical_sloc": res["current_countings"].get("physical_sloc", 0) - res["baseline_countings"].get("physical_sloc", 0), } if res["baseline_metrics"] and res["current_metrics"]: res["metrics_delta"] = { "func_count": res["current_metrics"]["func_count"] - res["baseline_metrics"].get("func_count", 0), "avg_cc": res["current_metrics"]["avg_cc"] - res["baseline_metrics"].get("avg_cc", 0.0), "max_cc": res["current_metrics"]["max_cc"] - res["baseline_metrics"].get("max_cc", 0), "mi": res["current_metrics"]["mi"] - res["baseline_metrics"].get("mi", 0.0), } # Compute metrics delta when one side missing (treat missing as zero) if res.get("metrics_delta") is None: base_m = res.get("baseline_metrics") cur_m = res.get("current_metrics") if base_m is None and cur_m is not None: # additions: delta == current res["metrics_delta"] = { "func_count": cur_m.get("func_count", 0), "avg_cc": cur_m.get("avg_cc", 0.0), "max_cc": cur_m.get("max_cc", 0), "mi": cur_m.get("mi", 0.0), } elif cur_m is None and base_m is not None: # deletions: delta == 0 - baseline res["metrics_delta"] = { "func_count": 0 - base_m.get("func_count", 0), "avg_cc": 0.0 - base_m.get("avg_cc", 0.0), "max_cc": 0 - base_m.get("max_cc", 0), "mi": 0.0 - base_m.get("mi", 0.0), } return res # Batch buffer for GUI updates _batch_buffer = [] _batch_size = 50 # Progress handler def _on_progress(item_res): try: # Columns: Current (codebase) and Baseline baseline = item_res.get("fileA") or "" current = item_res.get("fileB") or "" counts = item_res.get("counts") or {} countings_delta = item_res.get("countings_delta") metrics_delta = item_res.get("metrics_delta") # If deltas are not provided, compute simple deltas when one side is missing if countings_delta is None: base = 
item_res.get("baseline_countings") cur = item_res.get("current_countings") if base is None and cur is not None: # additions: delta == current countings_delta = { "physical_lines": cur.get("physical_lines", 0), "code_lines": cur.get("code_lines", 0), "comment_lines": cur.get("comment_lines", 0), "blank_lines": cur.get("blank_lines", 0), "comment_whole": cur.get("comment_whole", 0), "comment_embedded": cur.get("comment_embedded", 0), "compiler_directives": cur.get("compiler_directives", 0), "data_declarations": cur.get("data_declarations", 0), "exec_instructions": cur.get("exec_instructions", 0), "logical_sloc": cur.get("logical_sloc", 0), "physical_sloc": cur.get("physical_sloc", 0), } elif cur is None and base is not None: # deletions: delta == 0 - baseline countings_delta = { "physical_lines": 0 - base.get("physical_lines", 0), "code_lines": 0 - base.get("code_lines", 0), "comment_lines": 0 - base.get("comment_lines", 0), "blank_lines": 0 - base.get("blank_lines", 0), "comment_whole": 0 - base.get("comment_whole", 0), "comment_embedded": 0 - base.get("comment_embedded", 0), "compiler_directives": 0 - base.get("compiler_directives", 0), "data_declarations": 0 - base.get("data_declarations", 0), "exec_instructions": 0 - base.get("exec_instructions", 0), "logical_sloc": 0 - base.get("logical_sloc", 0), "physical_sloc": 0 - base.get("physical_sloc", 0), } # Format delta values with explicit sign (include + for zero/positive) def format_delta(value, is_float=False): if value is None: return "" if is_float: formatted = f"{abs(value):.2f}" sign = "+" if value >= 0 else "-" return f"{sign}{formatted}" else: ival = int(value) formatted = str(abs(ival)) sign = "+" if ival >= 0 else "-" return f"{sign}{formatted}" # Format metrics: show absolute current value with delta in parentheses (only when baseline exists) def format_metric( current_val, delta_val, baseline_exists, is_float=False ): if current_val is None: return "" # Show delta in parentheses when baseline exists (even if delta is zero) if baseline_exists and delta_val is not None: # Show absolute value with delta in parentheses if is_float: abs_str = f"{current_val:.2f}" delta_str = format_delta(delta_val, is_float=True) else: abs_str = str(int(current_val)) delta_str = format_delta(delta_val, is_float=False) return f"{abs_str} ({delta_str})" else: # No baseline: show only absolute value if is_float: return f"{current_val:.2f}" else: return str(int(current_val)) delta_code = format_delta( countings_delta.get("code_lines") if countings_delta else None ) delta_comment = format_delta( countings_delta.get("comment_lines") if countings_delta else None ) delta_blank = format_delta( countings_delta.get("blank_lines") if countings_delta else None ) # UCC extended deltas delta_cmt_whole = format_delta( countings_delta.get("comment_whole") if countings_delta else None ) delta_cmt_embed = format_delta( countings_delta.get("comment_embedded") if countings_delta else None ) delta_directive = format_delta( countings_delta.get("compiler_directives") if countings_delta else None ) delta_data_decl = format_delta( countings_delta.get("data_declarations") if countings_delta else None ) delta_exec_inst = format_delta( countings_delta.get("exec_instructions") if countings_delta else None ) delta_logical = format_delta( countings_delta.get("logical_sloc") if countings_delta else None ) delta_physical = format_delta( countings_delta.get("physical_sloc") if countings_delta else None ) # Metrics: show absolute current value with delta in parentheses (only when 
baseline exists) # Check if baseline exists by verifying baseline_id is not "__empty__" baseline_exists = meta.baseline_id != "__empty__" cur_metrics = item_res.get("current_metrics") delta_func = format_metric( cur_metrics.get("func_count") if cur_metrics else None, metrics_delta.get("func_count") if metrics_delta else None, baseline_exists, ) delta_cc = format_metric( cur_metrics.get("avg_cc") if cur_metrics else None, metrics_delta.get("avg_cc") if metrics_delta else None, baseline_exists, is_float=True, ) delta_mi = format_metric( cur_metrics.get("mi") if cur_metrics else None, metrics_delta.get("mi") if metrics_delta else None, baseline_exists, is_float=True, ) # Add to batch buffer with metadata for tag application row_data = { "values": ( current, baseline, counts.get("added", 0), counts.get("deleted", 0), counts.get("modified", 0), counts.get("unmodified", 0), delta_code, delta_comment, delta_blank, delta_func, delta_cc, delta_mi, delta_cmt_whole, delta_cmt_embed, delta_directive, delta_data_decl, delta_exec_inst, delta_logical, delta_physical, ), "countings_delta": countings_delta, "metrics_delta": metrics_delta, "counts": counts, } _batch_buffer.append(row_data) # Insert batch when buffer is full if len(_batch_buffer) >= _batch_size: for row_data in _batch_buffer: item_id = self.app.results_tree.insert( "", "end", values=row_data["values"] ) # Apply color tags based on deltas and counts self._apply_delta_tags( item_id, row_data["countings_delta"], row_data["metrics_delta"], row_data.get("counts"), ) _batch_buffer.clear() # Update progress (less frequent) try: self.app._processed_files = ( getattr(self.app, "_processed_files", 0) + 1 ) if ( self.app._processed_files % 10 == 0 or self.app._processed_files == len(pairs) ): self.app._lbl_files.config( text=f"Files: {self.app._processed_files}/{len(pairs)}" ) self.app.progress["maximum"] = max(1, len(pairs)) self.app.progress["value"] = self.app._processed_files except Exception: pass except Exception: pass # Done handler def _on_done(all_results): # Flush remaining batch buffer if _batch_buffer: for row_data in _batch_buffer: item_id = self.app.results_tree.insert( "", "end", values=row_data["values"] ) # Apply color tags based on deltas and counts self._apply_delta_tags( item_id, row_data["countings_delta"], row_data["metrics_delta"], row_data.get("counts"), ) _batch_buffer.clear() # Compute totals total = {"added": 0, "deleted": 0, "modified": 0, "unmodified": 0} current_files_list = [] for it in all_results: c = it.get("counts", {}) total["added"] += c.get("added", 0) total["deleted"] += c.get("deleted", 0) total["modified"] += c.get("modified", 0) total["unmodified"] += c.get("unmodified", 0) fm = it.get("file_meta") if fm: current_files_list.append(fm) result = { "baseline_id": meta.baseline_id, "compared_at": time.time(), "total": total, "pairs": all_results, } # Build summary statistics try: baseline_counts = { "physical_lines": 0, "code_lines": 0, "comment_lines": 0, "blank_lines": 0, "file_count": 0, } baseline_metrics = { "file_count": 0, "total_func_count": 0, "avg_avg_cc": 0.0, "avg_mi": 0.0, } baseline_metrics_count = 0 for fm in meta.files: if hasattr(fm, "countings") and fm.countings: baseline_counts["physical_lines"] += fm.countings.get( "physical_lines", 0 ) baseline_counts["code_lines"] += fm.countings.get( "code_lines", 0 ) baseline_counts["comment_lines"] += fm.countings.get( "comment_lines", 0 ) baseline_counts["blank_lines"] += fm.countings.get( "blank_lines", 0 ) baseline_counts["file_count"] += 1 if 
hasattr(fm, "metrics") and fm.metrics: baseline_metrics["total_func_count"] += fm.metrics.get( "func_count", 0 ) baseline_metrics["avg_avg_cc"] += fm.metrics.get("avg_cc", 0.0) baseline_metrics["avg_mi"] += fm.metrics.get("mi", 0.0) baseline_metrics["file_count"] += 1 baseline_metrics_count += 1 if baseline_metrics_count > 0: baseline_metrics["avg_avg_cc"] /= baseline_metrics_count baseline_metrics["avg_mi"] /= baseline_metrics_count current_counts = { "physical_lines": 0, "code_lines": 0, "comment_lines": 0, "blank_lines": 0, "file_count": 0, } current_metrics = { "file_count": 0, "total_func_count": 0, "avg_avg_cc": 0.0, "avg_mi": 0.0, } current_metrics_count = 0 for fm in current_files_list: c = fm.get("countings") if c: current_counts["physical_lines"] += c.get("physical_lines", 0) current_counts["code_lines"] += c.get("code_lines", 0) current_counts["comment_lines"] += c.get("comment_lines", 0) current_counts["blank_lines"] += c.get("blank_lines", 0) current_counts["file_count"] += 1 m = fm.get("metrics") if m: current_metrics["total_func_count"] += m.get("func_count", 0) current_metrics["avg_avg_cc"] += m.get("avg_cc", 0.0) current_metrics["avg_mi"] += m.get("mi", 0.0) current_metrics["file_count"] += 1 current_metrics_count += 1 if current_metrics_count > 0: current_metrics["avg_avg_cc"] /= current_metrics_count current_metrics["avg_mi"] /= current_metrics_count result["summary"] = { "baseline": { "countings": baseline_counts, "metrics": baseline_metrics, }, "current": { "countings": current_counts, "metrics": current_metrics, }, } except Exception: pass self.app._current_results = result on_diff_done(result) self.app._current_task_id = self.app.worker.map_iterable( _process_pair, pairs, kind="thread", on_progress=_on_progress, on_done=_on_done, ) self.app.cancel_btn.config(state="normal") def handle_duplicates(self): """Execute duplicates action: find duplicate files in project.""" self.app.log("Action: Duplicates started", level="INFO") self.app._set_phase("Finding duplicates...") self.app._current_mode = "duplicates" self.app._current_action = "duplicates" # Disable action buttons during operation self.app._disable_action_buttons() # Reset UI self._reset_progress_ui() tooltips = { "file_a": "First file in duplicate pair", "file_b": "Second file in duplicate pair", "match_type": "Match type: exact or fuzzy", "pct_change": "Estimated percent of changed lines (for fuzzy matches)", } self.app._set_results_columns( ("file_a", "file_b", "match_type", "pct_change"), tooltips=tooltips ) self._clear_results() self.app.update_idletasks() paths, allowed_exts, ignore_patterns, pr = ( self.app._resolve_profile_and_filters() ) if not paths: messagebox.showwarning( "Missing path", "Please select a folder or profile to analyze first." ) return # Store project root for report generation self.app._last_scan_root = paths[0] if paths else None # Load last-used settings (or defaults) saved = app_settings.get_duplicates_settings() or {} init = { "threshold": saved.get("threshold", 5.0), # Prefer profile-provided extensions if available, otherwise use saved "extensions": ( list(allowed_exts) if allowed_exts else saved.get("extensions", None) ), "k": saved.get("k", 25), "window": saved.get("window", 4), } # Determine whether extensions should be editable: if the current profile # provided allowed_exts, we should not allow editing here (they're fixed). 
        allow_edit = not bool(allowed_exts)

        # Show modal dialog to let user confirm/modify parameters
        # If profile defines extensions, present them but disable editing.
        dlg = DuplicatesDialog(self.app, initial=init, allow_edit_extensions=allow_edit)
        self.app.wait_window(dlg)
        if not getattr(dlg, "result", None):
            # user cancelled
            self.app._set_phase("Idle")
            self.app._enable_action_buttons()
            return
        params = dlg.result

        # Persist chosen settings for next time
        try:
            app_settings.set_duplicates_settings(
                threshold=params.get("threshold", 5.0),
                extensions=params.get("extensions"),
                k=params.get("k", 25),
                window=params.get("window", 4),
            )
        except Exception:
            pass

        # Run duplicate finder in background with selected params
        def _dup_task(root, exts, threshold, k, window, ignore_pats):
            from ..core import duplicates as dupmod
            from ..core.scanner import find_source_files

            # compute file list according to current profile/ignore rules
            try:
                files = find_source_files(
                    Path(root), allowed_extensions=exts, ignore_patterns=ignore_pats
                )
                file_list = [str(p) for p in files]
            except Exception:
                file_list = None
            return dupmod.find_duplicates_in_dir(
                root=root,
                extensions=exts,
                dup_threshold=threshold,
                k=k,
                window=window,
                file_list=file_list,
            )

        def _on_done(result):
            try:
                # result is dict with keys 'exact' and 'fuzzy', each a list of (a,b)
                exact = result.get("exact", []) if isinstance(result, dict) else []
                fuzzy = result.get("fuzzy", []) if isinstance(result, dict) else []

                # store results cache for export/report
                rows = []
                for a, b in exact:
                    rows.append(
                        {
                            "file_a": a,
                            "file_b": b,
                            "match_type": "exact",
                            "pct_change": 0,
                        }
                    )
                for a, b in fuzzy:
                    rows.append(
                        {
                            "file_a": a,
                            "file_b": b,
                            "match_type": "fuzzy",
                            "pct_change": "<=5%",
                        }
                    )

                # Populate tree
                for r in rows:
                    try:
                        self.app.results_tree.insert(
                            "",
                            "end",
                            values=(
                                r["file_a"],
                                r["file_b"],
                                r["match_type"],
                                r["pct_change"],
                            ),
                        )
                    except Exception:
                        pass
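                # The flat `rows` list mirrors the tree columns set up for this
                # action (file_a, file_b, match_type, pct_change), so the same
                # structure can be reused for CSV export and the text report.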
                # Save cache for export and report. Include params so reports are reproducible.
                self.app._results_cache = rows
                # store both raw results and params used
                self.app._current_results = {"duplicates": result, "params": params}
                self.app._set_phase("Idle")
                self.app._current_task_id = None
                self.app._enable_action_buttons()
            except Exception as e:
                messagebox.showerror("Duplicates", f"Error processing duplicates: {e}")
                self.app._set_phase("Idle")
                self.app._enable_action_buttons()

        exts = list(allowed_exts) if allowed_exts else None
        self.app._current_task_id = self.app.worker.submit(
            _dup_task,
            paths[0],
            exts,
            params.get("threshold", 5.0),
            params.get("k", 25),
            params.get("window", 4),
            ignore_patterns,
            kind="thread",
            on_done=_on_done,
        )
        self.app.cancel_btn.config(state="normal")

    def _setup_differ_roots(self, bm, chosen, meta, project):
        """Setup baseline and current roots for diff viewer."""
        import zipfile

        baseline_dir = bm._baseline_dir(chosen)
        baseline_snapshot_dir = os.path.join(baseline_dir, "files")
        baseline_zip = os.path.join(baseline_dir, "files.zip")

        if os.path.isdir(baseline_snapshot_dir):
            self.app._differ_baseline_root = baseline_snapshot_dir
        elif os.path.exists(baseline_zip):
            try:
                with zipfile.ZipFile(baseline_zip, "r") as zip_ref:
                    zip_ref.extractall(baseline_snapshot_dir)
                self.app._differ_baseline_root = baseline_snapshot_dir
            except Exception as e:
                self.app.log(
                    f"Failed to extract baseline snapshot: {e}", level="WARNING"
                )
                self.app._differ_baseline_root = meta.project_root
        else:
            self.app.log(
                "No baseline snapshot found, using project_root (may be inaccurate)",
                level="WARNING",
            )
            self.app._differ_baseline_root = meta.project_root
        self.app._differ_current_root = project

    def _select_baseline_dialog(self, baselines):
        """Show modal dialog to select a baseline."""
        sorted_b = sorted(baselines)
        dlg = tk.Toplevel(self.app)
        dlg.title("Select baseline to compare")
        dlg.transient(self.app)
        dlg.grab_set()

        # Center dialog
        dlg.update_idletasks()
        pw = self.app.winfo_width()
        ph = self.app.winfo_height()
        px = self.app.winfo_rootx()
        py = self.app.winfo_rooty()
        dw = dlg.winfo_reqwidth()
        dh = dlg.winfo_reqheight()
        x = px + (pw - dw) // 2
        y = py + (ph - dh) // 2
        dlg.geometry(f"+{x}+{y}")

        ttk.Label(dlg, text="Select a baseline:").grid(
            row=0, column=0, padx=12, pady=(12, 4), sticky="w"
        )
        listbox = tk.Listbox(
            dlg, height=min(10, len(sorted_b)), width=60, exportselection=False
        )
        for item in sorted_b:
            listbox.insert("end", item)
        listbox.grid(row=1, column=0, padx=12, pady=(0, 12))
        listbox.select_set(len(sorted_b) - 1)  # Select latest

        selected = {"id": None}

        def _on_ok():
            sel = listbox.curselection()
            if not sel:
                messagebox.showwarning(
                    "Select baseline", "Please select a baseline to compare"
                )
                return
            selected["id"] = listbox.get(sel[0])
            dlg.destroy()

        def _on_cancel():
            selected["id"] = None
            dlg.destroy()

        btn_frame = ttk.Frame(dlg)
        btn_frame.grid(row=2, column=0, pady=(0, 12), sticky="e", padx=12)
        ok_btn = ttk.Button(btn_frame, text="✅ OK", command=_on_ok)
        ok_btn.grid(row=0, column=0, padx=(0, 8))
        cancel_btn = ttk.Button(btn_frame, text="❌ Cancel", command=_on_cancel)
        cancel_btn.grid(row=0, column=1)

        def _on_dbl(evt):
            idx = listbox.curselection()
            if idx:
                selected["id"] = listbox.get(idx[0])
                dlg.destroy()

        # Double-click selects the baseline and closes the dialog
        listbox.bind("<Double-1>", _on_dbl)

        self.app.wait_window(dlg)
        return selected.get("id")

    def _configure_tree_tags(self):
        """Configure Treeview tags for coloring delta values."""
        # Positive changes (increases) - green tones with light green background
        self.app.results_tree.tag_configure(
            "positive",
            foreground="#006600",
            background="#e8f5e8",
            font=("TkDefaultFont", 9, "bold"),
        )
        # Negative changes (decreases)
- red tones with light red background self.app.results_tree.tag_configure( "negative", foreground="#cc0000", background="#ffe8e8", font=("TkDefaultFont", 9, "bold"), ) # Zero/neutral - default gray self.app.results_tree.tag_configure( "neutral", foreground="#666666", background="#f5f5f5" ) def _apply_delta_tags(self, item_id, countings_delta, metrics_delta, counts=None): """ Apply color tags to tree item based on delta values. Args: item_id: Treeview item identifier countings_delta: dict with code/comment/blank line deltas metrics_delta: dict with func_count/avg_cc/mi deltas """ # Determine overall change direction for visual feedback tags = [] # Check countings deltas if countings_delta: code_delta = countings_delta.get("code_lines", 0) if code_delta > 0: tags.append("positive") elif code_delta < 0: tags.append("negative") # Also consider simple added/modified/deleted counts as differences # and translate them into visual tags. Prefer "negative" for deletions # or modifications, and "positive" for additions. if counts: try: added = int(counts.get("added", 0)) deleted = int(counts.get("deleted", 0)) modified = int(counts.get("modified", 0)) except Exception: added = deleted = modified = 0 if deleted > 0 or modified > 0: if "negative" not in tags: tags.append("negative") elif added > 0: if "positive" not in tags: tags.append("positive") # Check metrics deltas (complexity increase is "negative", MI decrease is "negative") if metrics_delta: cc_delta = metrics_delta.get("avg_cc", 0) mi_delta = metrics_delta.get("mi", 0) # Complexity increase is bad (red) if cc_delta > 0.5: if "negative" not in tags: tags.append("negative") # MI decrease is bad (red) elif mi_delta < -5: if "negative" not in tags: tags.append("negative") if tags: self.app.results_tree.item(item_id, tags=tags) def _gather_source_files(self, paths, allowed_exts, ignore_patterns): """Gather source files from given paths.""" files = [] for p in paths: pth = Path(p) try: if pth.is_dir(): files.extend( find_source_files( pth, allowed_extensions=allowed_exts, ignore_patterns=ignore_patterns, ) ) elif pth.is_file(): files.append(pth) except Exception: continue # Deduplicate seen = set() unique = [] for f in files: s = str(f) if s not in seen: seen.add(s) unique.append(f) return unique def _reset_progress_ui(self): """Reset progress counters and UI elements.""" self.app._processed_files = 0 self.app._total_files = 0 try: self.app.progress["maximum"] = 1 self.app.progress["value"] = 0 self.app._lbl_files.config(text="Files: 0/0") except Exception: pass def _clear_results(self): """Clear results tree.""" for c in self.app.results_tree.get_children(""): self.app.results_tree.delete(c) def _show_differ_summary_with_baseline_prompt( self, result, bm, project, ignore_patterns, profile_name, max_keep, on_create_done_callback, ): """Show summary dialog and then ask if user wants to create a new baseline.""" import subprocess dlg = tk.Toplevel(self.app) dlg.title("Differ Summary") dlg.geometry("700x650") dlg.transient(self.app) # Center dialog dlg.update_idletasks() pw = self.app.winfo_width() ph = self.app.winfo_height() px = self.app.winfo_rootx() py = self.app.winfo_rooty() dw = 700 dh = 650 x = px + (pw - dw) // 2 y = py + (ph - dh) // 2 dlg.geometry(f"{dw}x{dh}+{x}+{y}") # Title title_frame = ttk.Frame(dlg) title_frame.pack(fill="x", padx=10, pady=10) ttk.Label( title_frame, text="Differ Summary", font=("Arial", 12, "bold"), ).pack() # Text widget with scrollbar for summary text_frame = ttk.Frame(dlg) text_frame.pack(fill="both", 
expand=True, padx=10, pady=(0, 10)) scrollbar = ttk.Scrollbar(text_frame) scrollbar.pack(side="right", fill="y") summary_text = tk.Text( text_frame, wrap="none", font=("Courier", 9), yscrollcommand=scrollbar.set, height=25, width=80, ) summary_text.pack(side="left", fill="both", expand=True) scrollbar.config(command=summary_text.yview) # Generate summary content baseline_id = result.get("baseline_id", "unknown") summary_lines = self._generate_summary_text(result, baseline_id) summary_text.insert("1.0", "\n".join(summary_lines)) summary_text.config(state="disabled") # Store summary for clipboard summary_content = "\n".join(summary_lines) # Buttons frame at bottom btn_frame = ttk.Frame(dlg) btn_frame.pack(fill="x", padx=10, pady=(0, 10)) def copy_to_clipboard(): self.app.clipboard_clear() self.app.clipboard_append(summary_content) messagebox.showinfo("Copied", "Summary copied to clipboard!", parent=dlg) def close_and_ask_baseline(): dlg.destroy() # Ask user if they want to save as new baseline response = messagebox.askyesno( "Save as Baseline", "Do you want to save the current state as a new baseline?\n\n" "• Yes: Create a new baseline for future comparisons\n" "• No: Keep only the analysis results without saving", icon="question", ) if response: # User chose to create a new baseline try: self.app._set_phase("Creating baseline...") self.app._disable_action_buttons() self.app._current_task_id = self.app.worker.submit( bm.create_baseline_from_dir, project, None, True, True, ignore_patterns, profile_name, max_keep, kind="thread", on_done=on_create_done_callback, ) self.app.cancel_btn.config(state="normal") except Exception as e: self.app.log(f"Failed to create baseline: {e}", level="ERROR") self.app._set_phase("Idle") self.app._current_task_id = None self.app._enable_action_buttons() else: # User chose not to create a baseline self.app.log( "Differ analysis completed without creating new baseline", level="INFO", ) ttk.Button( btn_frame, text="📋 Copy to Clipboard", command=copy_to_clipboard ).pack(side="left", padx=5) ttk.Button(btn_frame, text="✅ Close", command=close_and_ask_baseline).pack( side="right", padx=5 ) def _show_differ_summary_dialog(self, result, baseline_id, baseline_dir): """Show summary dialog with differ results (after baseline creation).""" import subprocess dlg = tk.Toplevel(self.app) dlg.title("Differ Summary") dlg.geometry("700x600") dlg.transient(self.app) # Center dialog dlg.update_idletasks() pw = self.app.winfo_width() ph = self.app.winfo_height() px = self.app.winfo_rootx() py = self.app.winfo_rooty() dw = 700 dh = 600 x = px + (pw - dw) // 2 y = py + (ph - dh) // 2 dlg.geometry(f"{dw}x{dh}+{x}+{y}") # Title title_frame = ttk.Frame(dlg) title_frame.pack(fill="x", padx=10, pady=10) ttk.Label( title_frame, text=f"Differ Summary - {baseline_id}", font=("Arial", 12, "bold"), ).pack() # Text widget with scrollbar for summary text_frame = ttk.Frame(dlg) text_frame.pack(fill="both", expand=True, padx=10, pady=(0, 10)) scrollbar = ttk.Scrollbar(text_frame) scrollbar.pack(side="right", fill="y") summary_text = tk.Text( text_frame, wrap="none", font=("Courier", 9), yscrollcommand=scrollbar.set, height=30, width=80, ) summary_text.pack(side="left", fill="both", expand=True) scrollbar.config(command=summary_text.yview) # Generate summary content summary_lines = self._generate_summary_text(result, baseline_id) summary_text.insert("1.0", "\n".join(summary_lines)) summary_text.config(state="disabled") # Store summary for clipboard summary_content = "\n".join(summary_lines) # Buttons 
frame btn_frame = ttk.Frame(dlg) btn_frame.pack(fill="x", padx=10, pady=(0, 10)) def copy_to_clipboard(): self.app.clipboard_clear() self.app.clipboard_append(summary_content) messagebox.showinfo("Copied", "Summary copied to clipboard!", parent=dlg) def open_baseline_folder(): try: if os.path.exists(baseline_dir): subprocess.Popen(["explorer", baseline_dir]) else: messagebox.showwarning( "Not Found", f"Baseline folder not found:\n{baseline_dir}", parent=dlg, ) except Exception as e: messagebox.showerror( "Error", f"Failed to open folder:\n{e}", parent=dlg ) ttk.Button( btn_frame, text="📋 Copy to Clipboard", command=copy_to_clipboard ).pack(side="left", padx=5) ttk.Button( btn_frame, text="📂 Open Baseline Folder", command=open_baseline_folder ).pack(side="left", padx=5) ttk.Button(btn_frame, text="❌ Close", command=dlg.destroy).pack( side="right", padx=5 ) def _generate_summary_text(self, result, baseline_id): """Generate summary text lines for differ results.""" import datetime lines = [] lines.append("=" * 80) lines.append("PyUcc - Differ Summary".center(80)) lines.append("=" * 80) lines.append("") lines.append(f"Baseline ID: {baseline_id}") lines.append( f"Generated: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}" ) lines.append("") # Summary Statistics lines.append("-" * 80) lines.append("Summary Statistics") lines.append("-" * 80) total = result.get("total", {}) lines.append(f"Files Added: {total.get('added', 0):>8}") lines.append(f"Files Deleted: {total.get('deleted', 0):>8}") lines.append(f"Files Modified: {total.get('modified', 0):>8}") lines.append(f"Files Unmodified: {total.get('unmodified', 0):>8}") lines.append("─" * 30) total_files = sum( [ total.get("added", 0), total.get("deleted", 0), total.get("modified", 0), total.get("unmodified", 0), ] ) lines.append(f"Total Files: {total_files:>8}") lines.append("") # Code Metrics Comparison summary = result.get("summary", {}) if summary: baseline_counts = summary.get("baseline", {}).get("countings", {}) current_counts = summary.get("current", {}).get("countings", {}) baseline_metrics = summary.get("baseline", {}).get("metrics", {}) current_metrics = summary.get("current", {}).get("metrics", {}) lines.append("-" * 80) lines.append("Code Metrics Comparison") lines.append("-" * 80) lines.append( f"{'Metric':<30} {'Baseline':>15} {'Current':>15} {'Delta':>15}" ) lines.append("─" * 80) def _delta_str(baseline_val, current_val): delta = current_val - baseline_val sign = "+" if delta >= 0 else "" return f"{sign}{delta}" bc_code = baseline_counts.get("code_lines", 0) cc_code = current_counts.get("code_lines", 0) lines.append( f"{'Code Lines':<30} {bc_code:>15,} {cc_code:>15,} {_delta_str(bc_code, cc_code):>15}" ) bc_comment = baseline_counts.get("comment_lines", 0) cc_comment = current_counts.get("comment_lines", 0) lines.append( f"{'Comment Lines':<30} {bc_comment:>15,} {cc_comment:>15,} {_delta_str(bc_comment, cc_comment):>15}" ) bc_blank = baseline_counts.get("blank_lines", 0) cc_blank = current_counts.get("blank_lines", 0) lines.append( f"{'Blank Lines':<30} {bc_blank:>15,} {cc_blank:>15,} {_delta_str(bc_blank, cc_blank):>15}" ) bc_physical = baseline_counts.get("physical_lines", 0) cc_physical = current_counts.get("physical_lines", 0) lines.append( f"{'Physical Lines':<30} {bc_physical:>15,} {cc_physical:>15,} {_delta_str(bc_physical, cc_physical):>15}" ) lines.append("") bm_func = baseline_metrics.get("total_func_count", 0) cm_func = current_metrics.get("total_func_count", 0) lines.append( f"{'Function Count':<30} {bm_func:>15,} 
{cm_func:>15,} {_delta_str(bm_func, cm_func):>15}" ) bm_avgcc = baseline_metrics.get("avg_avg_cc", 0.0) cm_avgcc = current_metrics.get("avg_avg_cc", 0.0) delta_avgcc = cm_avgcc - bm_avgcc sign_cc = "+" if delta_avgcc >= 0 else "" lines.append( f"{'Avg Cyclomatic Complexity':<30} {bm_avgcc:>15.2f} {cm_avgcc:>15.2f} {sign_cc}{delta_avgcc:>14.2f}" ) bm_mi = baseline_metrics.get("avg_mi", 0.0) cm_mi = current_metrics.get("avg_mi", 0.0) delta_mi = cm_mi - bm_mi sign_mi = "+" if delta_mi >= 0 else "" lines.append( f"{'Maintainability Index':<30} {bm_mi:>15.2f} {cm_mi:>15.2f} {sign_mi}{delta_mi:>14.2f}" ) lines.append("") lines.append("=" * 80) lines.append("End of Summary".center(80)) lines.append("=" * 80) return lines
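# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the GUI flow): the core per-pair bookkeeping
# that _run_differ_worker performs, reduced to standalone functions. The names
# below (sha1_of_file, classify_pair, format_delta_value) are invented for this
# example; the real logic lives in _process_pair and _on_progress above.
# ---------------------------------------------------------------------------
import hashlib


def sha1_of_file(path, chunk_size=8192):
    """Hash a file in chunks, mirroring the on-the-fly hashing in _process_pair."""
    h = hashlib.sha1()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()


def classify_pair(baseline_sha1, current_sha1):
    """Return added/deleted/modified/unmodified for one (baseline, current) pair.

    Arguments are content hashes; None means the file is absent on that side.
    """
    if baseline_sha1 is None and current_sha1 is not None:
        return "added"
    if baseline_sha1 is not None and current_sha1 is None:
        return "deleted"
    if baseline_sha1 == current_sha1:
        return "unmodified"
    return "modified"


def format_delta_value(value, is_float=False):
    """Render a delta with an explicit sign, as the differ results tree does."""
    if value is None:
        return ""
    if is_float:
        return f"{'+' if value >= 0 else '-'}{abs(value):.2f}"
    ival = int(value)
    return f"{'+' if ival >= 0 else '-'}{abs(ival)}"


# Example: classify_pair("abc", "abc")            -> "unmodified"
#          classify_pair(None, "abc")             -> "added"
#          format_delta_value(-3)                 -> "-3"
#          format_delta_value(0.0, is_float=True) -> "+0.00"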