# PlatSim_Genova/TestEnvironment/env/leo_grifo_pdf2report.py

import json
import logging
import os
import sys
import traceback
from datetime import datetime

# Make the vendored site-packages (bundled fpdf2) importable before the fpdf imports.
CWD = os.path.dirname(__file__)
sys.path.append(os.path.join(CWD, 'site-packages'))
from fpdf import FPDF
from fpdf.fonts import FontFace
from fpdf.enums import TableCellFillMode


class Document(FPDF):
    RED = (255, 0, 0)
    PALE_RED = (255, 94, 94)
    GREEN = (0, 255, 0)
    PALE_GREEN = (167, 217, 173)
    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)

    def __init__(self, header_text=None, footer_text=None):
        super().__init__()
        self.chapter_no = 0
        self.subchapter_no = 0
        # Heading sizes for levels 1..5.
        self.heading_sz = dict(enumerate((18, 16, 14, 12, 10), start=1))
        self.header_text = header_text
        self.footer_text = footer_text
        self.family = 'times'
        try:
            self.add_font('georgia', '', r"c:\WINDOWS\Fonts\georgia.ttf")
            self.add_font('georgia', 'B', r"c:\WINDOWS\Fonts\georgiab.ttf")
            self.add_font('georgia', 'I', r"c:\WINDOWS\Fonts\georgiai.ttf")
            self.add_font('georgia', 'BI', r"c:\WINDOWS\Fonts\georgiaz.ttf")
            self.family = 'georgia'
        except FileNotFoundError:
            # Georgia is only present on Windows hosts; fall back to the Times core font.
            pass
        self.reset_font()

    def reset_font(self):
        self.set_font(family=self.family, size=8)

    def header(self):
        if self.header_text is not None:
            self.set_font(style='I', size=10)
            logo = os.path.join(CWD, 'images', 'Leonardo_small.png')
            self.image(name=logo, x=10, y=14, w=30, keep_aspect_ratio=True)
            sw = self.get_string_width(s=self.header_text)
            self.text(x=(200 - sw), y=18, text=self.header_text)
            self.line(x1=10, y1=21, x2=200, y2=21)
            self.ln(20)

    def footer(self):
        if self.footer_text is not None:
            self.set_y(-15)
            self.set_font(style='I', size=8)
            self.cell(0, 10, f'{self.footer_text} - Page {self.page_no()}', border='T', align='R')

    def add_chapter(self, title, newpage=True):
        self.chapter_no += 1
        self.subchapter_no = 0
        if newpage:
            self.add_page()
        self.add_heading(3, f'{self.chapter_no} {title}', 'L')
        self.reset_font()

    def add_subchapter(self, title):
        self.subchapter_no += 1
        self.add_heading(4, f'{self.chapter_no}.{self.subchapter_no} {title}', 'L')
        self.reset_font()

    def add_paragraph(self, text):
        self.multi_cell(0, 5, text, align='J')
        self.ln(10)

    def add_single_line(self, text, alignment='L', markdown=False):
        self.cell(0, 5, text, align=alignment, markdown=markdown)
        self.ln(10)

    def add_heading(self, level, text, alignment='L'):
        sz = self.heading_sz.get(level, 10)
        self.set_font(style='B', size=sz)
        self.cell(0, 10, text, border=0, align=alignment)
        self.ln(20)
        self.reset_font()
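

# Illustrative usage of the Document helpers above (a sketch, assuming the
# fpdf2 table()/FontFace API that the rest of this module relies on):
#
#     doc = Document(header_text='GRIFO ATP', footer_text='Leonardo')
#     doc.add_page()
#     doc.add_chapter('Introduction')   # renders "1 Introduction"
#     doc.add_subchapter('Scope')       # renders "1.1 Scope"
#     doc.add_paragraph('Justified body text...')
#     doc.output('example.pdf')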


class PDFClass:
    def __init__(self, time='0'):
        self.vq1_equipment_info = {}
        self.radio_ip = ""
        self.radio_model = ""
        self.radio_sw_version = ""
        self.execution_time = time
        self.radio_serial_number = ""
        self.session_index = 1

    def start_doc(self, json_test: dict, head_foot: bool = True):
        """
        This function creates the PDF document object and configures its header and footer.
        :param json_test: the dict containing the template with results and constants.
        :param head_foot: if True, header/footer text is taken from json_test; otherwise both are omitted.
        :return: the Document instance.
        """
        header_text = json_test["header"] if head_foot else None
        footer_text = json_test["footer"] if head_foot else None
        doc = Document(header_text, footer_text)
        return doc

    def pdf_preamble(self, doc: Document, json_test: dict):
        """
        This function writes the title page of the document.
        :param doc: document created by start_doc.
        :param json_test: the dict containing the template with results and constants.
        """
        test_title = json_test["title"]
        test_author = 'Test Report'
        today = datetime.today().strftime('%d/%m/%Y - %H:%M')
        doc.set_title(test_title)
        doc.set_author(test_author)
        doc.add_page()
        doc.ln(10)
        doc.add_heading(1, test_title, 'C')
        doc.set_font(size=12)
        doc.cell(0, 10, f'{test_author}', align='C')
        doc.ln(10)
        doc.cell(0, 10, f'{today}', align='C')
        doc.reset_font()

    def pdf_name(self, json_test: dict, new_path: str):
        """
        This function builds a timestamped report name for the current test case.
        :param json_test: json config dict for the current test case, as returned by get_config.
        :param new_path: path of the folder where the executor output is located.
        :return: file path, without the '.pdf' extension.
        """
        logging.info("Setting PDF name")
        # Use the test ID as base name (falling back to file_name) and include seconds.
        now = datetime.now()
        date_name = now.strftime("%Y%m%d_%H%M%S")
        base = json_test.get('ID') or json_test.get('file_name') or 'testReport'
        # pdf_generate appends '.pdf', so the final file is <base>_<timestamp>_report.pdf.
        file_name_only = f"{base}_{date_name}_report"
        file_name = os.path.join(new_path, file_name_only)
        logging.info(f"Test: {file_name}")
        return file_name
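
    # For example (illustrative values): pdf_name({'ID': 'RBT_BLACK_ATC_1783'}, r'C:\reports')
    # returns something like r'C:\reports\RBT_BLACK_ATC_1783_20240314_160212_report',
    # to which pdf_generate() later appends '.pdf'.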

    def pdf_test_information_section(self, doc: Document, json_test: dict):
        """
        This function collects the receiver configuration and builds the Test Information chapter.
        :param doc: document created by start_doc.
        :param json_test: the dict containing the template with results and constants.
        """
        serial_number = "Not found"
        if self.radio_serial_number != "":
            serial_number = self.radio_serial_number
        rows = [
            ('**ID**', json_test.get("ID", ''), '**Requirement**', json_test.get("requirement", '')),
            ('**Strategy**', json_test.get("strategy", ''), '**Technique**', json_test.get("technique", '')),
            ('**Level**', json_test.get("level", ''), '**Setup**', json_test.get("setup", '')),
            ('**Author**', json_test.get("author", ''), '**Reviewer**', json_test.get("reviewer", '')),
            ('**Equipment SN**', serial_number, '**Execution Time**', f'{self.execution_time}')
        ]
        # If the test JSON contains a script version (injected by the runner), show it.
        if 'script_version' in json_test:
            rows.append(('**Script Version**', json_test.get('script_version', ''), '', ''))
        doc.ln(40)
        doc.add_chapter('Test Information', False)
        doc.set_font(size=10)
        with doc.table(
                borders_layout="INTERNAL",
                col_widths=(60, 60, 60, 60),
                markdown=True,
                headings_style=FontFace(),
                line_height=6,
                text_align=("LEFT", "CENTER", "LEFT", "CENTER"),
                width=180) as table:
            for data_row in rows:
                table.row(data_row)
            # Add the free-text description unless it still contains an unexpanded placeholder.
            if "$" not in json_test["description"]:
                logging.info("Creating row description")
                row = table.row()
                row.cell("**Description**")
                row.cell(json_test["description"], colspan=3)
        doc.ln(10)

    def pdf_step_execution(self, step_list: list, doc: Document):
        try:
            logging.info("Writing step results")
            doc.ln(10)
            doc.add_subchapter('Step execution and results')
            doc.set_font(size=9)
            step_nb = 0
            with doc.table(
                    borders_layout="INTERNAL",
                    col_widths=(30, 90, 60, 60),
                    headings_style=FontFace(emphasis="BOLD", size_pt=10),
                    text_align=("LEFT", "LEFT", "LEFT", "CENTER"),
                    width=180) as table:
                table.row(('Step', 'Description', 'Result', 'Error'))
                for step in step_list:
                    step_nb += 1
                    description = step["desc"]
                    naming = f'Step {step_nb}'
                    err_result = ""
                    ex_result = ""
                    color = doc.WHITE
                    if (step.get("result"), step.get("expected")) == (None, None):
                        # Informational row (e.g. a start-time line): no numbering, no coloring.
                        naming = ''
                        step_nb -= 1
                    elif not step.get("result"):
                        color = doc.PALE_RED
                    elif step.get("expected"):
                        color = doc.PALE_GREEN
                    style = FontFace(fill_color=color)
                    if step.get("expected"):
                        ex_result = step["expected"].replace("var_", "", 1)
                    if step.get("error"):
                        err_result = str(step["error"])
                    row = table.row()
                    for c in (naming, description, ex_result, err_result):
                        row.cell(c, style=style)
            doc.ln(10)
            return True
        except Exception as e:
            logging.error(e)
            logging.exception("Error message:")
            traceback.print_exc()
            return False
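
    # A step entry is a dict with up to four keys, e.g. (values illustrative):
    #   {'desc': 'Set HFSuite-MIB::rxSquelch1.0 to 0', 'result': '0',
    #    'expected': '0', 'error': None}
    # Entries whose 'result' and 'expected' are both None are rendered as
    # unnumbered, uncolored informational rows.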

    def pdf_step_result(self, name, passed, failure, doc: Document):
        tot = passed + failure
        fail = 100 * failure / tot if tot > 0 else 0
        session_name = f'Test {self.session_index}: {name}'
        self.session_index += 1
        line_color = doc.PALE_RED if failure > 0 else doc.PALE_GREEN
        rows = (('Total STEP', 'PASS', 'FAIL', '% of Failure'),
                (str(tot), str(passed), str(failure), f'{fail:.02f}'))
        doc.add_chapter(session_name)
        doc.set_font(size=9)
        with doc.table(
                borders_layout="INTERNAL",
                cell_fill_color=line_color,
                cell_fill_mode=TableCellFillMode.ROWS,
                col_widths=(60, 60, 60, 60),
                headings_style=FontFace(emphasis="BOLD", size_pt=10),
                line_height=6,
                text_align=("CENTER", "CENTER", "CENTER", "CENTER"),
                width=180) as table:
            for data_row in rows:
                table.row(data_row)
        return True

    def pdf_step_summary(self, passed, failure, doc: Document):
        tot = passed + failure
        # Guard against an empty session, as pdf_step_result does.
        fail = 100 * failure / tot if tot > 0 else 0
        session_name = 'Test Summary'
        line_color = doc.PALE_RED if failure > 0 else doc.PALE_GREEN
        rows = (('Total TEST', 'PASS', 'FAIL', '% of Failure'),
                (str(tot), str(passed), str(failure), f'{fail:.02f}'))
        doc.add_chapter(session_name)
        doc.set_font(size=9)
        with doc.table(
                borders_layout="INTERNAL",
                cell_fill_color=line_color,
                cell_fill_mode=TableCellFillMode.ROWS,
                col_widths=(60, 60, 60, 60),
                headings_style=FontFace(emphasis="BOLD", size_pt=10),
                line_height=6,
                text_align=("CENTER", "CENTER", "CENTER", "CENTER"),
                width=180) as table:
            for data_row in rows:
                table.row(data_row)
        return True

    def pdf_add_image(self, doc: Document, name, title=None, alt_text=None, width=0, height=0):
        # The original body referenced a module-level `doc`; take the document
        # explicitly so the method also works outside the __main__ demo.
        doc.image(name=name, title=title, alt_text=alt_text, keep_aspect_ratio=True, w=width, h=height)
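
    # e.g. (illustrative): my_pdf.pdf_add_image(doc, r'C:\temp\setup.png', title='Setup', width=120)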

    def pdf_add_per_run_summary(self, doc: Document, repetitions: list):
        """
        Generate a professional per-run summary table with both 1553 and Serial statistics.

        Creates a dedicated chapter with comprehensive per-run metrics including:
        - Run status (PASS/FAIL)
        - PBIT timing
        - B6 status checks (Pass/Fail/Known)
        - B8 diagnostic checks
        - Serial communication (Errors/Fatal/Recycles)

        Args:
            doc: Document object for PDF generation
            repetitions: List of per-run statistics dictionaries with keys:
                - 'repetition': Run number
                - 'success': Boolean pass/fail
                - 'pbit_time': BIT completion time in seconds
                - 'b6_pass', 'b6_fail', 'b6_known_fail': B6 check counts
                - 'b8_checked', 'b8_pass', 'b8_fail': B8 check counts
                - 'serial_total', 'serial_errors', 'serial_fatal', 'serial_recycles': Serial stats

        This table appears as a standalone chapter, NOT mixed with step execution logs.
        """
        doc.add_chapter('Per-Run Summary (1553 + Serial Statistics)')
        doc.ln(5)
        doc.set_font(size=9)
        # Table header row (including the Target columns)
        headers = ('Run', 'Status', 'PBIT\nTime', 'B6 Status\n(P/F/K)',
                   'B8 Checks\n(P/F/K)', 'Serial Messages\n(E/F/R)', 'Target', 'Target Test')
        with doc.table(
                borders_layout="ALL",
                col_widths=(15, 20, 20, 30, 30, 30, 20, 20),
                headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                line_height=5,
                text_align=("CENTER", "CENTER", "CENTER", "CENTER", "CENTER", "CENTER", "CENTER", "CENTER"),
                width=180) as table:
            table.row(headers)
            for run in repetitions:
                # Determine the row color from the run result.
                if run['success']:
                    row_color = doc.PALE_GREEN
                    status_text = "PASS"
                else:
                    row_color = doc.PALE_RED
                    status_text = "FAIL"
                style = FontFace(fill_color=row_color, size_pt=9)
                # Format the data compactly.
                pbit_time = f"{run['pbit_time']:.1f}s" if run.get('bit_available', True) else "N/A"
                b6_status = f"{run['b6_pass']}/{run['b6_fail']}/{run['b6_known_fail']}"
                b8_checks = (f"{run['b8_pass']}/{run['b8_fail']}/{run.get('b8_known_fail', 0)}"
                             if run['b8_checked'] > 0 else "-")
                serial_stats = (f"{run.get('serial_errors', 0)}/{run.get('serial_fatal', 0)}/"
                                f"{run.get('serial_recycles', 0)}")
                row = table.row()
                # Format the target cells: only show data if a target test was actually performed.
                target_detected = run.get('target_detected')
                if target_detected is None:
                    # Target test was not performed (run_on_target=False).
                    target_text = '-'
                    target_result_text = '-'
                elif target_detected:
                    # PASS: the target was detected; show details (distance @ cycle) when available.
                    t_info = run.get('target_simulated')
                    if t_info:
                        try:
                            target_text = (f"{t_info.get('distance')}m @ cycle "
                                           f"{t_info.get('appeared_after_cycles', t_info.get('found_at_iter', '?'))}")
                        except Exception:
                            target_text = "Detected"
                    else:
                        target_text = "Detected"
                    target_result_text = "PASS"
                else:
                    # FAIL: the target was NOT detected, so there are no details to show.
                    target_text = '-'
                    target_result_text = "FAIL"
                for cell_text in (str(run['repetition']), status_text, pbit_time,
                                  b6_status, b8_checks, serial_stats, target_text, target_result_text):
                    row.cell(cell_text, style=style)
        doc.ln(5)
        doc.set_font(size=8, style='I')
        doc.multi_cell(0, 4,
                       "Legend: B6 Status = Pass/Fail/Known (known failures ignored) | "
                       "B8 Checks = Pass/Fail/Known (known failures ignored) | "
                       "Serial = Errors(%E)/Fatal(%F)/Recycles | "
                       "Target Test = Target detection result (PASS/FAIL)",
                       align='L')
        doc.reset_font()
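
    # A `repetitions` entry might look like this (values illustrative):
    #   {'repetition': 1, 'success': True, 'pbit_time': 42.3,
    #    'b6_pass': 120, 'b6_fail': 0, 'b6_known_fail': 2,
    #    'b8_checked': 0, 'b8_pass': 0, 'b8_fail': 0,
    #    'serial_total': 5000, 'serial_errors': 3, 'serial_fatal': 0,
    #    'serial_recycles': 1, 'target_detected': None}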

    def pdf_add_global_statistics(self, doc: Document, aggregate: dict):
        """
        Generate the professional global aggregate statistics tables.

        Creates a dedicated chapter with combined statistics from all test runs:
        - Overall test summary (total runs, pass/fail rates)
        - Aggregate 1553 statistics (B6/B8 checks across all runs)
        - Aggregate serial statistics (total messages, errors, fatal, recycles)
        - PBIT timing analysis (avg, min, max, std dev)

        Args:
            doc: Document object for PDF generation
            aggregate: Dictionary with aggregate statistics:
                - 'total_runs', 'successful_runs', 'failed_runs'
                - 'total_b6_checks', 'total_b6_pass', 'total_b6_fail', 'total_b6_known'
                - 'total_b8_checks', 'total_b8_pass', 'total_b8_fail'
                - 'total_serial_msgs', 'total_serial_errors', 'total_serial_fatal', 'total_serial_recycles'
                - 'avg_pbit_time', 'min_pbit_time', 'max_pbit_time', 'std_dev_pbit'

        This table appears as a standalone chapter, NOT mixed with step execution logs.
        """
        doc.add_chapter('Global Aggregate Statistics (All Runs Combined)')
        doc.ln(5)
        # Section 1: Overall Test Summary
        doc.add_subchapter('Overall Test Summary')
        doc.set_font(size=9)
        total = aggregate['total_runs']
        success = aggregate['successful_runs']
        failed = aggregate['failed_runs']
        success_rate = (success / total * 100) if total > 0 else 0
        rows_summary = (
            ('Metric', 'Count', 'Percentage'),
            ('Total Runs', str(total), '100%'),
            ('Successful Runs', str(success), f"{success_rate:.1f}%"),
            ('Failed Runs', str(failed), f"{100 - success_rate:.1f}%")
        )
        with doc.table(
                borders_layout="ALL",
                col_widths=(60, 60, 60),
                headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                line_height=5,
                text_align=("LEFT", "CENTER", "CENTER"),
                width=180) as table:
            for data_row in rows_summary:
                table.row(data_row)
        doc.ln(10)
        # Section 2: 1553 Bus Statistics (B6 + B8)
        doc.add_subchapter('1553 Bus Statistics')
        doc.set_font(size=9)
        total_b6 = aggregate['total_b6_checks']
        b6_pass_rate = (aggregate['total_b6_pass'] / total_b6 * 100) if total_b6 > 0 else 0
        b6_fail_rate = (aggregate['total_b6_fail'] / total_b6 * 100) if total_b6 > 0 else 0
        b6_known_rate = (aggregate['total_b6_known'] / total_b6 * 100) if total_b6 > 0 else 0
        rows_b6 = (
            ('Metric', 'Count', 'Percentage'),
            ('B6 LRU Status - Total Checks', str(total_b6), '100%'),
            ('B6 - Pass', str(aggregate['total_b6_pass']), f"{b6_pass_rate:.1f}%"),
            ('B6 - Fail (Real)', str(aggregate['total_b6_fail']), f"{b6_fail_rate:.1f}%"),
            ('B6 - Known Failures (Ignored)', str(aggregate['total_b6_known']), f"{b6_known_rate:.1f}%")
        )
        with doc.table(
                borders_layout="ALL",
                col_widths=(80, 50, 50),
                headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                line_height=5,
                text_align=("LEFT", "CENTER", "CENTER"),
                width=180) as table:
            for data_row in rows_b6:
                table.row(data_row)
        doc.ln(5)
        # B8 statistics (only if any checks were performed)
        total_b8 = aggregate['total_b8_checks']
        if total_b8 > 0:
            b8_pass_rate = aggregate['total_b8_pass'] / total_b8 * 100
            b8_fail_rate = aggregate['total_b8_fail'] / total_b8 * 100
            b8_known = aggregate.get('total_b8_known', 0)
            b8_known_rate = b8_known / total_b8 * 100
            rows_b8 = (
                ('Metric', 'Count', 'Percentage'),
                ('B8 Diagnostic - Total Checks', str(total_b8), '100%'),
                ('B8 - Pass', str(aggregate['total_b8_pass']), f"{b8_pass_rate:.1f}%"),
                ('B8 - Fail', str(aggregate['total_b8_fail']), f"{b8_fail_rate:.1f}%"),
                ('B8 - Known Failures (Ignored)', str(b8_known), f"{b8_known_rate:.1f}%")
            )
            with doc.table(
                    borders_layout="ALL",
                    col_widths=(80, 50, 50),
                    headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                    line_height=5,
                    text_align=("LEFT", "CENTER", "CENTER"),
                    width=180) as table:
                for data_row in rows_b8:
                    table.row(data_row)
            doc.ln(10)
        else:
            doc.set_font(size=9, style='I')
            doc.cell(0, 5, 'B8 Diagnostic checks: Not performed (no real B6 failures detected)', align='L')
            doc.ln(10)
            doc.reset_font()
        # Section 3: Serial Communication Statistics
        doc.add_subchapter('Serial Communication Statistics')
        doc.set_font(size=9)
        total_serial = aggregate['total_serial_msgs']
        serial_error_rate = (aggregate['total_serial_errors'] / total_serial * 100) if total_serial > 0 else 0
        serial_fatal_rate = (aggregate['total_serial_fatal'] / total_serial * 100) if total_serial > 0 else 0
        rows_serial = (
            ('Metric', 'Count', 'Notes'),
            ('Total Serial Messages', str(total_serial), 'All messages processed'),
            ('Error Messages (%E)', str(aggregate['total_serial_errors']), f"{serial_error_rate:.1f}% of total"),
            ('Fatal Messages (%F)', str(aggregate['total_serial_fatal']), f"{serial_fatal_rate:.1f}% of total"),
            ('System Recycles', str(aggregate['total_serial_recycles']), 'Power cycles + unexpected resets')
        )
        with doc.table(
                borders_layout="ALL",
                col_widths=(70, 40, 70),
                headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                line_height=5,
                text_align=("LEFT", "CENTER", "LEFT"),
                width=180) as table:
            for data_row in rows_serial:
                table.row(data_row)
        doc.ln(10)
        # Section 4: PBIT Timing Analysis
        if aggregate.get('avg_pbit_time') is not None:
            doc.add_subchapter('PBIT Timing Analysis')
            doc.set_font(size=9)
            rows_timing = (
                ('Metric', 'Value', 'Unit'),
                ('Average PBIT Time', f"{aggregate['avg_pbit_time']:.2f}", 'seconds'),
                ('Minimum PBIT Time', f"{aggregate['min_pbit_time']:.2f}", 'seconds'),
                ('Maximum PBIT Time', f"{aggregate['max_pbit_time']:.2f}", 'seconds'),
                ('Standard Deviation', f"{aggregate['std_dev_pbit']:.2f}", 'seconds')
            )
            with doc.table(
                    borders_layout="ALL",
                    col_widths=(70, 50, 60),
                    headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                    line_height=5,
                    text_align=("LEFT", "CENTER", "CENTER"),
                    width=180) as table:
                for data_row in rows_timing:
                    table.row(data_row)
            doc.ln(5)
            doc.set_font(size=8, style='I')
            doc.multi_cell(0, 4,
                           "PBIT timing measures the duration from power-on to BIT report availability. "
                           "Consistent timing indicates stable system behavior.",
                           align='L')
        doc.reset_font()
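
    # An `aggregate` dict might look like this (values illustrative):
    #   {'total_runs': 20, 'successful_runs': 18, 'failed_runs': 2,
    #    'total_b6_checks': 2400, 'total_b6_pass': 2360, 'total_b6_fail': 4,
    #    'total_b6_known': 36, 'total_b8_checks': 4, 'total_b8_pass': 3,
    #    'total_b8_fail': 1, 'total_serial_msgs': 98000, 'total_serial_errors': 12,
    #    'total_serial_fatal': 0, 'total_serial_recycles': 20,
    #    'avg_pbit_time': 42.1, 'min_pbit_time': 40.8, 'max_pbit_time': 44.0,
    #    'std_dev_pbit': 0.9}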

    def pdf_add_machine_reliability_assessment(self, doc: Document, aggregate: dict, repetitions: list):
        """
        Generate the Machine Reliability Assessment with a problem distribution table.

        Creates a dedicated chapter analyzing machine reliability with:
        - Overall reliability percentage (perfect runs vs total)
        - Problem type distribution table ordered by frequency
        - Clear identification of the main issues to address

        Args:
            doc: Document object for PDF generation
            aggregate: Dictionary with aggregate statistics
            repetitions: List of per-run statistics used to analyze problem types

        This section provides an executive-level reliability assessment for maintenance decisions.
        """
        doc.add_chapter('Machine Reliability Assessment')
        doc.ln(5)
        # Calculate the overall reliability.
        total = aggregate['total_runs']
        perfect_runs = aggregate['successful_runs']
        reliability_pct = (perfect_runs / total * 100) if total > 0 else 0
        doc.set_font(size=10)
        doc.multi_cell(0, 5,
                       f"This section provides an executive summary of the machine under test, "
                       f"analyzing {total} complete test cycles to identify reliability metrics and problem patterns.",
                       align='J')
        doc.ln(5)
        # Overall reliability box
        doc.add_subchapter('Overall Reliability Score')
        doc.set_font(size=9)
        if reliability_pct >= 80:
            reliability_color = doc.PALE_GREEN
            assessment = "EXCELLENT"
        elif reliability_pct >= 60:
            reliability_color = (255, 255, 200)  # Pale yellow
            assessment = "GOOD"
        elif reliability_pct >= 40:
            reliability_color = (255, 200, 150)  # Pale orange
            assessment = "ACCEPTABLE"
        else:
            reliability_color = doc.PALE_RED
            assessment = "CRITICAL"
        rows_reliability = (
            ('Metric', 'Value', 'Assessment'),
            ('Perfect Runs (No Issues)', f"{perfect_runs}/{total}", f"{reliability_pct:.1f}%"),
            ('Runs with Issues', f"{total - perfect_runs}/{total}", f"{100 - reliability_pct:.1f}%"),
            ('Overall Rating', assessment, '')
        )
        with doc.table(
                borders_layout="ALL",
                col_widths=(70, 50, 60),
                headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                line_height=5,
                text_align=("LEFT", "CENTER", "CENTER"),
                width=180) as table:
            for idx, data_row in enumerate(rows_reliability):
                row = table.row()
                for cell_text in data_row:
                    if idx == len(rows_reliability) - 1:  # Last row (overall rating)
                        row.cell(cell_text, style=FontFace(fill_color=reliability_color, emphasis="BOLD", size_pt=9))
                    else:
                        row.cell(cell_text)
        doc.ln(10)
        # Known failures information box
        doc.add_subchapter('Known Failures (Excluded from Statistics)')
        doc.set_font(size=9)
        # Import KNOWN_FAILURES from the test script.
        try:
            # Add the scripts folder to the path so KNOWN_FAILURES can be imported.
            script_dir = os.path.join(os.path.dirname(__file__), '..', 'scripts')
            if script_dir not in sys.path:
                sys.path.insert(0, script_dir)
            from GRIFO_M_PBIT import KNOWN_FAILURES
            doc.multi_cell(0, 5,
                           "The following test checks are expected to fail due to hardware test setup limitations "
                           "(e.g. missing physical components). These failures are tracked separately and do not "
                           "affect the test verdict or the reliability calculations:",
                           align='J')
            doc.ln(3)
            # List the known failures.
            doc.set_font(size=9)
            for known_field in KNOWN_FAILURES:
                # Extract a clean field name for display.
                if 'RdrHealthStatusAndBitReport_' in known_field:
                    clean_name = known_field.split('RdrHealthStatusAndBitReport_')[-1]
                else:
                    clean_name = known_field.split('_')[-1] if '_' in known_field else known_field
                clean_name = clean_name.replace('_', ' ').title()
                doc.cell(10, 5, '')
                doc.cell(0, 5, f"{clean_name}")
                doc.ln(5)
        except ImportError:
            doc.multi_cell(0, 5,
                           "Note: the known-failures configuration could not be loaded.",
                           align='J')
        doc.ln(10)
        # Problem distribution analysis
        doc.add_subchapter('Problem Distribution Analysis')
        doc.set_font(size=9)
        # Analyze problem types from the repetitions.
        problem_counts = {}
        for run in repetitions:
            if not run['success']:
                # Extract a readable test name from each failed field.
                # Example: "radar_health_status_and_bit_report_valid_RdrHealthStatusAndBitReport_processor_status"
                # -> "Processor Status"
                for field, value in run['failures']:
                    if 'RdrHealthStatusAndBitReport_' in field:
                        # Keep everything after the message name.
                        test_name = field.split('RdrHealthStatusAndBitReport_')[-1]
                    elif '_' in field and len(field.split('_')) > 3:
                        # For other messages, keep the last 4 parts for context.
                        parts = field.split('_')
                        test_name = '_'.join(parts[-4:])
                    else:
                        test_name = field
                    # Clean up for display.
                    test_name = test_name.replace('_', ' ').title()
                    # Count this specific test failure.
                    problem_counts[test_name] = problem_counts.get(test_name, 0) + 1
            # Also check serial problems.
            if run.get('serial_fatal', 0) > 0:
                problem_counts['Serial Communication (Fatal)'] = problem_counts.get('Serial Communication (Fatal)', 0) + 1
            if run.get('serial_recycles', 0) > 1:  # More recycles than expected
                problem_counts['System Instability (Recycles)'] = problem_counts.get('System Instability (Recycles)', 0) + 1
        if problem_counts:
            # Sort by frequency (descending).
            sorted_problems = sorted(problem_counts.items(), key=lambda x: x[1], reverse=True)
            doc.multi_cell(0, 5,
                           f"Analysis of {total - perfect_runs} runs with issues reveals the following problem distribution:",
                           align='J')
            doc.ln(5)
            # Problem distribution table
            headers = ('Problem Type', 'Occurrences', '% of Total Runs', '% of Failed Runs')
            with doc.table(
                    borders_layout="ALL",
                    col_widths=(70, 35, 37.5, 37.5),
                    headings_style=FontFace(emphasis="BOLD", size_pt=9, fill_color=(200, 200, 200)),
                    line_height=5,
                    text_align=("LEFT", "CENTER", "CENTER", "CENTER"),
                    width=180) as table:
                table.row(headers)
                for problem, count in sorted_problems:
                    pct_total = count / total * 100
                    pct_failed = (count / (total - perfect_runs) * 100) if (total - perfect_runs) > 0 else 0
                    # Color code by severity.
                    if pct_total >= 20:
                        row_color = doc.PALE_RED
                    elif pct_total >= 10:
                        row_color = (255, 200, 150)  # Orange
                    else:
                        row_color = (255, 255, 200)  # Yellow
                    style = FontFace(fill_color=row_color, size_pt=9)
                    row = table.row()
                    row.cell(problem, style=style)
                    row.cell(f"{count}/{total}", style=style)
                    row.cell(f"{pct_total:.1f}%", style=style)
                    row.cell(f"{pct_failed:.1f}%", style=style)
            doc.ln(5)
            doc.set_font(size=8, style='I')
            doc.multi_cell(0, 4,
                           "Legend: Red (>=20% of runs) = Critical priority | Orange (>=10%) = High priority | "
                           "Yellow (<10%) = Medium priority",
                           align='L')
            doc.reset_font()
        else:
            doc.set_font(size=10, style='B')
            doc.cell(0, 5, 'NO PROBLEMS DETECTED - All runs completed successfully', align='L')
            doc.ln(10)
            doc.reset_font()
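
    # Each failed run is expected to carry a `failures` list of (field, value)
    # pairs, e.g. (illustrative):
    #   run['failures'] = [
    #       ('radar_health_status_and_bit_report_valid_RdrHealthStatusAndBitReport_processor_status', 1),
    #   ]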

    def pdf_generate(self, folder_path: str, doc: Document, file_name: str):
        """
        This function writes the PDF to disk. The combined folder_path + file_name
        should stay under 260 characters (the Windows MAX_PATH limit).
        :param folder_path: path of the folder where the PDF will be created.
        :param doc: document created by start_doc.
        :param file_name: name of the PDF, without the '.pdf' extension.
        """
        file_path = os.path.join(folder_path, file_name)
        logging.info(f"Generating pdf file {file_path} (path length: {len(file_path)})")
        try:
            doc.output(f'{file_path}.pdf')
        except Exception as e:
            logging.warning("PDF not created for this reason: {}".format(e))


if __name__ == '__main__':
    # Minimal self-test: build a demo report from the two template JSON files.
    with open('..\\rt800_template.json') as f, open('..\\RBT_BLACK_ATC_1783.json') as t:
        json_template = json.load(f)
        json_test = json.load(t)
    my_pdf = PDFClass()
    doc = my_pdf.start_doc(json_template)
    file_name = my_pdf.pdf_name(json_template, '')
    my_pdf.pdf_preamble(doc, json_test)
    my_pdf.pdf_test_information_section(doc, json_test)
    # my_pdf.pdf_add_image(doc, r'C:\temp\prova.png', 'Prova', 'provax?')
    my_pdf.pdf_step_summary(7, 3, doc)
    my_pdf.pdf_step_summary(3, 1, doc)
    my_pdf.pdf_step_summary(10, 0, doc)
    my_pdf.pdf_step_summary(0, 3, doc)
    my_pdf.pdf_step_result('HF - USB Sensitivity - cycle number 1', 17, 0, doc)
    step_values = [
        ['Start Time: 2024-03-14 16:02:12.199903'],
        ['Get VQ1MPlatform-MIB::currentWaveform', 'DCE:46c8c5bb-d957-4bd8-9be9-98beb34ee888',
         'DCE:46c8c5bb-d957-4bd8-9be9-98beb34ee888'],
        ['Get VQ1MPlatform-MIB::currentWaveform', None, 'DCE:46c8c5bb-d957-4bd8-9be9-98beb34ee888', 'ERROR!'],
        ['Set HFSuite-MIB::operationalMode1.0 to 0', '0', '0'],
        ['Set HFSuite-MIB::rxSquelch1.0 to 0', '0', '0'],
        ['Set HFSuite-MIB::rfModulationType1.0 to 0', '0', '0'],
        ['Set cma180_set_connector to OutputConnector.RFCom', 'OutputConnector.RFCom', 'OutputConnector.RFCom'],
        ['Set cma180_set_rf_freq to 2000000', '2000000', '2000000'],
        ['Set cma180_set_level to -111', '-111', '-111'],
        ['Set HFSuite-MIB::rxSquelch1.0 to 0', '0', '0'],
        ['Set HFSuite-MIB::rfModulationType1.0 to 0', '0', '0'],
        ['Set cma180_set_connector to OutputConnector.RFCom', 'OutputConnector.RFCom', 'OutputConnector.RFCom'],
        ['Set cma180_set_rf_freq to 2000000', '2000000', '2000000'],
        ['Set cma180_set_level to -111', '-111', '-111'],
    ]
    step_keys = ['desc', 'result', 'expected', 'error']
    step_list = [dict(zip(step_keys, v)) for v in step_values]
    my_pdf.pdf_step_execution(step_list, doc)
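
    # Illustrative demo of the statistics chapters (sample values, not real
    # measurements; key names follow the docstrings above):
    demo_runs = [
        {'repetition': 1, 'success': True, 'pbit_time': 42.3,
         'b6_pass': 120, 'b6_fail': 0, 'b6_known_fail': 2,
         'b8_checked': 0, 'b8_pass': 0, 'b8_fail': 0,
         'serial_total': 5000, 'serial_errors': 3, 'serial_fatal': 0,
         'serial_recycles': 1, 'target_detected': None},
        {'repetition': 2, 'success': False, 'pbit_time': 44.0,
         'b6_pass': 118, 'b6_fail': 2, 'b6_known_fail': 2,
         'b8_checked': 2, 'b8_pass': 1, 'b8_fail': 1,
         'serial_total': 5100, 'serial_errors': 5, 'serial_fatal': 1,
         'serial_recycles': 2, 'target_detected': None,
         'failures': [('RdrHealthStatusAndBitReport_processor_status', 1)]},
    ]
    demo_aggregate = {
        'total_runs': 2, 'successful_runs': 1, 'failed_runs': 1,
        'total_b6_checks': 244, 'total_b6_pass': 238, 'total_b6_fail': 2, 'total_b6_known': 4,
        'total_b8_checks': 2, 'total_b8_pass': 1, 'total_b8_fail': 1,
        'total_serial_msgs': 10100, 'total_serial_errors': 8,
        'total_serial_fatal': 1, 'total_serial_recycles': 3,
        'avg_pbit_time': 43.15, 'min_pbit_time': 42.3, 'max_pbit_time': 44.0, 'std_dev_pbit': 0.85,
    }
    my_pdf.pdf_add_per_run_summary(doc, demo_runs)
    my_pdf.pdf_add_global_statistics(doc, demo_aggregate)
    my_pdf.pdf_add_machine_reliability_assessment(doc, demo_aggregate, demo_runs)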
    my_pdf.pdf_generate('', doc, file_name)