"""
|
|
Test script per verificare le ottimizzazioni del sistema di logging.
|
|
|
|
Usage:
|
|
$env:PYTHONPATH='C:\src\____GitProjects\target_simulator'
|
|
python tools/test_logging_performance.py
|
|
"""
|
|
|
|
import time
|
|
import logging
|
|
import tkinter as tk
|
|
from tkinter.scrolledtext import ScrolledText
|
|
import sys
|
|
import os
|
|
|
|
# Add project root to path
|
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
|
|
|
from target_simulator.utils.logger import setup_basic_logging, add_tkinter_handler, get_logger
|
|
from target_simulator.config import LOGGING_CONFIG
|
|
|
|
|
|


def stress_test_logging(logger, num_messages=1000, delay_ms=0):
    """Generate log messages to stress-test the system."""
    print(f"\n=== Stress Test: {num_messages} messages ===")
    start = time.perf_counter()

    for i in range(num_messages):
        logger.debug(f"Debug message {i}")
        if i % 100 == 0:
            logger.info(f"Progress: {i}/{num_messages}")
        if i % 250 == 0:
            logger.warning(f"Warning at {i}")
        if delay_ms > 0:
            time.sleep(delay_ms / 1000.0)

    elapsed = time.perf_counter() - start
    rate = num_messages / elapsed if elapsed > 0 else 0
    print(f"Generated {num_messages} logs in {elapsed:.3f}s ({rate:.1f} msg/s)")
    return elapsed


def test_batch_performance():
    """Test that batching works correctly."""
    print("\n=== Test Batch Performance ===")

    root = tk.Tk()
    root.title("Logging Performance Test")
    root.geometry("800x600")

    # Create log widget
    log_widget = ScrolledText(root, state=tk.DISABLED, wrap=tk.WORD)
    log_widget.pack(fill=tk.BOTH, expand=True)

    # Setup logging system
    setup_basic_logging(root, LOGGING_CONFIG)
    add_tkinter_handler(log_widget, LOGGING_CONFIG)
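    # Assumed behaviour (not verified here): add_tkinter_handler installs a
    # queue-based handler that batches records and flushes them to the widget on
    # a periodic poll (roughly 100 ms, per the comment below); adjust the waits
    # in this test if your LOGGING_CONFIG uses different timings.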

    logger = get_logger("test_logger")
    logger.setLevel(logging.DEBUG)

    # Test 1: Rapid fire logging
    print("\nTest 1: 500 rapid messages (no delay)")
    elapsed1 = stress_test_logging(logger, num_messages=500, delay_ms=0)

    # Allow GUI to process
    print("Waiting for GUI to catch up...")
    for _ in range(30):  # 3 seconds at 100ms poll interval
        root.update()
        time.sleep(0.1)

    # Test 2: Moderate rate logging
    print("\nTest 2: 200 messages with 10ms delay")
    elapsed2 = stress_test_logging(logger, num_messages=200, delay_ms=10)

    # Allow GUI to process
    print("Waiting for GUI to catch up...")
    for _ in range(30):
        root.update()
        time.sleep(0.1)

    # Test 3: Check widget line count (should be capped at max_lines)
    widget_lines = int(log_widget.index('end-1c').split('.')[0])
    print(f"\nWidget line count: {widget_lines}")
    print("Expected max: 1000 (may be less if not enough messages)")

    print("\n=== Test Complete ===")
    print("Check the GUI window to verify:")
    print(" 1. All messages appeared (may be trimmed to last 1000)")
    print(" 2. Colors are correct (DEBUG=gray, INFO=black, WARNING=orange)")
    print(" 3. Window remained responsive during logging")
    print(" 4. Auto-scroll worked (if you were at bottom)")

    # Keep window open
    print("\nClose the window to exit...")
    root.mainloop()


def test_adaptive_polling():
    """Test that adaptive polling works."""
    print("\n=== Test Adaptive Polling ===")

    root = tk.Tk()
    root.withdraw()  # Hide window for this test

    setup_basic_logging(root, LOGGING_CONFIG)
    logger = get_logger("adaptive_test")
    logger.setLevel(logging.DEBUG)

    # Simulate activity burst followed by idle
    print("\nPhase 1: Activity burst (should poll fast)")
    for i in range(50):
        logger.debug(f"Active message {i}")
        root.update()
        time.sleep(0.05)  # 50ms between messages

    print("\nPhase 2: Idle period (should slow down polling)")
    print("Monitoring for 15 seconds...")
    start = time.time()
    while (time.time() - start) < 15:
        root.update()
        time.sleep(0.1)

    print("\nPhase 3: Re-activate (should speed up again)")
    for i in range(20):
        logger.debug(f"Reactivated message {i}")
        root.update()
        time.sleep(0.05)

    print("\n=== Test Complete ===")
    print("Check console output for timing variations (not visible in this test)")
    print("In production, you can add debug logging to _process_global_log_queue()")

    root.destroy()


def benchmark_comparison():
    """Benchmark old vs new approach (simulated)."""
    print("\n=== Benchmark Comparison (Simulated) ===")

    # Simulate old approach: write each log individually
    print("\nOLD APPROACH (individual writes):")
    messages = [f"Message {i}" for i in range(1000)]

    old_ops = 0
    for _ in messages:
        # Simulate: configure(NORMAL) + insert + configure(DISABLED) + see(END)
        old_ops += 4
    print(f" Simulated {old_ops} widget operations")
    print(f" Estimated time (at 0.5ms/op): {old_ops * 0.0005:.3f}s")

    # Simulate new approach: batch writes
    print("\nNEW APPROACH (batched writes):")
    BATCH_SIZE = 50
    num_batches = len(messages) // BATCH_SIZE + (1 if len(messages) % BATCH_SIZE else 0)

    new_ops = 0
    for batch_idx in range(num_batches):
        # Simulate: configure(NORMAL) + N*insert + configure(DISABLED) + see(END)
        batch_size = min(BATCH_SIZE, len(messages) - batch_idx * BATCH_SIZE)
        new_ops += 1 + batch_size + 1 + 1  # NORMAL + inserts + DISABLED + see
    print(f" Simulated {new_ops} widget operations")
    print(f" Estimated time (at 0.5ms/op): {new_ops * 0.0005:.3f}s")
    improvement = ((old_ops - new_ops) / old_ops) * 100
    print(f"\n=== Improvement: {improvement:.1f}% fewer widget operations ===")


if __name__ == "__main__":
    print("=== Logging Performance Test Suite ===")
    print("\nSelect test to run:")
    print(" 1. Batch Performance Test (interactive GUI)")
    print(" 2. Adaptive Polling Test")
    print(" 3. Benchmark Comparison (simulation)")
    print(" 4. Run all tests")

    choice = input("\nEnter choice (1-4): ").strip()

    if choice == "1":
        test_batch_performance()
    elif choice == "2":
        test_adaptive_polling()
    elif choice == "3":
        benchmark_comparison()
    elif choice == "4":
        test_adaptive_polling()
        time.sleep(2)
        benchmark_comparison()
        time.sleep(2)
        test_batch_performance()
    else:
        print("Invalid choice. Running benchmark only.")
        benchmark_comparison()