#!/usr/bin/env python3
"""
GPU Benchmark Report Generator
Generates HTML reports from benchmark results
"""

import json
import argparse
from datetime import datetime, timezone
from typing import Dict


def load_benchmark_results(filename: str) -> Dict:
    """Load benchmark results from JSON file."""
    with open(filename, 'r') as f:
        return json.load(f)


def generate_html_report(results: Dict, output_file: str):
    """Generate HTML benchmark report.

    Args:
        results: Parsed benchmark results. Expects keys 'timestamp'
            (epoch seconds), 'gpu_info' (dict of GPU properties), and
            'benchmarks' (dict keyed by benchmark name, each entry with
            'ops_per_sec', 'mean', 'std').
        output_file: Path the HTML report is written to (overwritten).
    """
    # The report labels this value "UTC", so render it in UTC explicitly —
    # a naive fromtimestamp() would silently use the machine's local zone.
    timestamp = datetime.fromtimestamp(results['timestamp'], tz=timezone.utc)
    gpu_info = results['gpu_info']
    benchmarks = results['benchmarks']

    # Report header, summary, and GPU information table.
    html_content = f""" GPU Benchmark Report - AITBC

🚀 GPU Benchmark Report

AITBC Performance Analysis

Generated: {timestamp.strftime('%Y-%m-%d %H:%M:%S UTC')}

📊 Performance Summary

Overall Performance Score: {calculate_performance_score(benchmarks):.1f}/100
GPU Utilization: {gpu_info.get('gpu_name', 'Unknown')}
CUDA Version: {gpu_info.get('cuda_version', 'N/A')}

🖥️ GPU Information

PropertyValue
GPU Name{gpu_info.get('gpu_name', 'N/A')}
Total Memory{gpu_info.get('gpu_memory', 0):.1f} GB
Compute Capability{gpu_info.get('gpu_compute_capability', 'N/A')}
Driver Version{gpu_info.get('gpu_driver_version', 'N/A')}
Temperature{gpu_info.get('gpu_temperature', 'N/A')}°C
Power Usage{gpu_info.get('gpu_power_usage', 0):.1f}W
"""

    # One card per benchmark entry.
    # NOTE(review): status values use hyphens ("status-good"), so the
    # underscore replace below is a no-op and this renders "Status-Good";
    # confirm whether the hyphen prefix was meant to be stripped instead.
    for name, data in benchmarks.items():
        status = get_performance_status(data['ops_per_sec'])
        html_content += f"""

{format_benchmark_name(name)}

Operations/sec: {data['ops_per_sec']:.2f}
Mean Time: {data['mean']:.4f}s
Std Dev: {data['std']:.4f}s
Status: {status.replace('_', ' ').title()}
"""

    # Static footer sections.
    html_content += """

📈 Performance Comparison

🎯 Benchmark Breakdown

"""

    # Write HTML file
    with open(output_file, 'w') as f:
        f.write(html_content)


def calculate_performance_score(benchmarks: Dict) -> float:
    """Calculate overall performance score (0-100).

    Each benchmark's ops/sec is normalized onto a 0-100 scale and the
    results are combined as a weighted average; benchmarks not listed in
    the weight table get a small default weight of 0.1.
    """
    if not benchmarks:
        return 0.0

    # Weight different benchmark types
    weights = {
        'pytorch_matmul': 0.2,
        'cupy_matmul': 0.2,
        'gpu_hash_computation': 0.25,
        'pow_simulation': 0.25,
        'neural_forward': 0.1,
    }

    total_score = 0.0
    total_weight = 0.0

    for name, data in benchmarks.items():
        weight = weights.get(name, 0.1)
        # Normalize ops/sec to a 0-100 scale against an arbitrary baseline:
        # ops_per_sec / 100, capped at 100 (i.e. 10,000 ops/sec = 100 points).
        normalized_score = min(100, data['ops_per_sec'] / 100)
        total_score += normalized_score * weight
        total_weight += weight

    return total_score / total_weight if total_weight > 0 else 0.0


def get_performance_status(ops_per_sec: float) -> str:
    """Get performance status (CSS class name) based on operations per second."""
    if ops_per_sec > 100:
        return "status-good"
    elif ops_per_sec > 50:
        return "status-warning"
    else:
        return "status-bad"


def format_benchmark_name(name: str) -> str:
    """Format benchmark name for display (underscores to spaces, title case)."""
    return name.replace('_', ' ').title()


def compare_with_history(current_results: Dict, history_file: str) -> Dict:
    """Compare current results with historical data.

    Args:
        current_results: Current run; expects 'timestamp' and 'benchmarks'.
        history_file: Path to a JSON file with a 'results' list; the most
            recent entry (last element) is used as the baseline.

    Returns:
        {"status": "no_history"} when no usable history exists, otherwise a
        dict with "status", "timestamp_diff", and per-benchmark "changes".
    """
    try:
        with open(history_file, 'r') as f:
            history = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or corrupt history is not an error — report as no history.
        return {"status": "no_history"}

    # Get most recent historical data
    if not history.get('results'):
        return {"status": "no_history"}

    latest_history = history['results'][-1]
    current_benchmarks = current_results['benchmarks']
    history_benchmarks = latest_history['benchmarks']

    comparison = {
        "status": "comparison_available",
        "timestamp_diff": current_results['timestamp'] - latest_history['timestamp'],
        "changes": {}
    }

    for name, current_data in current_benchmarks.items():
        history_data = history_benchmarks.get(name)
        if history_data is None:
            continue
        baseline = history_data['ops_per_sec']
        if baseline == 0:
            # A zero baseline has no meaningful percentage change; skip it
            # instead of raising ZeroDivisionError.
            continue
        change_percent = ((current_data['ops_per_sec'] - baseline) / baseline) * 100
        # More than ±5% counts as a real change; anything inside is stable.
        if change_percent > 5:
            trend = 'improved'
        elif change_percent < -5:
            trend = 'degraded'
        else:
            trend = 'stable'
        comparison['changes'][name] = {
            'current_ops': current_data['ops_per_sec'],
            'history_ops': baseline,
            'change_percent': change_percent,
            'status': trend,
        }

    return comparison


def main():
    """CLI entry point: load results, emit the HTML report, print comparison."""
    parser = argparse.ArgumentParser(description='Generate GPU benchmark report')
    parser.add_argument('--input', required=True, help='Input JSON file with benchmark results')
    parser.add_argument('--output', required=True, help='Output HTML file')
    parser.add_argument('--history-file', help='Historical benchmark data file')
    args = parser.parse_args()

    # Load benchmark results
    results = load_benchmark_results(args.input)

    # Generate HTML report
    generate_html_report(results, args.output)

    # Compare with history if available
    if args.history_file:
        comparison = compare_with_history(results, args.history_file)
        print(f"Performance comparison: {comparison['status']}")
        if comparison['status'] == 'comparison_available':
            for name, change in comparison['changes'].items():
                print(f"{name}: {change['change_percent']:+.2f}% ({change['status']})")

    print(f"✅ Benchmark report generated: {args.output}")


if __name__ == "__main__":
    main()