#!/usr/bin/env python3
import argparse
import importlib.util
import json
import os
import sys
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Optional

# Add resources to path for imports
HERE = Path(__file__).resolve().parent
RESOURCES_DIR = HERE / "resources"
sys.path.insert(0, str(RESOURCES_DIR))

from benchmark import run_benchmark
from baseline import fused_linear_jsd as baseline_fused_linear_jsd

# torch and triton are required by the benchmark; importing them here makes
# missing-dependency failures surface early.
import torch
import triton

DEFAULT_SPEC = HERE / "resources" / "submission_spec.json"
ARTIFACT_PATH = Path("./output_ans").resolve()


def load_solution_module(solution_path: Path) -> ModuleType:
    """Load the solution module from the given path."""
    if not solution_path.exists():
        raise FileNotFoundError(f"solution.py not found at {solution_path}")
    spec = importlib.util.spec_from_file_location("submitted_solution", solution_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Failed to load spec for {solution_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module


def materialize_artifact(result: Any, solution_path: Path) -> Path:
    """Materialize the solution result into an artifact file."""
    ARTIFACT_PATH.parent.mkdir(parents=True, exist_ok=True)

    if isinstance(result, dict):
        with ARTIFACT_PATH.open("w", encoding="utf-8") as fout:
            json.dump(result, fout)
        return ARTIFACT_PATH

    if isinstance(result, str):
        # Check whether the string could be a file path (reasonable length and
        # no newlines) before calling is_file(), to avoid "File name too long"
        # errors.
        is_possible_path = len(result) < 4096 and "\n" not in result
        if is_possible_path:
            candidate = Path(result)
            try:
                if candidate.is_file():
                    with ARTIFACT_PATH.open("w", encoding="utf-8") as fout:
                        json.dump({"program_path": str(candidate.resolve())}, fout)
                    return ARTIFACT_PATH
            except OSError:
                # Path too long or other OS error - treat as a code string.
                pass
        # Treat as a code string. Wrap it in the JSON shape that
        # load_fused_linear_jsd_from_artifact expects; writing the raw string
        # would leave an artifact that json.load cannot parse.
        with ARTIFACT_PATH.open("w", encoding="utf-8") as fout:
            json.dump({"code": result}, fout)
        return ARTIFACT_PATH

    raise TypeError(
        "Solution.solve() must return a dict/path-string/code-string; got "
        f"{type(result)!r}."
    )
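
# The artifact written by materialize_artifact() is JSON with one of two
# shapes, mirroring what load_fused_linear_jsd_from_artifact() below accepts
# (values are illustrative):
#
#   {"code": "<python source defining fused_linear_jsd>"}
#   {"program_path": "/abs/path/to/program.py"}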


def load_fused_linear_jsd_from_artifact(artifact_path: Path) -> Any:
    """Load the fused_linear_jsd function from the artifact."""
    with artifact_path.open("r", encoding="utf-8") as fin:
        artifact = json.load(fin)

    if "code" in artifact:
        # Write the code to a temporary file and import it as a module, to
        # avoid Triton source-inspection issues with exec()'d code.
        import tempfile

        temp_file = None
        try:
            with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
                f.write(artifact["code"])
                temp_file = f.name

            # Import the temporary file as a module.
            spec = importlib.util.spec_from_file_location(
                "temp_fused_linear_jsd_module", temp_file
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            if not hasattr(module, "fused_linear_jsd"):
                raise ValueError("Code must define a 'fused_linear_jsd' function")

            return module.fused_linear_jsd
        finally:
            # Clean up the temporary file whether or not loading succeeded.
            if temp_file is not None:
                try:
                    os.unlink(temp_file)
                except OSError:
                    pass
    elif "program_path" in artifact:
        # Load from an external file.
        program_path = Path(artifact["program_path"])
        if not program_path.exists():
            raise FileNotFoundError(f"Program file not found: {program_path}")
        spec = importlib.util.spec_from_file_location("submitted_program", program_path)
        if spec is None or spec.loader is None:
            raise ImportError(f"Failed to load spec for {program_path}")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        if not hasattr(module, "fused_linear_jsd"):
            raise ValueError("Program must define a 'fused_linear_jsd' function")
        return module.fused_linear_jsd
    else:
        raise ValueError("Artifact must contain either 'code' or 'program_path'")
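
# Worked example of the scoring interpolation implemented below (numbers are
# illustrative, not real benchmark output). With geometric-mean times of
# cpu = 30 ms, gpu = 7 ms, answer = 2 ms:
#
#   target_time_0   = 30 / 3 = 10 ms   (3x over CPU  ->   0 points)
#   target_time_100 =  7 / 7 =  1 ms   (7x over GPU  -> 100 points)
#   score = 100 * (10 - 2) / (10 - 1) ~= 88.9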


def evaluate_kernel_performance(
    fused_linear_jsd_func: Any, metadata: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """Evaluate the performance of a Triton kernel implementation."""
    try:
        # Run benchmark comparing against baseline (enable print_output for debugging).
        result = run_benchmark(
            fused_linear_jsd_func,
            baseline_fused_linear_jsd,
            print_output=True,
            metadata=metadata,
        )

        # Extract key metrics.
        geometric_mean_speedup = result["geometric_mean_speedup"]
        arithmetic_mean_speedup = result["arithmetic_mean_speedup"]
        median_speedup = result["median_speedup"]
        pass_all = result["pass_all"]

        # Enforce strict correctness: if any test fails, score 0.
        if not pass_all:
            return {
                "error": "Correctness not 100% across all tests",
                "geometric_mean_speedup": geometric_mean_speedup,
                "arithmetic_mean_speedup": arithmetic_mean_speedup,
                "median_speedup": median_speedup,
                "score": 0,
                "pass_all": False,
                "total_tests": len(result["rows"]),
                "passed_tests": sum(1 for r in result["rows"] if r["close_passed"]),
            }

        # Calculate score on a 0-100 scale.
        # Map 3x CPU baseline (0 points) to 7x GPU baseline (100 points).
        # Linear interpolation:
        #   score = 100 * (cpu_time/3 - answer_time) / (cpu_time/3 - gpu_time/7)
        geo_mean_cpu_time = result.get("geo_mean_cpu_time", 0.0)
        geo_mean_gpu_time = result.get("geo_mean_gpu_time", 0.0)
        geo_mean_answer_time = result.get("geo_mean_answer_time", 0.0)

        if geo_mean_cpu_time > 0 and geo_mean_gpu_time > 0 and geo_mean_answer_time > 0:
            # Target time for 0 points: cpu_time / 3 (3x speedup over CPU).
            target_time_0 = geo_mean_cpu_time / 3.0
            # Target time for 100 points: gpu_time / 7 (7x speedup over GPU).
            target_time_100 = geo_mean_gpu_time / 7.0

            if geo_mean_answer_time >= target_time_0:
                # Slower than or equal to the 3x CPU baseline: score = 0.
                score = 0.0
            elif geo_mean_answer_time <= target_time_100:
                # Faster than the 7x GPU baseline: score = 100.
                score = 100.0
            else:
                # Linear interpolation between the 3x CPU and 7x GPU baselines.
                score = 100.0 * (target_time_0 - geo_mean_answer_time) / (
                    target_time_0 - target_time_100
                )
        else:
            # Fallback: use speedup vs GPU if times are not available.
            raw_score = min(geometric_mean_speedup, 7.0)
            score = max(0, (raw_score - 1.0) / 6.0 * 100)

        return {
            "geometric_mean_speedup": geometric_mean_speedup,
            "arithmetic_mean_speedup": arithmetic_mean_speedup,
            "median_speedup": median_speedup,
            "score": score,
            "pass_all": pass_all,
            "total_tests": len(result["rows"]),
            "passed_tests": sum(1 for r in result["rows"] if r["close_passed"]),
        }
    except Exception as e:
        return {
            "error": str(e),
            "score": 0,
            "pass_all": False,
        }


def evaluate(solution_path: Path, spec_path: Path) -> dict:
    """Main evaluation function."""
    try:
        # Load the solution module.
        module = load_solution_module(solution_path)
        if not hasattr(module, "Solution"):
            raise ValueError("Solution module must define a 'Solution' class")

        solution_class = module.Solution
        solution_instance = solution_class()
        if not hasattr(solution_instance, "solve"):
            raise ValueError("Solution class must have a 'solve' method")

        # Load metadata from the spec if available.
        metadata = None
        if spec_path.exists():
            with spec_path.open("r", encoding="utf-8") as f:
                spec = json.load(f)
            metadata = spec.get("metadata", None)

        # Get the solution result.
        result = solution_instance.solve(str(spec_path))

        # Materialize the artifact.
        artifact_path = materialize_artifact(result, solution_path)

        # Load the fused_linear_jsd function from the artifact.
        fused_linear_jsd_func = load_fused_linear_jsd_from_artifact(artifact_path)

        # Evaluate performance.
        evaluation_result = evaluate_kernel_performance(fused_linear_jsd_func, metadata=metadata)

        return {
            "status": "success",
            "artifact_path": str(artifact_path),
            **evaluation_result,
        }
    except Exception as e:
        return {
            "status": "error",
            "error": str(e),
            "score": 0,
        }
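
# A minimal sketch of the solution.py contract this evaluator enforces (the
# class and method names are required; the returned artifact content is up to
# the submitter):
#
#   class Solution:
#       def solve(self, spec_path: str):
#           # May return a dict artifact, a path string to a program file,
#           # or a code string defining fused_linear_jsd.
#           return {"code": "..."}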


def main():
    parser = argparse.ArgumentParser(
        description="Evaluate Fused Linear Jensen-Shannon Divergence optimization solutions"
    )
    parser.add_argument(
        "--solution-path",
        type=Path,
        default=Path("./solution.py"),
        help="Path to solution.py file",
    )
    parser.add_argument(
        "--spec-path",
        type=Path,
        default=DEFAULT_SPEC,
        help="Path to specification file",
    )
    parser.add_argument(
        "--output-path",
        type=Path,
        default=Path("./result.json"),
        help="Path to output result file",
    )
    args = parser.parse_args()

    # Run evaluation.
    result = evaluate(args.solution_path, args.spec_path)

    # Write result.
    with args.output_path.open("w", encoding="utf-8") as fout:
        json.dump(result, fout, indent=2)

    # Print summary.
    if result["status"] == "success":
        print("Evaluation completed successfully!")
        print(f"Score: {result['score']:.2f}/100")

        # Check if there's an error (e.g., a correctness failure).
        if "error" in result:
            print(f"Error: {result['error']}")
            if "geometric_mean_speedup" in result:
                print(f"Geometric mean speedup: {result['geometric_mean_speedup']:.3f}x")
            if "passed_tests" in result and "total_tests" in result:
                print(f"Tests passed: {result['passed_tests']}/{result['total_tests']}")
        else:
            # Successful evaluation.
            if "geometric_mean_speedup" in result:
                print(f"Geometric mean speedup: {result['geometric_mean_speedup']:.3f}x")
            if "passed_tests" in result and "total_tests" in result:
                print(f"Tests passed: {result['passed_tests']}/{result['total_tests']}")

        # Print the score as the last line for main_loop.sh to extract.
        print(result["score"])
    else:
        print(f"Evaluation failed: {result['error']}")
        # Print an error score as the last line.
        print("0")
        sys.exit(1)


if __name__ == "__main__":
    main()
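
# Example invocation (the script filename is assumed; the flags shown match
# the argparse defaults above):
#   python evaluate.py --solution-path ./solution.py \
#       --spec-path resources/submission_spec.json --output-path ./result.json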