#!/bin/bash
# Upgrade script: replace the simulated evaluator with one that drives the
# real MLIR toolchain (mlir-opt / mlir-translate).
# Run from: openevolve/examples/attention_optimization/

echo "🔧 Upgrading to Real MLIR Compilation"
echo "====================================="

# Sanity check: must be run from the attention_optimization example directory.
if [[ ! -f "evaluator.py" ]]; then
    echo "❌ Error: evaluator.py not found"
    echo "Please run this from: openevolve/examples/attention_optimization/"
    exit 1
fi

# Verify the MLIR toolchain is reachable before touching anything.
echo "🔍 Testing MLIR tools..."
if ! command -v mlir-opt &> /dev/null; then
    echo "❌ mlir-opt not found in PATH"
    echo "Please add your MLIR bin directory to PATH"
    exit 1
fi

if ! command -v mlir-translate &> /dev/null; then
    echo "❌ mlir-translate not found in PATH"
    echo "Please add your MLIR bin directory to PATH"
    exit 1
fi

echo "✅ MLIR tools found"

# Keep a copy of the simulated evaluator so the upgrade is reversible.
echo "💾 Backing up current evaluator..."
cp evaluator.py evaluator_simulated.py.backup
echo "✅ Backup saved as evaluator_simulated.py.backup"

# Install the real-MLIR evaluator. The quoted 'EOF' delimiter disables all
# shell expansion inside the embedded Python.
echo "📝 Installing real MLIR evaluator..."
cat > evaluator.py << 'EOF'
|
|
|
|
|
""" |
|
|
Real MLIR compiler integration for attention optimization. |
|
|
Uses actual mlir-opt and mlir-translate for compilation and benchmarking. |
|
|
""" |
|
|
|
|
|
import sys |
|
|
import json |
|
|
import subprocess |
|
|
import tempfile |
|
|
import time |
|
|
import os |
|
|
import shlex |
|
|
from pathlib import Path |
|
|
|
|
|
class RealMLIRCompiler:
    """Real MLIR compilation and benchmarking via mlir-opt.

    All intermediate files are staged in a dedicated temp directory; it is
    deliberately left on disk so failing IR can be inspected afterwards.
    """

    def __init__(self, mlir_opt_path="mlir-opt", mlir_translate_path="mlir-translate"):
        # Tool paths may be bare names (resolved via PATH) or absolute paths.
        self.mlir_opt = mlir_opt_path
        self.mlir_translate = mlir_translate_path
        self.temp_dir = Path(tempfile.mkdtemp(prefix="mlir_attention_"))

        self.verify_mlir_tools()

    def verify_mlir_tools(self):
        """Verify MLIR tools are available and working.

        Raises:
            RuntimeError: if mlir-opt is missing, hangs, or exits non-zero.
        """
        try:
            result = subprocess.run([self.mlir_opt, "--version"],
                                    capture_output=True, text=True, timeout=10)
            if result.returncode != 0:
                raise RuntimeError(f"mlir-opt not working: {result.stderr}")

            print(f"✅ MLIR tools verified: {self.mlir_opt}")

        except FileNotFoundError as e:
            raise RuntimeError(
                "MLIR tools not found in PATH. Please add MLIR bin directory to PATH."
            ) from e
        except RuntimeError:
            # Don't re-wrap the returncode error raised above.
            raise
        except Exception as e:
            raise RuntimeError(f"MLIR tools verification failed: {e}")

    def compile_mlir(self, mlir_code, optimization_passes=None):
        """Compile MLIR code with real mlir-opt.

        Args:
            mlir_code: MLIR module text to compile.
            optimization_passes: optional list of mlir-opt flags; when omitted
                a basic cleanup pipeline is used.

        Returns:
            (compiled_ir, None) on success, (None, error_message) on failure.
        """
        try:
            mlir_file = self.temp_dir / "input.mlir"
            with open(mlir_file, 'w') as f:
                f.write(mlir_code)

            if optimization_passes:
                cmd = [self.mlir_opt, str(mlir_file)] + optimization_passes
            else:
                # Default: standard cleanup passes only.
                cmd = [self.mlir_opt, str(mlir_file),
                       "--canonicalize",
                       "--cse",
                       "--symbol-dce"]

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)

            if result.returncode != 0:
                return None, result.stderr

            return result.stdout, None

        except subprocess.TimeoutExpired:
            return None, "MLIR compilation timed out"
        except Exception as e:
            return None, f"MLIR compilation error: {e}"

    def apply_transform_passes(self, mlir_code, transform_params):
        """Build a pass list from optimization parameters and compile.

        Recognized keys in transform_params: tile_size_m / tile_size_n,
        vectorization ('none' | ... | 'full'), unroll_factor, fusion_strategy.
        """
        # Always start with basic cleanup.
        passes = ["--canonicalize", "--cse"]

        # Tiling: only applied when both dimensions request a real tile (> 1).
        tile_size_m = transform_params.get('tile_size_m', 0)
        tile_size_n = transform_params.get('tile_size_n', 0)
        if tile_size_m > 1 and tile_size_n > 1:
            passes.append(f"--linalg-tile-to-parallel-loops={{tile-sizes={tile_size_m},{tile_size_n}}}")

        # Vectorization
        vectorization = transform_params.get('vectorization', 'none')
        if vectorization != 'none':
            passes.append("--convert-linalg-to-vector")
            if vectorization == 'full':
                passes.append("--vector-bufferize")

        # Loop unrolling
        unroll_factor = transform_params.get('unroll_factor', 1)
        if unroll_factor > 1:
            passes.append(f"--affine-loop-unroll={{unroll-factor={unroll_factor}}}")

        # Elementwise fusion
        fusion_strategy = transform_params.get('fusion_strategy', 'none')
        if fusion_strategy != 'none':
            passes.append("--linalg-fuse-elementwise-ops")

        # Final cleanup after all transformations.
        passes.extend(["--canonicalize", "--cse", "--symbol-dce"])

        return self.compile_mlir(mlir_code, passes)

    def benchmark_mlir(self, optimized_mlir, test_config):
        """Estimate runtime from lowering time and IR complexity.

        NOTE: nothing is executed here. The "benchmark" lowers the IR to the
        LLVM dialect and combines compilation time, output-IR size, and
        workload size into a proxy runtime figure.

        Args:
            optimized_mlir: MLIR text to lower.
            test_config: (batch, heads, seq_len, head_dim) tuple.

        Returns:
            (estimated_runtime, None) on success; (1000.0, error) on failure.
        """
        try:
            batch, heads, seq_len, head_dim = test_config

            benchmark_file = self.temp_dir / f"benchmark_{batch}_{heads}_{seq_len}_{head_dim}.mlir"
            with open(benchmark_file, 'w') as f:
                f.write(optimized_mlir)

            start_time = time.time()

            # Full lowering pipeline down to the LLVM dialect.
            cmd = [self.mlir_opt, str(benchmark_file),
                   "--canonicalize",
                   "--cse",
                   "--symbol-dce",
                   "--convert-linalg-to-loops",
                   "--convert-scf-to-cf",
                   "--convert-cf-to-llvm",
                   "--convert-func-to-llvm",
                   "--reconcile-unrealized-casts"]

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
            compilation_time = time.time() - start_time

            if result.returncode != 0:
                # Large penalty value for IR that fails to lower.
                return 1000.0, f"Compilation failed: {result.stderr[:200]}"

            # Heuristic proxy: more IR lines and slower lowering -> slower code.
            ir_lines = len(result.stdout.split('\n'))
            base_complexity = 50
            complexity_factor = ir_lines / base_complexity
            time_factor = compilation_time * 5

            estimated_runtime = complexity_factor * time_factor

            # Scale by workload relative to the (1, 8, 128, 64) reference shape.
            workload_scale = (batch * heads * seq_len * head_dim) / (1 * 8 * 128 * 64)
            estimated_runtime *= workload_scale

            return estimated_runtime, None

        except subprocess.TimeoutExpired:
            return 1000.0, "Compilation timeout"
        except Exception as e:
            return 1000.0, f"Benchmark error: {e}"
|
|
|
|
|
class RealMLIRAttentionEvaluator:
    """Evaluates MLIR attention optimizations using the real MLIR compiler."""

    def __init__(self):
        # Constructing the compiler verifies the toolchain up front
        # (raises RuntimeError if mlir-opt is unavailable).
        self.compiler = RealMLIRCompiler()

        # Baseline IR produced by torch-mlir; load_base_mlir falls back to a
        # built-in module when this file is absent.
        self.base_mlir_file = Path(__file__).parent / "mlir" / "self_attention_torch_mlir_gen.mlir"
        self.reference_performance = None  # lazily computed baseline runtime

        # (batch, heads, seq_len, head_dim) shapes used for benchmarking.
        self.test_configs = [
            (1, 8, 128, 64),
            (2, 12, 256, 64),
        ]

    def load_base_mlir(self):
        """Load the baseline MLIR implementation, or synthesize one."""
        if not self.base_mlir_file.exists():
            return self.create_baseline_mlir()

        with open(self.base_mlir_file, 'r') as f:
            return f.read()

    def create_baseline_mlir(self):
        """Create a realistic baseline MLIR attention implementation.

        Two linalg.generic ops: scores = Q @ K^T, then output = scores @ V.
        Softmax is intentionally omitted to keep the lowering pipeline simple.
        """
        baseline = '''
module {
  func.func @baseline_attention(
      %query: tensor<1x8x128x64xf32>,
      %key: tensor<1x8x128x64xf32>,
      %value: tensor<1x8x128x64xf32>
  ) -> tensor<1x8x128x64xf32> {
    %c0 = arith.constant 0.0 : f32
    %c128 = arith.constant 128 : index
    %c64 = arith.constant 64 : index

    // Initialize output tensors
    %scores_init = tensor.empty() : tensor<1x8x128x128xf32>
    %output_init = tensor.empty() : tensor<1x8x128x64xf32>

    // Compute Q @ K^T
    %attention_scores = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]
    } ins(%query, %key : tensor<1x8x128x64xf32>, tensor<1x8x128x64xf32>)
      outs(%scores_init : tensor<1x8x128x128xf32>) {
    ^bb0(%q: f32, %k: f32, %acc: f32):
      %prod = arith.mulf %q, %k : f32
      %sum = arith.addf %acc, %prod : f32
      linalg.yield %sum : f32
    }

    // Apply attention weights to values
    %attention_output = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]
    } ins(%attention_scores, %value : tensor<1x8x128x128xf32>, tensor<1x8x128x64xf32>)
      outs(%output_init : tensor<1x8x128x64xf32>) {
    ^bb0(%weight: f32, %v: f32, %acc: f32):
      %weighted = arith.mulf %weight, %v : f32
      %sum = arith.addf %acc, %weighted : f32
      linalg.yield %sum : f32
    }

    return %attention_output : tensor<1x8x128x64xf32>
  }
}
'''
        return baseline.strip()

    def compile_with_optimizations(self, base_mlir, optimization_params):
        """Apply real MLIR optimizations and compile.

        Returns:
            (True, optimized_ir) on success, (False, error_message) on failure.
        """
        try:
            print(f"🔧 Applying optimizations: {optimization_params}")

            optimized_mlir, error = self.compiler.apply_transform_passes(base_mlir, optimization_params)

            if optimized_mlir is None:
                return False, f"Optimization failed: {error}"

            print(f"✅ Optimization succeeded, IR size: {len(optimized_mlir)} chars")
            return True, optimized_mlir

        except Exception as e:
            return False, f"Optimization error: {e}"

    def get_reference_performance(self):
        """Get (and cache) baseline performance via real MLIR compilation."""
        if self.reference_performance is None:
            base_mlir = self.load_base_mlir()

            baseline_compiled, error = self.compiler.compile_mlir(base_mlir)
            if baseline_compiled is None:
                print(f"❌ Baseline compilation failed: {error}")
                # Fallback constant so evaluation can still proceed.
                self.reference_performance = 10.0
                return self.reference_performance

            # Average the proxy runtime over every benchmark shape.
            total_time = 0
            for config in self.test_configs:
                runtime, bench_error = self.compiler.benchmark_mlir(baseline_compiled, config)
                if bench_error:
                    print(f"⚠️ Baseline benchmark warning: {bench_error}")
                total_time += runtime

            self.reference_performance = total_time / len(self.test_configs)
            print(f"📊 Reference performance: {self.reference_performance:.4f}")

        return self.reference_performance
|
|
|
|
|
|
|
|
# Module-level singleton: constructing it verifies the MLIR toolchain once.
evaluator = RealMLIRAttentionEvaluator()


def evaluate_program(program_content):
    """
    Main evaluation function using real MLIR compilation.

    Args:
        program_content: Python source defining optimize_attention() -> dict
            of optimization parameters.

    Returns:
        Metrics dict; always contains "error" (lower is better).

    SECURITY: program_content is exec'd with full privileges — only ever feed
    it trusted, evolution-generated code.
    """
    try:
        # Execute the candidate program to obtain its parameter dictionary.
        exec_globals = {}
        exec(program_content, exec_globals)

        if 'optimize_attention' not in exec_globals:
            return {"error": 1000.0, "compilation_error": "No optimize_attention function"}

        params = exec_globals['optimize_attention']()
        print(f"🧬 Evaluating parameters: {params}")

        base_mlir = evaluator.load_base_mlir()

        success, optimized_result = evaluator.compile_with_optimizations(base_mlir, params)
        if not success:
            print(f"❌ Compilation failed: {optimized_result}")
            return {"error": 500.0, "compilation_error": str(optimized_result)[:200]}

        # Benchmark the optimized IR over all test shapes.
        total_runtime = 0
        benchmark_errors = []
        for config in evaluator.test_configs:
            runtime, bench_error = evaluator.compiler.benchmark_mlir(optimized_result, config)
            if bench_error:
                benchmark_errors.append(bench_error)
            total_runtime += runtime

        avg_runtime = total_runtime / len(evaluator.test_configs)

        reference_time = evaluator.get_reference_performance()
        speedup = reference_time / avg_runtime if avg_runtime > 0 else 0.0

        # Map speedup to an error score around the 1.32x target.
        target_speedup = 1.32
        if speedup >= target_speedup:
            # (target - speedup) is <= 0 here, so this always clamps to the
            # 0.1 floor: any speedup at/above target scores the same.
            error = max(0.1, (target_speedup - speedup) * 10)
        else:
            # Below target: penalty grows linearly with the shortfall.
            error = (target_speedup - speedup) * 100
        error = max(0.01, error)

        result = {
            "error": error,
            "speedup": speedup,
            "runtime": avg_runtime,
            "reference_runtime": reference_time,
            "real_mlir_compilation": True,
            "ir_size": len(optimized_result),
        }

        # Surface scalar parameters as individual metrics.
        for key, value in params.items():
            if isinstance(value, (int, float, bool)):
                result[f"param_{key}"] = float(value) if isinstance(value, bool) else value

        if benchmark_errors:
            result["benchmark_warnings"] = "; ".join(benchmark_errors[:3])

        print(f"📊 Result: error={error:.3f}, speedup={speedup:.3f}x, runtime={avg_runtime:.6f}")

        return result

    except Exception as e:
        print(f"❌ Evaluation exception: {e}")
        return {"error": 1000.0, "exception": str(e)[:200]}
|
|
|
|
|
def main():
    """CLI entry point: evaluate a program file and print JSON metrics."""
    if len(sys.argv) != 2:
        print("Usage: python evaluator.py <program_file>")
        sys.exit(1)

    program_file = sys.argv[1]

    try:
        with open(program_file, 'r') as f:
            program_content = f.read()

        result = evaluate_program(program_content)
        print(json.dumps(result, indent=2))

    except Exception as e:
        # Report failures in the same JSON shape the caller expects.
        error_result = {"error": 1000.0, "exception": str(e)}
        print(json.dumps(error_result, indent=2))


if __name__ == "__main__":
    main()
|
|
EOF

echo "✅ Real MLIR evaluator installed"

echo "📄 Updating baseline MLIR file..."
# Ensure the target directory exists before writing the baseline IR.
mkdir -p mlir
cat > mlir/baseline_attention.mlir << 'EOF'
module {
  func.func @baseline_attention(
      %query: tensor<1x8x128x64xf32>,
      %key: tensor<1x8x128x64xf32>,
      %value: tensor<1x8x128x64xf32>
  ) -> tensor<1x8x128x64xf32> {
    %c0 = arith.constant 0.0 : f32

    // Initialize output tensors
    %scores_init = tensor.empty() : tensor<1x8x128x128xf32>
    %output_init = tensor.empty() : tensor<1x8x128x64xf32>

    // Compute Q @ K^T (simplified for real compilation)
    %attention_scores = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]
    } ins(%query, %key : tensor<1x8x128x64xf32>, tensor<1x8x128x64xf32>)
      outs(%scores_init : tensor<1x8x128x128xf32>) {
    ^bb0(%q: f32, %k: f32, %acc: f32):
      %prod = arith.mulf %q, %k : f32
      %sum = arith.addf %acc, %prod : f32
      linalg.yield %sum : f32
    }

    // Apply attention weights to values
    %attention_output = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]
    } ins(%attention_scores, %value : tensor<1x8x128x128xf32>, tensor<1x8x128x64xf32>)
      outs(%output_init : tensor<1x8x128x64xf32>) {
    ^bb0(%weight: f32, %v: f32, %acc: f32):
      %weighted = arith.mulf %weight, %v : f32
      %sum = arith.addf %acc, %weighted : f32
      linalg.yield %sum : f32
    }

    return %attention_output : tensor<1x8x128x64xf32>
  }
}
EOF

echo "✅ Updated baseline MLIR file"

echo "🧪 Testing real MLIR integration..."
python test_setup.py

echo ""
echo "🎯 Upgrade Complete!"
echo "=================="
echo "✅ Now using REAL MLIR compilation with mlir-opt"
echo "✅ Actual optimization passes applied"
echo "✅ Real compilation time and IR complexity measured"
echo ""
echo "🚀 Ready to run with real MLIR:"
echo "python ../../openevolve-run.py initial_program.py evaluator.py --config config.yaml --iterations 10"
echo ""
echo "📊 What's different now:"
echo "- Uses actual mlir-opt compilation"
echo "- Applies real tiling, vectorization, fusion passes"
echo "- Measures real compilation time and IR complexity"
echo "- Much more accurate performance modeling"