#!/bin/bash
# upgrade_to_real_mlir.sh
# Upgrade the evaluator to use real MLIR compilation
echo "πŸ”§ Upgrading to Real MLIR Compilation"
echo "====================================="
# Check we're in the right directory
if [[ ! -f "evaluator.py" ]]; then
    echo "❌ Error: evaluator.py not found"
    echo "Please run this from: openevolve/examples/attention_optimization/"
    exit 1
fi
# Test MLIR tools are available
echo "πŸ” Testing MLIR tools..."
if ! command -v mlir-opt &> /dev/null; then
    echo "❌ mlir-opt not found in PATH"
    echo "Please add your MLIR bin directory to PATH"
    exit 1
fi
if ! command -v mlir-translate &> /dev/null; then
    echo "❌ mlir-translate not found in PATH"
    echo "Please add your MLIR bin directory to PATH"
    exit 1
fi
echo "βœ… MLIR tools found"
# Backup current evaluator
echo "πŸ’Ύ Backing up current evaluator..."
cp evaluator.py evaluator_simulated.py.backup
echo "βœ… Backup saved as evaluator_simulated.py.backup"
# Replace with real MLIR evaluator
echo "πŸ”„ Installing real MLIR evaluator..."
cat > evaluator.py << 'EOF'
#!/usr/bin/env python3
"""
Real MLIR compiler integration for attention optimization.
Uses actual mlir-opt and mlir-translate for compilation and benchmarking.
"""
import sys
import json
import subprocess
import tempfile
import time
from pathlib import Path
class RealMLIRCompiler:
    """Real MLIR compilation and benchmarking"""

    def __init__(self, mlir_opt_path="mlir-opt", mlir_translate_path="mlir-translate"):
        self.mlir_opt = mlir_opt_path
        self.mlir_translate = mlir_translate_path
        self.temp_dir = Path(tempfile.mkdtemp(prefix="mlir_attention_"))
        # Verify MLIR tools are available
        self.verify_mlir_tools()

    def verify_mlir_tools(self):
        """Verify MLIR tools are available and working"""
        try:
            # Test mlir-opt
            result = subprocess.run([self.mlir_opt, "--version"],
                                    capture_output=True, text=True, timeout=10)
            if result.returncode != 0:
                raise RuntimeError(f"mlir-opt not working: {result.stderr}")
            print(f"✅ MLIR tools verified: {self.mlir_opt}")
        except FileNotFoundError:
            raise RuntimeError("MLIR tools not found in PATH. Please add MLIR bin directory to PATH.")
        except Exception as e:
            raise RuntimeError(f"MLIR tools verification failed: {e}")

    def compile_mlir(self, mlir_code, optimization_passes=None):
        """Compile MLIR code with real mlir-opt"""
        try:
            # Write MLIR to temporary file
            mlir_file = self.temp_dir / "input.mlir"
            with open(mlir_file, 'w') as f:
                f.write(mlir_code)
            # Build optimization pipeline
            if optimization_passes:
                cmd = [self.mlir_opt, str(mlir_file)] + optimization_passes
            else:
                # Default passes for basic optimization
                cmd = [self.mlir_opt, str(mlir_file),
                       "--canonicalize",
                       "--cse",
                       "--symbol-dce"]
            # Run compilation
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
            if result.returncode != 0:
                return None, result.stderr
            return result.stdout, None
        except subprocess.TimeoutExpired:
            return None, "MLIR compilation timed out"
        except Exception as e:
            return None, f"MLIR compilation error: {e}"

    def apply_transform_passes(self, mlir_code, transform_params):
        """Apply transformation passes based on optimization parameters"""
        passes = []
        # Basic cleanup passes
        passes.extend(["--canonicalize", "--cse"])
        # NOTE: the transformation pass names and option spellings below vary
        # across MLIR releases; check `mlir-opt --help` for your build.
        # Tiling passes
        tile_size_m = transform_params.get('tile_size_m', 0)
        tile_size_n = transform_params.get('tile_size_n', 0)
        if tile_size_m > 1 and tile_size_n > 1:
            # Apply linalg tiling (flag=option=value syntax, not pipeline-brace syntax)
            passes.append(f"--linalg-tile-to-parallel-loops=tile-sizes={tile_size_m},{tile_size_n}")
        # Vectorization passes
        vectorization = transform_params.get('vectorization', 'none')
        if vectorization != 'none':
            passes.append("--convert-linalg-to-vector")
            if vectorization == 'full':
                passes.append("--vector-bufferize")
        # Loop optimization passes
        unroll_factor = transform_params.get('unroll_factor', 1)
        if unroll_factor > 1:
            passes.append(f"--affine-loop-unroll=unroll-factor={unroll_factor}")
        # Fusion passes
        fusion_strategy = transform_params.get('fusion_strategy', 'none')
        if fusion_strategy != 'none':
            passes.append("--linalg-fuse-elementwise-ops")
        # Final cleanup
        passes.extend(["--canonicalize", "--cse", "--symbol-dce"])
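        # Illustrative example (hypothetical parameter values): for
        # {'tile_size_m': 4, 'tile_size_n': 8, 'unroll_factor': 2} the resulting
        # invocation is roughly:
        #   mlir-opt input.mlir --canonicalize --cse \
        #     --linalg-tile-to-parallel-loops=tile-sizes=4,8 \
        #     --affine-loop-unroll=unroll-factor=2 \
        #     --canonicalize --cse --symbol-dce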
        return self.compile_mlir(mlir_code, passes)

    def benchmark_mlir(self, optimized_mlir, test_config):
        """Benchmark MLIR implementation using compilation time and IR complexity"""
        try:
            batch, heads, seq_len, head_dim = test_config
            # Write optimized MLIR to file
            benchmark_file = self.temp_dir / f"benchmark_{batch}_{heads}_{seq_len}_{head_dim}.mlir"
            with open(benchmark_file, 'w') as f:
                f.write(optimized_mlir)
            # Measure compilation time
            start_time = time.time()
            # Compile with lowering passes
            # NOTE: tensor-level IR generally needs bufferization (e.g.
            # --one-shot-bufferize) before --convert-linalg-to-loops; adjust
            # this pipeline to your MLIR version if lowering fails.
            cmd = [self.mlir_opt, str(benchmark_file),
                   "--canonicalize",
                   "--cse",
                   "--symbol-dce",
                   "--convert-linalg-to-loops",
                   "--convert-scf-to-cf",
                   "--convert-cf-to-llvm",
                   "--convert-func-to-llvm",
                   "--reconcile-unrealized-casts"]
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
            compilation_time = time.time() - start_time
            if result.returncode != 0:
                # Compilation failed
                return 1000.0, f"Compilation failed: {result.stderr[:200]}"
            # Measure IR complexity
            ir_lines = len(result.stdout.split('\n'))
            # Calculate performance metric:
            # faster compilation + simpler IR = better estimated performance
            base_complexity = 50
            complexity_factor = ir_lines / base_complexity
            time_factor = compilation_time * 5
            estimated_runtime = complexity_factor * time_factor
            # Scale by workload size
            workload_scale = (batch * heads * seq_len * head_dim) / (1 * 8 * 128 * 64)
            estimated_runtime *= workload_scale
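            # Worked example of the heuristic: ir_lines=200, compilation_time=0.4s
            # -> complexity_factor = 200/50 = 4.0, time_factor = 2.0, estimate = 8.0;
            # for config (2, 12, 256, 64) the workload_scale is 6.0, giving 48.0.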
            return estimated_runtime, None
        except subprocess.TimeoutExpired:
            return 1000.0, "Compilation timeout"
        except Exception as e:
            return 1000.0, f"Benchmark error: {e}"

class RealMLIRAttentionEvaluator:
    """Evaluates MLIR attention optimizations using real MLIR compiler"""

    def __init__(self):
        # Initialize real MLIR compiler
        self.compiler = RealMLIRCompiler()
        # Load base MLIR implementation
        self.base_mlir_file = Path(__file__).parent / "mlir" / "self_attention_torch_mlir_gen.mlir"
        self.reference_performance = None
        # Test configurations: (batch, heads, seq_len, head_dim)
        self.test_configs = [
            (1, 8, 128, 64),   # Small
            (2, 12, 256, 64),  # Medium
        ]

    def load_base_mlir(self):
        """Load the baseline MLIR implementation"""
        if not self.base_mlir_file.exists():
            return self.create_baseline_mlir()
        with open(self.base_mlir_file, 'r') as f:
            return f.read()

    def create_baseline_mlir(self):
        """Create a realistic baseline MLIR attention implementation"""
        baseline = '''
module {
  func.func @baseline_attention(
      %query: tensor<1x8x128x64xf32>,
      %key: tensor<1x8x128x64xf32>,
      %value: tensor<1x8x128x64xf32>
  ) -> tensor<1x8x128x64xf32> {
    %c0 = arith.constant 0.0 : f32
    // Zero-initialize the accumulators so the reductions are well defined
    %scores_empty = tensor.empty() : tensor<1x8x128x128xf32>
    %scores_init = linalg.fill ins(%c0 : f32) outs(%scores_empty : tensor<1x8x128x128xf32>) -> tensor<1x8x128x128xf32>
    %output_empty = tensor.empty() : tensor<1x8x128x64xf32>
    %output_init = linalg.fill ins(%c0 : f32) outs(%output_empty : tensor<1x8x128x64xf32>) -> tensor<1x8x128x64xf32>
    // Compute Q @ K^T (reduction over d)
    %attention_scores = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]
    } ins(%query, %key : tensor<1x8x128x64xf32>, tensor<1x8x128x64xf32>)
      outs(%scores_init : tensor<1x8x128x128xf32>) {
    ^bb0(%q: f32, %k: f32, %acc: f32):
      %prod = arith.mulf %q, %k : f32
      %sum = arith.addf %acc, %prod : f32
      linalg.yield %sum : f32
    } -> tensor<1x8x128x128xf32>
    // Apply attention weights to values (reduction over s2)
    %attention_output = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel"]
    } ins(%attention_scores, %value : tensor<1x8x128x128xf32>, tensor<1x8x128x64xf32>)
      outs(%output_init : tensor<1x8x128x64xf32>) {
    ^bb0(%weight: f32, %v: f32, %acc: f32):
      %weighted = arith.mulf %weight, %v : f32
      %sum = arith.addf %acc, %weighted : f32
      linalg.yield %sum : f32
    } -> tensor<1x8x128x64xf32>
    return %attention_output : tensor<1x8x128x64xf32>
  }
}
'''
        return baseline.strip()
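    # NOTE: this baseline intentionally omits the 1/sqrt(d) scaling and softmax;
    # it exercises only the two matmul-like contractions of attention.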

    def compile_with_optimizations(self, base_mlir, optimization_params):
        """Apply real MLIR optimizations and compile"""
        try:
            print(f"🔧 Applying optimizations: {optimization_params}")
            # Apply transformation passes
            optimized_mlir, error = self.compiler.apply_transform_passes(base_mlir, optimization_params)
            if optimized_mlir is None:
                return False, f"Optimization failed: {error}"
            print(f"✅ Optimization succeeded, IR size: {len(optimized_mlir)} chars")
            return True, optimized_mlir
        except Exception as e:
            return False, f"Optimization error: {e}"

    def get_reference_performance(self):
        """Get baseline performance using real MLIR compilation"""
        if self.reference_performance is None:
            base_mlir = self.load_base_mlir()
            # Compile baseline without optimizations
            baseline_compiled, error = self.compiler.compile_mlir(base_mlir)
            if baseline_compiled is None:
                print(f"❌ Baseline compilation failed: {error}")
                # Fall back to an estimated reference performance
                self.reference_performance = 10.0
                return self.reference_performance
            # Benchmark baseline performance
            total_time = 0
            for config in self.test_configs:
                runtime, bench_error = self.compiler.benchmark_mlir(baseline_compiled, config)
                if bench_error:
                    print(f"⚠️ Baseline benchmark warning: {bench_error}")
                total_time += runtime
            self.reference_performance = total_time / len(self.test_configs)
            print(f"📊 Reference performance: {self.reference_performance:.4f}")
        return self.reference_performance

# Global evaluator instance using real MLIR
evaluator = RealMLIRAttentionEvaluator()

def evaluate_program(program_content):
    """
    Main evaluation function using real MLIR compilation.
    """
    try:
        # Execute the evolved program to get optimization parameters
        exec_globals = {}
        exec(program_content, exec_globals)
        if 'optimize_attention' not in exec_globals:
            return {"error": 1000.0, "compilation_error": "No optimize_attention function"}
        # Get optimization parameters
        params = exec_globals['optimize_attention']()
        print(f"🧬 Evaluating parameters: {params}")
        # Load base MLIR
        base_mlir = evaluator.load_base_mlir()
        # Apply real MLIR optimizations and compile
        success, optimized_result = evaluator.compile_with_optimizations(base_mlir, params)
        if not success:
            # Compilation failed - high error penalty
            print(f"❌ Compilation failed: {optimized_result}")
            return {"error": 500.0, "compilation_error": str(optimized_result)[:200]}
        # Benchmark optimized performance using real MLIR
        total_runtime = 0
        benchmark_errors = []
        for config in evaluator.test_configs:
            runtime, bench_error = evaluator.compiler.benchmark_mlir(optimized_result, config)
            if bench_error:
                benchmark_errors.append(bench_error)
            total_runtime += runtime
        avg_runtime = total_runtime / len(evaluator.test_configs)
        # Calculate speedup vs reference
        reference_time = evaluator.get_reference_performance()
        speedup = reference_time / avg_runtime if avg_runtime > 0 else 0.0
        # Convert speedup to error metric
        target_speedup = 1.32  # 32% improvement target
        if speedup >= target_speedup:
            # Achieved target: error floors at 0.1
            error = max(0.1, (target_speedup - speedup) * 10)
        else:
            # Below target
            error = (target_speedup - speedup) * 100
            error = max(0.01, error)
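        # Worked example: speedup 1.10 -> error = (1.32 - 1.10) * 100 = 22.0;
        # speedup 1.40 -> error = max(0.1, (1.32 - 1.40) * 10) = 0.1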
        # Prepare result
        result = {
            "error": error,
            "speedup": speedup,
            "runtime": avg_runtime,
            "reference_runtime": reference_time,
            "real_mlir_compilation": True,
            "ir_size": len(optimized_result),
        }
        # Add parameter metrics
        for key, value in params.items():
            if isinstance(value, (int, float, bool)):
                result[f"param_{key}"] = float(value) if isinstance(value, bool) else value
        # Add any benchmark warnings
        if benchmark_errors:
            result["benchmark_warnings"] = "; ".join(benchmark_errors[:3])
        print(f"📊 Result: error={error:.3f}, speedup={speedup:.3f}x, runtime={avg_runtime:.6f}")
        return result
    except Exception as e:
        print(f"❌ Evaluation exception: {e}")
        return {"error": 1000.0, "exception": str(e)[:200]}

def main():
    """Main evaluation entry point for command line testing"""
    if len(sys.argv) != 2:
        print("Usage: python evaluator.py <program_file>")
        sys.exit(1)
    program_file = sys.argv[1]
    try:
        with open(program_file, 'r') as f:
            program_content = f.read()
        result = evaluate_program(program_content)
        print(json.dumps(result, indent=2))
    except Exception as e:
        error_result = {"error": 1000.0, "exception": str(e)}
        print(json.dumps(error_result, indent=2))

if __name__ == "__main__":
    main()
EOF
echo "βœ… Real MLIR evaluator installed"
# Update the baseline MLIR file to be more realistic
echo "πŸ“„ Updating baseline MLIR file..."
cat > mlir/baseline_attention.mlir << 'EOF'
module {
  func.func @baseline_attention(
      %query: tensor<1x8x128x64xf32>,
      %key: tensor<1x8x128x64xf32>,
      %value: tensor<1x8x128x64xf32>
  ) -> tensor<1x8x128x64xf32> {
    %c0 = arith.constant 0.0 : f32
    // Zero-initialize the accumulators so the reductions are well defined
    %scores_empty = tensor.empty() : tensor<1x8x128x128xf32>
    %scores_init = linalg.fill ins(%c0 : f32) outs(%scores_empty : tensor<1x8x128x128xf32>) -> tensor<1x8x128x128xf32>
    %output_empty = tensor.empty() : tensor<1x8x128x64xf32>
    %output_init = linalg.fill ins(%c0 : f32) outs(%output_empty : tensor<1x8x128x64xf32>) -> tensor<1x8x128x64xf32>
    // Compute Q @ K^T (simplified for real compilation; reduction over d)
    %attention_scores = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]
    } ins(%query, %key : tensor<1x8x128x64xf32>, tensor<1x8x128x64xf32>)
      outs(%scores_init : tensor<1x8x128x128xf32>) {
    ^bb0(%q: f32, %k: f32, %acc: f32):
      %prod = arith.mulf %q, %k : f32
      %sum = arith.addf %acc, %prod : f32
      linalg.yield %sum : f32
    } -> tensor<1x8x128x128xf32>
    // Apply attention weights to values (reduction over s2)
    %attention_output = linalg.generic {
      indexing_maps = [
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, s2)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s2, d)>,
        affine_map<(b, h, s1, s2, d) -> (b, h, s1, d)>
      ],
      iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel"]
    } ins(%attention_scores, %value : tensor<1x8x128x128xf32>, tensor<1x8x128x64xf32>)
      outs(%output_init : tensor<1x8x128x64xf32>) {
    ^bb0(%weight: f32, %v: f32, %acc: f32):
      %weighted = arith.mulf %weight, %v : f32
      %sum = arith.addf %acc, %weighted : f32
      linalg.yield %sum : f32
    } -> tensor<1x8x128x64xf32>
    return %attention_output : tensor<1x8x128x64xf32>
  }
}
EOF
echo "βœ… Updated baseline MLIR file"
# Test the real MLIR setup
echo "πŸ§ͺ Testing real MLIR integration..."
python test_setup.py
echo ""
echo "🎯 Upgrade Complete!"
echo "=================="
echo "βœ… Now using REAL MLIR compilation with mlir-opt"
echo "βœ… Actual optimization passes applied"
echo "βœ… Real compilation time and IR complexity measured"
echo ""
echo "πŸš€ Ready to run with real MLIR:"
echo "python ../../openevolve-run.py initial_program.py evaluator.py --config config.yaml --iterations 10"
echo ""
echo "πŸ“Š What's different now:"
echo "- Uses actual mlir-opt compilation"
echo "- Applies real tiling, vectorization, fusion passes"
echo "- Measures real compilation time and IR complexity"
echo "- Much more accurate performance modeling"