|
|
|
|
|
""" |
|
|
Initial attention optimization program for AlphaEvolve reproduction. |
|
|
This program defines MLIR transformation parameters that will be evolved. |
|
|
Targets 32% speedup like the original AlphaEvolve paper. |
|
|
""" |
|
|
|
|
|
import json |
|
|
import sys |
|
|
import random |
|
|
|
|
|
def optimize_attention(seed=None):
    """
    Define attention optimization parameters for evolution.

    The goal is to achieve 32% speedup (1.32x) like the AlphaEvolve paper
    by optimizing compiler-generated MLIR IR for attention kernels.

    Args:
        seed: Optional int. When given, seeds a private random generator so
            the sampled parameter set is reproducible (useful for replaying
            an individual from an evolution run). Defaults to None, which
            keeps the original behavior of drawing from the module-level
            ``random`` state.

    Returns:
        dict: A JSON-serializable mapping of MLIR transformation parameters
        (tile sizes, vectorization, unrolling, fusion, boolean knobs) plus
        fixed metadata keys 'optimization_strategy' and 'target_speedup'.
    """
    # Use the shared module-level RNG when unseeded so existing callers see
    # identical behavior; a private Random instance only when reproducing.
    rng = random if seed is None else random.Random(seed)

    # Tile sizes for the M/N dimensions. The full search space intended for
    # evolution is M in [16, 32, 64, 128] and N in [32, 64, 128, 256], but
    # the initial program samples only the mid-range values most likely to
    # perform well.
    tile_size_m = rng.choice([32, 64])
    tile_size_n = rng.choice([64, 128])

    # Vectorization backend for the generated loops.
    vectorization = rng.choice(['none', 'affine', 'linalg'])

    # Loop unrolling: half the time bias toward the moderate factors (2, 4);
    # otherwise sample uniformly from the full range.
    unroll_factors = [1, 2, 4, 8]
    unroll_factor = rng.choice([2, 4] if rng.random() > 0.5 else unroll_factors)

    # Operator fusion: 70% of the time bias toward the aggressive strategies
    # ('both', 'producer'); otherwise sample the full set.
    fusion_strategies = ['none', 'producer', 'consumer', 'both']
    fusion_strategy = rng.choice(
        ['both', 'producer'] if rng.random() > 0.3 else fusion_strategies
    )

    # Independent boolean knobs, each sampled uniformly.
    loop_interchange = rng.choice([True, False])
    use_shared_memory = rng.choice([True, False])
    optimize_for_latency = rng.choice([True, False])
    enable_blocking = rng.choice([True, False])
    enable_recomputation = rng.choice([True, False])

    return {
        'tile_size_m': tile_size_m,
        'tile_size_n': tile_size_n,
        'vectorization': vectorization,
        'unroll_factor': unroll_factor,
        'loop_interchange': loop_interchange,
        'fusion_strategy': fusion_strategy,
        'use_shared_memory': use_shared_memory,
        'optimize_for_latency': optimize_for_latency,
        'enable_blocking': enable_blocking,
        'enable_recomputation': enable_recomputation,
        # Fixed metadata describing the experiment; not evolved.
        'optimization_strategy': 'alphaevolve_inspired',
        'target_speedup': 1.32,
    }
|
|
|
|
|
if __name__ == "__main__":
    # Sample one optimization parameter set and emit it as
    # pretty-printed JSON on stdout for the evolution harness.
    print(json.dumps(optimize_attention(), indent=2))