File size: 6,128 Bytes
5e4510c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
---
# Configuration for the MLIR attention-optimization evolution example.
# (Header previously said "function minimization"; the prompt below
# targets MLIR transformation parameters, so the title was stale.)

# Total number of evolution iterations to run.
max_iterations: 100
# Persist a checkpoint every N iterations.
checkpoint_interval: 10
# Logging verbosity (standard level name, e.g. DEBUG/INFO/WARNING/ERROR).
log_level: "INFO"

# LLM configuration
llm:
  # Alternative primary models, kept commented for quick switching:
  # primary_model: "gemini-2.0-flash-lite"
  # primary_model: "gpt-4.1-nano"
  primary_model: "o3"
  # NOTE(review): weights presumably control how often each model is
  # sampled (they sum to 1.0 with secondary_model_weight) — confirm
  # against the consuming tool's schema.
  primary_model_weight: 0.8
  # Alternative secondary model, kept commented:
  # secondary_model: "gemini-2.0-flash"
  secondary_model: "gpt-4.1-mini"
  secondary_model_weight: 0.2
  # Alternative API endpoints, kept commented:
  # api_base: "https://generativelanguage.googleapis.com/v1beta/openai/"
  # api_base: "https://api.cerebras.ai/v1"
  # Sampling parameters passed to the LLM.
  temperature: 0.7
  top_p: 0.95
  # Maximum tokens per generated response.
  max_tokens: 4096

# Prompt configuration
prompt:
  # Previous system message (function-minimization example), kept for reference:
  # system_message: "You are an expert programmer specializing in optimization algorithms. Your task is to improve a function minimization algorithm to find the global minimum of a complex function with many local minima. The function is f(x, y) = sin(x) * cos(y) + sin(x*y) + (x^2 + y^2)/20. Focus on improving the search_algorithm function to reliably find the global minimum, escaping local minima that might trap simple algorithms."

  # Literal block scalar (|) preserves the prompt's line/list structure.
  # The previous double-quoted multi-line scalar folded every newline into
  # a space, flattening the headings and bullet lists into one long line,
  # and trailing spaces inside the quotes leaked into the value.
  system_message: |
    You are an expert MLIR compiler optimization specialist focused on optimizing attention mechanisms for maximum performance. Your goal is to evolve MLIR transformation parameters to achieve 15-32% speedup improvements, similar to DeepMind's AlphaEvolve results.

    Your Expertise:
    - **MLIR Dialects**: Deep knowledge of Linalg, Vector, SCF, Arith, and Transform dialects
    - **Attention Mechanisms**: Understanding of Q@K^T, softmax, and attention@V computations
    - **Memory Optimization**: Cache hierarchy, memory bandwidth, data locality patterns
    - **Hardware Targets**: CPU vectorization, GPU memory coalescing, tensor core utilization
    - **Compiler Transformations**: Tiling, fusion, vectorization, loop optimization

    Optimization Space:

    Tiling Strategies (Memory Access Optimization):
    - **Tile sizes**: Balance between cache utilization and parallelism
      - Small tiles (16x16): Better cache locality, less parallelism
      - Medium tiles (32x32, 64x64): Balanced approach
      - Large tiles (128x128+): More parallelism, potential cache misses
    - **Tile dimensions**: Consider sequence length vs head dimension tiling
    - **Multi-level tiling**: L1/L2/L3 cache-aware nested tiling

    Memory Layout Patterns:
    - **row_major**: Standard layout, good for sequential access
    - **col_major**: Better for certain matrix operations
    - **blocked**: Cache-friendly blocked layouts
    - **interleaved**: For reducing bank conflicts

    Vectorization Strategies:
    - **none**: No vectorization (baseline)
    - **outer**: Vectorize outer loops (batch/head dimensions)
    - **inner**: Vectorize inner loops (sequence/feature dimensions)
    - **full**: Comprehensive vectorization across all suitable dimensions

    Fusion Patterns (Reduce Memory Traffic):
    - **producer**: Fuse operations with their producers
    - **consumer**: Fuse operations with their consumers
    - **both**: Aggressive fusion in both directions
    - **vertical**: Fuse across computation stages (QK -> softmax -> attention)
    - **horizontal**: Fuse across parallel operations

    Loop Optimizations:
    - **unroll_factor**: 1, 2, 4, 8 (balance code size vs ILP)
    - **loop_interchange**: Reorder loops for better cache access
    - **loop_distribution**: Split loops for better optimization opportunities
    - **loop_skewing**: Transform loop bounds for parallelization

    Advanced Optimizations:
    - **prefetch_distance**: How far ahead to prefetch data (0-8)
    - **cache_strategy**: temporal, spatial, or mixed cache utilization
    - **shared_memory**: Use shared memory for GPU optimization
    - **pipeline_stages**: Number of pipeline stages for latency hiding

    Performance Targets:
    - **Baseline**: Standard attention implementation
    - **Target**: 32% speedup (1.32x performance improvement)
    - **Metrics**: Runtime reduction, memory bandwidth efficiency, cache hit rates

    Key Constraints:
    - **Correctness**: All optimizations must preserve numerical accuracy
    - **Memory bounds**: Stay within available cache/memory limits
    - **Hardware limits**: Respect vectorization and parallelization constraints

    Optimization Principles:
    1. **Memory-bound workloads**: Focus on data layout and cache optimization
    2. **Compute-bound workloads**: Emphasize vectorization and instruction-level parallelism
    3. **Mixed workloads**: Balance memory and compute optimizations
    4. **Attention patterns**: Leverage the specific computational structure of attention

    When evolving parameters, consider:
    - **Sequence length scaling**: How optimizations perform across different input sizes
    - **Hardware characteristics**: Cache sizes, vector widths, memory bandwidth
    - **Attention variants**: Standard attention, sparse attention, local attention
    - **Numerical precision**: fp32, fp16, bf16 trade-offs

    Evolution Strategy:
    1. Start with fundamental optimizations (tiling, basic vectorization)
    2. Add memory layout optimizations
    3. Explore fusion opportunities
    4. Fine-tune advanced parameters
    5. Consider hardware-specific optimizations

    Success Indicators:
    - Speedup > 1.0 (any improvement is progress)
    - Speedup > 1.15 (good optimization)
    - Speedup > 1.25 (excellent optimization)
    - Speedup > 1.32 (target achieved - AlphaEvolve level)

    Generate innovative parameter combinations that push the boundaries of what's possible with MLIR transformations while maintaining correctness and staying within hardware constraints.

  # Number of top-scoring programs included in the prompt context.
  num_top_programs: 3
  # Randomize prompt-template selection between iterations.
  use_template_stochasticity: true

# Database configuration
database:
  # Maximum number of programs kept in the active population.
  population_size: 50
  # Size of the retained archive of programs — NOTE(review): presumably
  # the best/elite programs; confirm semantics in the consuming tool.
  archive_size: 20
  # Number of independent islands (island-model evolution).
  num_islands: 3
  # Fraction of the population treated as elites during selection.
  elite_selection_ratio: 0.2
  # NOTE(review): presumably the probability of exploiting known-good
  # programs vs exploring new ones — confirm exact semantics.
  exploitation_ratio: 0.7

# Evaluator configuration
evaluator:
  # Per-evaluation timeout — presumably in seconds; TODO confirm units.
  timeout: 60
  # Evaluate candidates in stages, discarding early failures cheaply.
  cascade_evaluation: true
  # Score thresholds a candidate must pass to reach the next cascade stage.
  cascade_thresholds: [0.5, 0.75]
  # Number of evaluations run concurrently.
  parallel_evaluations: 4
  # Whether to request qualitative LLM feedback during evaluation.
  use_llm_feedback: false

# Evolution settings
# Evolve programs via diffs rather than regenerating whole programs.
diff_based_evolution: true
allow_full_rewrites: false

# Raise the maximum allowed program length above the stated default of
# 10000 so longer MLIR-parameter programs are not rejected.
max_program_length: 55000  # Increase from default 10000