|
|
|
|
|
""" |
|
|
Simple Test Script for Optimized MLX Attention |
|
|
|
|
|
This script demonstrates how to monkey patch the official mlx-lm library |
|
|
with the AlphaEvolve optimized attention kernel and shows the performance |
|
|
difference on a test prompt. |
|
|
|
|
|
Usage: |
|
|
python test_optimized_attention.py [path_to_best_program.py] |
|
|
|
|
|
If no path is provided, it will use the default best_program.py from |
|
|
openevolve_output/best/ |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import time |
|
|
import argparse |
|
|
import subprocess |
|
|
import tempfile |
|
|
from typing import Optional, Dict, Any |
|
|
import traceback |
|
|
|
|
|
|
|
|
def find_best_program() -> Optional[str]:
    """Locate the evolved best_program.py.

    Checks the canonical AlphaEvolve output location next to this script
    first, then a few cwd-relative fallbacks.

    Returns:
        The first candidate path that exists on disk, or None if none do.
    """
    # Preferred location: openevolve_output/best/ alongside this script.
    preferred = os.path.join(
        os.path.dirname(__file__), "openevolve_output", "best", "best_program.py"
    )
    # Fallback candidates are resolved relative to the current working directory.
    candidates = (
        preferred,
        "best_program.py",
        "openevolve_output/best/best_program.py",
        "../best_program.py",
    )
    return next((path for path in candidates if os.path.exists(path)), None)
|
|
|
|
|
|
|
|
def load_custom_attention_class(program_path: str):
    """Load the CustomGQAAttention class from the evolved program.

    Reads the evolved program text and executes it in a prepared global
    namespace that pre-binds mlx/numpy/typing names, then extracts the
    ``CustomGQAAttention`` class the program is expected to define.

    Args:
        program_path: Path to the evolved best_program.py file.

    Returns:
        The CustomGQAAttention class object, or None if loading failed
        (file error, missing class, import failure, etc.).
    """
    print(f"π Loading optimized attention from: {program_path}")

    try:
        # Read the whole evolved program as source text.
        with open(program_path, "r") as f:
            program_text = f.read()

        # Imported here (not at module top) so the script can still start
        # and report a useful error when mlx is not installed.
        import mlx.core as mx
        import mlx.nn as nn
        import numpy as np
        from typing import Optional, Tuple, Any

        # Namespace handed to exec(): the evolved program is written against
        # these short names rather than doing its own imports.
        exec_globals = {
            "__builtins__": __builtins__,
            "mx": mx,
            "nn": nn,
            "np": np,
            "time": time,
            "Optional": Optional,
            "Tuple": Tuple,
            "Any": Any,
        }

        # mlx_lm is optional at load time; the evolved attention may use its
        # RoPE implementation, so warn (but continue) if it is unavailable.
        try:
            exec_globals["mlx_lm"] = __import__("mlx_lm")
        except ImportError:
            print("β οΈ Could not import mlx_lm, RoPE may not work")

        # SECURITY: exec() runs arbitrary code from program_path with full
        # builtins — only point this at trusted AlphaEvolve output.
        exec(program_text, exec_globals)

        # The evolved program must define this exact class name.
        custom_class = exec_globals.get("CustomGQAAttention")
        if custom_class is None:
            raise ValueError("CustomGQAAttention class not found in program")

        print("β Successfully loaded CustomGQAAttention class")
        return custom_class

    except Exception as e:
        # Broad catch is intentional: any failure mode should be reported
        # and turned into a None return so main() can exit cleanly.
        print(f"β Failed to load custom attention: {e}")
        traceback.print_exc()
        return None
|
|
|
|
|
|
|
|
def apply_monkey_patch(custom_attention_class):
    """Apply monkey patch to replace Qwen3 attention with custom implementation.

    Swaps ``mlx_lm.models.qwen3.Attention`` for *custom_attention_class* in
    this process's module object.

    NOTE(review): this patches the current process only — it cannot affect
    generation run in a separate subprocess; verify the intended call path.

    Args:
        custom_attention_class: Replacement attention class (same interface
            as the original qwen3 Attention).

    Returns:
        The original Attention class (needed by remove_monkey_patch), or
        None if patching failed.
    """
    print("π§ Applying monkey patch to mlx-lm...")

    try:
        import mlx_lm.models.qwen3 as qwen3_module

        # Keep a handle on the original so the patch can be undone later.
        original_attention = qwen3_module.Attention

        qwen3_module.Attention = custom_attention_class

        print("β Successfully applied monkey patch")
        return original_attention

    except ImportError as e:
        print(f"β Could not import mlx_lm.models.qwen3: {e}")
        print(" Make sure mlx-lm is installed: pip install mlx-lm")
        return None
    except Exception as e:
        print(f"β Failed to apply monkey patch: {e}")
        return None
|
|
|
|
|
|
|
|
def remove_monkey_patch(original_attention):
    """Remove the monkey patch and restore original attention.

    Args:
        original_attention: The class returned by apply_monkey_patch().
            Passing None (patch was never applied) is a no-op.
    """
    if original_attention is None:
        return

    try:
        import mlx_lm.models.qwen3 as qwen3_module

        qwen3_module.Attention = original_attention
        print("β Removed monkey patch")
    except ImportError:
        # mlx-lm missing means there is nothing to restore; swallowing the
        # error keeps cleanup paths (finally blocks) from raising.
        pass
|
|
|
|
|
|
|
|
def run_mlx_lm_generation(
    prompt: str,
    max_tokens: int = 1000,
    model: str = "mlx-community/Qwen3-0.6B-bf16",
    debug: bool = False,
) -> Dict[str, Any]:
    """Run mlx-lm generation in a subprocess and parse its stats output.

    Invokes ``python -m mlx_lm generate`` (temperature fixed at 0.1) and
    scrapes the human-readable output for token counts, speeds and peak
    memory.

    Args:
        prompt: Prompt text to generate from.
        max_tokens: Generation budget passed to --max-tokens.
        model: Hugging Face model id passed to --model.
        debug: When True, prints the raw command output and parsing steps.

    Returns:
        On success, a dict with keys: success, prompt_tokens,
        generation_tokens, prompt_speed, generation_speed, peak_memory (GB),
        total_time (s), generated_text, full_output.
        On failure, {"success": False, "error": <message>}.
    """
    print(f"π§ͺ Running generation with prompt: '{prompt[:50]}...'")

    try:
        # FIX: use sys.executable instead of a bare "python" so the
        # subprocess runs in the same interpreter/venv as this script
        # (a bare "python" may resolve to an env without mlx-lm).
        cmd = [
            sys.executable,
            "-m",
            "mlx_lm",
            "generate",
            "--model",
            model,
            "--prompt",
            prompt,
            "--max-tokens",
            str(max_tokens),
            "--temp",
            "0.1",
        ]

        if debug:
            print(f"π§ Running command: {' '.join(cmd)}")

        # Wall-clock time around the whole subprocess (includes model load).
        start_time = time.perf_counter()
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
        end_time = time.perf_counter()

        if debug:
            print(f"π€ Command output:")
            print(f"Return code: {result.returncode}")
            print(f"STDOUT length: {len(result.stdout)}")
            print(f"STDERR length: {len(result.stderr)}")
            if result.stdout:
                print("First 500 chars of stdout:")
                print(result.stdout[:500])
            if result.stderr:
                print("STDERR:")
                print(result.stderr[:500])

        if result.returncode != 0:
            print(f"β Generation failed with return code {result.returncode}")
            if result.stderr:
                print(f"Error: {result.stderr[:200]}")
            return {"success": False, "error": result.stderr}

        output_lines = result.stdout.strip().split("\n")

        # Defaults in case a stat line is absent from the output.
        prompt_tokens = 0
        generation_tokens = 0
        prompt_speed = 0.0
        generation_speed = 0.0
        peak_memory = 0.0
        generated_text = ""

        # Text between the "==========" delimiters is the generated output;
        # stat lines ("Prompt:", "Generation:", "Peak memory:") follow it.
        capture_text = False
        found_prompt_stats = False
        found_generation_stats = False

        for line in output_lines:
            if debug:
                print(f"Parsing line: {line[:100]}")

            if line.startswith("=========="):
                capture_text = True
                continue
            elif (
                capture_text
                and line.strip()
                and not line.startswith("Prompt:")
                and not line.startswith("Generation:")
                and not line.startswith("Peak memory:")
            ):
                generated_text += line + "\n"
            elif "Prompt:" in line and "tokens-per-sec" in line:
                # Format: "Prompt: N tokens, X.Y tokens-per-sec"
                try:
                    parts = line.split(",")
                    prompt_tokens = int(parts[0].split(":")[1].strip().split()[0])
                    prompt_speed = float(parts[1].strip().split()[0])
                    found_prompt_stats = True
                    if debug:
                        print(f"Found prompt stats: {prompt_tokens} tokens, {prompt_speed} tok/sec")
                except (ValueError, IndexError) as e:
                    if debug:
                        print(f"Failed to parse prompt line: {e}")
            elif "Generation:" in line and "tokens-per-sec" in line:
                # Format: "Generation: N tokens, X.Y tokens-per-sec"
                try:
                    parts = line.split(",")
                    generation_tokens = int(parts[0].split(":")[1].strip().split()[0])
                    generation_speed = float(parts[1].strip().split()[0])
                    found_generation_stats = True
                    if debug:
                        print(
                            f"Found generation stats: {generation_tokens} tokens, {generation_speed} tok/sec"
                        )
                except (ValueError, IndexError) as e:
                    if debug:
                        print(f"Failed to parse generation line: {e}")
            elif "Peak memory:" in line:
                # Normalize to GB regardless of the reported unit.
                try:
                    memory_str = line.split(":")[1].strip()
                    if "GB" in memory_str:
                        peak_memory = float(memory_str.replace("GB", "").strip())
                    elif "MB" in memory_str:
                        peak_memory = float(memory_str.replace("MB", "").strip()) / 1024
                    if debug:
                        print(f"Found memory: {peak_memory} GB")
                except (ValueError, IndexError) as e:
                    if debug:
                        print(f"Failed to parse memory line: {e}")

        # A run with no generation stats (or zero tokens) is treated as a
        # failure so callers do not compare against meaningless numbers.
        if not found_generation_stats or generation_tokens == 0:
            print("β οΈ No generation statistics found in output")
            if debug:
                print(f"found_prompt_stats: {found_prompt_stats}")
                print(f"found_generation_stats: {found_generation_stats}")
                print(f"generation_tokens: {generation_tokens}")
                print("Full output for debugging:")
                print(result.stdout)
            return {"success": False, "error": "No generation statistics found"}

        result_dict = {
            "success": True,
            "prompt_tokens": prompt_tokens,
            "generation_tokens": generation_tokens,
            "prompt_speed": prompt_speed,
            "generation_speed": generation_speed,
            "peak_memory": peak_memory,
            "total_time": end_time - start_time,
            "generated_text": generated_text.strip(),
            "full_output": result.stdout,
        }

        if debug:
            print(f"Parsed result: {result_dict}")

        return result_dict

    except subprocess.TimeoutExpired:
        print("β° Generation timed out after 120 seconds")
        return {"success": False, "error": "Timeout"}
    except Exception as e:
        print(f"β Generation failed: {e}")
        if debug:
            traceback.print_exc()
        return {"success": False, "error": str(e)}
|
|
|
|
|
|
|
|
def run_comparison_test(
    prompt: str, custom_attention_class, max_tokens: int = 1000, debug: bool = False
):
    """Run comparison test between standard and optimized attention.

    Runs one generation pass unpatched, applies the monkey patch, runs a
    second pass, then prints speed/memory/time deltas and compares the
    generated text. Always removes the patch on the way out.

    NOTE(review): run_mlx_lm_generation launches generation in a separate
    subprocess, while apply_monkey_patch modifies mlx_lm in THIS process —
    confirm the patch actually reaches the generation path, otherwise both
    runs measure the standard attention.

    Args:
        prompt: Prompt text used for both runs.
        custom_attention_class: Replacement Attention class to patch in.
        max_tokens: Generation budget for both runs.
        debug: Forwarded to run_mlx_lm_generation for verbose output.
    """
    print(f"\n{'='*60}")
    print("π¬ ATTENTION COMPARISON TEST")
    print(f"{'='*60}")
    print(f"Prompt: {prompt}")
    print(f"Max tokens: {max_tokens}")
    print()

    # --- Baseline run (unpatched) -------------------------------------
    # NOTE: the model argument is left at run_mlx_lm_generation's default
    # here — TODO confirm whether the CLI --model choice should be forwarded.
    print("π Testing STANDARD attention...")
    standard_result = run_mlx_lm_generation(prompt, max_tokens, debug=debug)

    if not standard_result.get("success", False):
        print("β Standard attention test failed")
        if debug and "error" in standard_result:
            print(f" Error: {standard_result['error']}")
        print("\nπ§ Troubleshooting tips:")
        print(" β’ Check that mlx-lm is installed: pip install mlx-lm")
        print(" β’ Try a shorter prompt or fewer tokens")
        print(" β’ Run with --debug flag for more info")
        print(" β’ Check if the model downloads successfully")
        return

    print(f"β Standard Results:")
    print(f" Decode Speed: {standard_result['generation_speed']:.1f} tokens/sec")
    print(f" Memory Usage: {standard_result['peak_memory']:.2f} GB")
    print(f" Total Time: {standard_result['total_time']:.2f} seconds")
    print(f" Generated: {standard_result['generation_tokens']} tokens")

    # Baseline produced nothing: let the user decide whether the optimized
    # run is still worth the time (interactive prompt).
    if standard_result["generation_tokens"] == 0:
        print("β οΈ Warning: Standard attention generated 0 tokens")
        print(" This might indicate an issue with the model or prompt")
        print(" Generated text preview:")
        print(f" '{standard_result['generated_text'][:100]}'")

        try:
            response = input("\nβ Continue with optimized test anyway? (y/n): ").lower()
            if response != "y":
                print("Test cancelled")
                return
        except KeyboardInterrupt:
            print("\nTest cancelled")
            return

    # --- Optimized run (patched) --------------------------------------
    original_attention = apply_monkey_patch(custom_attention_class)
    if original_attention is None:
        print("β Failed to apply monkey patch")
        return

    # try/finally guarantees the patch is removed even on errors below.
    try:
        print("\nπ Testing OPTIMIZED attention...")
        optimized_result = run_mlx_lm_generation(prompt, max_tokens, debug=debug)

        if not optimized_result.get("success", False):
            print("β Optimized attention test failed")
            if debug and "error" in optimized_result:
                print(f" Error: {optimized_result['error']}")
            return

        print(f"β Optimized Results:")
        print(f" Decode Speed: {optimized_result['generation_speed']:.1f} tokens/sec")
        print(f" Memory Usage: {optimized_result['peak_memory']:.2f} GB")
        print(f" Total Time: {optimized_result['total_time']:.2f} seconds")
        print(f" Generated: {optimized_result['generation_tokens']} tokens")

        # Relative decode-speed change in percent; guarded against a zero
        # baseline to avoid division by zero.
        if standard_result["generation_speed"] > 0:
            speed_improvement = (
                (optimized_result["generation_speed"] - standard_result["generation_speed"])
                / standard_result["generation_speed"]
            ) * 100
        else:
            speed_improvement = 0.0
            print("β οΈ Cannot calculate speed improvement (standard speed was 0)")

        # Positive means the optimized run used MORE memory.
        memory_change = optimized_result["peak_memory"] - standard_result["peak_memory"]

        # Positive means the optimized run finished FASTER (time saved).
        if standard_result["total_time"] > 0:
            time_improvement = (
                (standard_result["total_time"] - optimized_result["total_time"])
                / standard_result["total_time"]
            ) * 100
        else:
            time_improvement = 0.0

        print(f"\nπ PERFORMANCE COMPARISON:")
        if standard_result["generation_speed"] > 0:
            print(f" Speed Improvement: {speed_improvement:+.1f}%")
        else:
            print(
                f" Speed Comparison: {standard_result['generation_speed']:.1f} β {optimized_result['generation_speed']:.1f} tokens/sec"
            )
        print(f" Memory Change: {memory_change:+.2f} GB")
        print(f" Time Improvement: {time_improvement:+.1f}%")

        # Verdict thresholds: >5% is called significant, >0% modest.
        if speed_improvement > 5:
            print("π― SIGNIFICANT IMPROVEMENT achieved!")
        elif speed_improvement > 0:
            print("π Modest improvement achieved")
        elif standard_result["generation_speed"] == 0 and optimized_result["generation_speed"] > 0:
            print("π₯ Optimized version works where standard failed!")
        else:
            print("β οΈ No improvement or regression")

        # Side-by-side preview of the first 200 chars of each output.
        print(f"\nπ GENERATED TEXT COMPARISON:")
        std_text = (
            standard_result["generated_text"][:200]
            if standard_result["generated_text"]
            else "[No text generated]"
        )
        opt_text = (
            optimized_result["generated_text"][:200]
            if optimized_result["generated_text"]
            else "[No text generated]"
        )

        print(f"Standard: {std_text}...")
        print(f"Optimized: {opt_text}...")

        # With temp 0.1 the outputs are expected to match; compare the
        # first 100 chars as a cheap sameness check.
        if standard_result["generated_text"] and optimized_result["generated_text"]:
            if standard_result["generated_text"][:100] == optimized_result["generated_text"][:100]:
                print("β Generated text is identical (good!)")
            else:
                print("β οΈ Generated text differs (check randomness/temperature)")
        elif not standard_result["generated_text"] and not optimized_result["generated_text"]:
            print("β οΈ Both versions generated no text")
        else:
            print("βΉοΈ Different text generation behavior")

    finally:
        # Always restore the original attention class.
        remove_monkey_patch(original_attention)
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: locate the evolved program, verify mlx-lm, and run
    the standard-vs-optimized comparison test."""
    parser = argparse.ArgumentParser(description="Test optimized MLX attention kernel")
    parser.add_argument("program_path", nargs="?", help="Path to best_program.py")
    parser.add_argument(
        "--prompt", default="The future of artificial intelligence is", help="Test prompt"
    )
    parser.add_argument("--max-tokens", type=int, default=100, help="Maximum tokens to generate")
    parser.add_argument("--model", default="mlx-community/Qwen3-0.6B-bf16", help="Model to use")
    parser.add_argument("--debug", action="store_true", help="Enable debug output")

    args = parser.parse_args()

    # Explicit path wins; otherwise search the known output locations.
    if args.program_path:
        program_path = args.program_path
    else:
        program_path = find_best_program()

    if not program_path or not os.path.exists(program_path):
        print("β Could not find best_program.py")
        print(" Please provide the path to the optimized program:")
        print(" python test_optimized_attention.py path/to/best_program.py")
        print("\n Or make sure you have run AlphaEvolve and have results in:")
        print(" openevolve_output/best/best_program.py")
        sys.exit(1)

    print("π MLX Optimized Attention Tester")
    print(f"Using program: {program_path}")
    print(f"Model: {args.model}")
    if args.debug:
        print("π Debug mode enabled")

    # Load the evolved attention class; exit on failure (errors already
    # printed by the loader).
    custom_attention_class = load_custom_attention_class(program_path)
    if custom_attention_class is None:
        sys.exit(1)

    # Sanity-check that mlx-lm is importable before spending time on runs.
    try:
        import mlx_lm

        print("β mlx-lm is available")
    except ImportError:
        print("β mlx-lm is not installed")
        print(" Please install it: pip install mlx-lm")
        sys.exit(1)

    # NOTE(review): args.model is parsed and displayed above but is NOT
    # forwarded here — run_comparison_test takes no model argument, so the
    # runs always use run_mlx_lm_generation's default model. TODO confirm
    # and plumb the model through if --model is meant to take effect.
    run_comparison_test(args.prompt, custom_attention_class, args.max_tokens, debug=args.debug)

    print(f"\n{'='*60}")
    print("β Test completed!")
    print("π‘ To test with a different prompt:")
    print(f" python {sys.argv[0]} --prompt 'Your custom prompt here'")
    print("π‘ For debugging: add --debug flag")
    print("π‘ For help: python test_optimized_attention.py --help")
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|