"""
Quick Demo: AlphaEvolve Optimized Attention

Runs a quick demo showing performance differences.
"""
| |
|
import os
import subprocess
import sys
| |
|
| |
|
def main():
    """Run a quick demo comparing baseline vs. AlphaEvolve-optimized attention.

    Verifies that the MLX dependencies and an evolved program are present,
    then shells out to ``test_optimized_attention.py`` once per demo prompt.
    Returns early (printing guidance) if prerequisites are missing.
    """
    print("🚀 AlphaEvolve MLX Attention Demo")
    print("=" * 40)

    # Confirm the MLX stack is importable before doing any work.
    try:
        import mlx
        import mlx_lm

        print("✅ Dependencies available")
    except ImportError as e:
        print(f"❌ Missing: {e}")
        print("   Run: pip install -r requirements.txt")
        return

    # The evolved program may live in either of these locations.
    locations = ["openevolve_output/best/best_program.py", "best_program.py"]
    if not any(os.path.exists(loc) for loc in locations):
        print("❌ No optimized program found!")
        print("   Please run AlphaEvolve first.")
        return

    print("✅ Found optimized program")

    # (label, prompt, max tokens) triples exercising different workloads.
    tests = [
        ("Quick test", "The future of AI is", 500),
        ("Code generation", "def quicksort(arr):", 800),
        ("Reasoning", "To solve this step by step", 1600),
    ]

    print(f"\nRunning {len(tests)} comparison tests...\n")

    for i, (name, prompt, tokens) in enumerate(tests, 1):
        print(f"Test {i}/{len(tests)}: {name}")
        print(f"Prompt: '{prompt}'")
        print("-" * 30)

        # sys.executable guarantees the child runs under the same
        # interpreter as this script, not whatever "python" is on PATH.
        cmd = [
            sys.executable,
            "test_optimized_attention.py",
            "--prompt",
            prompt,
            "--max-tokens",
            str(tokens),
        ]

        try:
            subprocess.run(cmd, check=True)
            print("✅ Test completed")
        except subprocess.CalledProcessError:
            print("❌ Test failed")
        except KeyboardInterrupt:
            print("\n⚠️ Demo interrupted")
            break

        # Visual separator between tests (skipped after the last one).
        if i < len(tests):
            print("\n" + "=" * 40 + "\n")

    print("\n🎯 Demo completed!")
    print("💡 Run individual tests: python test_optimized_attention.py --prompt 'Your prompt'")
| |
|
| |
|
| | if __name__ == "__main__": |
| | main() |
| |
|