{
  "problem_name": "ragged_attention",
  "description": "Triton kernel optimization problem for high-performance ragged attention computation",
  "requirements": {
    "cuda_backend": true,
    "gpu_required": true,
    "triton_version": ">=2.1.0",
    "torch_version": ">=2.0.0"
  },
  "evaluation": {
    "timeout_seconds": 300,
    "memory_limit_mb": 8192,
    "gpu_memory_limit_mb": 4096
  },
  "metadata": {
    "M_list": [512, 1024],
    "N": 1024,
    "Dq": 64,
    "Dv": 64,
    "len_min_ratio": 0.25
  }
}