{
  "problem_name": "decoding_attn",
  "description": "Triton kernel optimization problem for high-performance Decoding Attention computation",
  "requirements": {
    "cuda_backend": true,
    "gpu_required": true,
    "triton_version": ">=2.1.0",
    "torch_version": ">=2.0.0"
  },
  "evaluation": {
    "timeout_seconds": 300,
    "memory_limit_mb": 8192,
    "gpu_memory_limit_mb": 4096
  },
  "metadata": {
    "Z": 1,
    "H": 8,
    "M": 1,
    "Dq": 64,
    "Dv": 64,
    "N_list": [1024, 2048, 4096, 8192]
  }
}