| Sequence (int64, 1–25.2k) | Time (int64, 1–858M) | File (string, 830 distinct values) | RangeOffset (int64, 0–2.21M) | RangeLength (int64, 0–168k) | Text (string, 1–4.7M chars, nullable ⌀) | Language (string, 20 distinct values) | Type (string, 9 distinct values) |
|---|---|---|---|---|---|---|---|
| 180 | 1,135,954 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 32,836 | 9 | is_causal | python | selection_mouse |
| 181 | 1,165,080 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 32,836 | 3,599 | is_causal: If true, causal attention will be applied. Note, some\n implementations like `xla` will generate a mask tensor and apply it to the\n logits to mask out the non-causal parts of the attention matrix, but other\n implementations like `cudnn` will avoid computing the non-causal regions,\n providing speedups.\n query_seq_lengths: `int32` array of sequence lengths for query; shape\n :code:`(B)`\n key_value_seq_lengths: `int32` array of sequence lengths for key and value;\n shape :code:`(B)`\n local_window_size: Window sizes to make self attention to attend to each\n token's local window. If set, this specifies the (left_window_size,\n right_window_size) for each token. E.g., if local_window_size == (3, 2)\n and the sequence is [0, 1, 2, 3, 4, 5, c, 7, 8, 9], token `c` can attend\n to [3, 4, 5, c, 7, 8]. If a single int is given, it will be interpreted as\n a symmetric window (window_size, window_size).\n implementation: A string to control which implementation backend to use.\n Supported strings are `xla`, `cudnn` (cuDNN flash attention). It defaults\n to `None`, which will automatically select the best available backend.\n Note, `cudnn` supports only a subset of shapes/dtypes, and an exception\n will be thrown if its not supported.\n\n Returns:\n An array of the attention output with the same shape as :code:`query`.\n """\n output_shape = jnp.asarray(query).shape\n def _ensure_4d(t):\n t = jnp.asarray(t)\n dims_to_add = 4 - t.ndim\n if dims_to_add > 0:\n return jnp.expand_dims(t, axis=tuple(range(dims_to_add)))\n return t\n\n query_arr = _ensure_4d(query)\n key_arr = _ensure_4d(key)\n value_arr = _ensure_4d(value)\n bias = _ensure_4d(bias) if bias is not None else None\n mask = _ensure_4d(mask) if mask is not None else None\n if query_seq_lengths is not None:\n query_seq_lengths = jnp.asarray(query_seq_lengths)\n if key_value_seq_lengths is not None:\n key_value_seq_lengths = jnp.asarray(key_value_seq_lengths)\n if isinstance(local_window_size, int):\n local_window_size = (local_window_size, local_window_size)\n\n def _check_shape_and_dtype(t: Array \| None, shape: Sequence[int],\n dtype: DType \| None, name: str) -> None:\n if t is None:\n return\n if t.ndim != len(shape):\n raise ValueError(f"{name} ndim should be {len(shape)}, but got {t.ndim}")\n if dtype is not None and t.dtype != dtype:\n raise ValueError(f"{name} dtype should be {dtype}, but got {t.dtype}")\n for i in range(t.ndim):\n if shape[i] != -1 and t.shape[i] != shape[i]:\n raise ValueError(f"{name} shape should be {shape}: but got {t.shape}")\n\n B, S, K, H = key_arr.shape\n _check_shape_and_dtype(value_arr, [B, S, K, H], key_arr.dtype, 'value')\n _check_shape_and_dtype(query_arr, [B, -1, -1, H], key_arr.dtype, 'query')\n _check_shape_and_dtype(mask, [-1] * 4, jnp.bool_, 'mask')\n _check_shape_and_dtype(bias, [-1] * 4, None, 'bias')\n _check_shape_and_dtype(query_seq_lengths, [B], jnp.int32,\n 'query_seq_lengths')\n _check_shape_and_dtype(key_value_seq_lengths, [B], jnp.int32,\n 'key_value_seq_lengths')\n if query_arr.shape[-2] % K != 0:\n raise ValueError(f"The number of query heads must be a multiple of "\n f"key/value heads, but got {query_arr.shape[-2]} vs {K}")\n\n scale_val = (1.0 / np.sqrt(H)) if scale is None else scale\n\n match implementation:\n case 'xla':\n out = _dot_product_attention_xla(\n query_arr, key_arr, value_arr, bias, mask, i | python | selection_command |
| 182 | 1,166,275 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,434 | 0 | null | python | selection_command |
| 183 | 1,171,382 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,380 | 0 | null | python | selection_mouse |
| 184 | 1,171,383 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,379 | 0 | null | python | selection_command |
| 185 | 1,171,541 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,380 | 0 | null | python | selection_mouse |
| 186 | 1,171,594 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,379 | 0 | null | python | selection_command |
| 187 | 1,172,370 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,429 | 0 | null | python | selection_mouse |
| 188 | 1,172,491 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,428 | 4 | mask | python | selection_mouse |
| 189 | 1,173,009 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,439 | 0 | null | python | selection_mouse |
| 190 | 1,173,154 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,434 | 9 | is_causal | python | selection_mouse |
| 191 | 1,176,818 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,437 | 0 | null | python | selection_mouse |
| 192 | 1,176,819 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,434 | 9 | is_causal | python | selection_mouse |
| 193 | 1,181,016 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,615 | 0 | null | python | selection_mouse |
| 194 | 1,181,379 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,618 | 0 | null | python | selection_mouse |
| 195 | 1,181,506 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,618 | 5 | cudnn | python | selection_mouse |
| 196 | 1,182,336 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,742 | 0 | null | python | selection_mouse |
| 197 | 1,182,478 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,737 | 7 | asarray | python | selection_mouse |
| 198 | 1,183,274 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,741 | 0 | null | python | selection_mouse |
| 199 | 1,183,716 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,858 | 0 | null | python | selection_mouse |
| 200 | 1,183,761 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,857 | 0 | null | python | selection_command |
| 201 | 1,184,889 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,439 | 0 | null | python | selection_mouse |
| 202 | 1,186,080 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 36,444 | 0 | null | python | selection_command |
| 203 | 1,187,108 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,189 | 0 | null | python | selection_command |
| 204 | 1,210,175 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,255 | 0 | null | python | selection_command |
| 205 | 1,211,084 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 38,286 | 0 | null | python | selection_command |
| 206 | 1,215,718 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,868 | 0 | null | python | selection_mouse |
| 207 | 1,215,875 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,849 | 27 | cudnn_dot_product_attention | python | selection_mouse |
| 208 | 1,217,360 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,927 | 0 | null | python | selection_mouse |
| 209 | 1,217,517 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,925 | 4 | mask | python | selection_mouse |
| 210 | 1,221,234 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,926 | 0 | null | python | selection_mouse |
| 211 | 1,221,234 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,925 | 4 | mask | python | selection_mouse |
| 212 | 1,222,944 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,280 | 0 | null | python | selection_mouse |
| 213 | 1,223,075 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,274 | 9 | mask_type | python | selection_mouse |
| 214 | 1,224,509 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 38,015 | 0 | null | python | selection_mouse |
| 215 | 1,224,646 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 38,010 | 9 | mask_type | python | selection_mouse |
| 216 | 1,225,347 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,925 | 0 | null | python | selection_mouse |
| 217 | 1,225,491 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,925 | 4 | mask | python | selection_mouse |
| 218 | 1,225,990 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 38,006 | 0 | null | python | selection_mouse |
| 219 | 1,226,174 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 38,000 | 9 | mask_type | python | selection_mouse |
| 220 | 1,226,742 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,926 | 0 | null | python | selection_mouse |
| 221 | 1,228,504 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 37,869 | 0 | null | python | selection_mouse |
| 222 | 1,228,986 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 0 | 0 |
# Copyright 2024 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport enum\nimport functools\nimport json\nimport math\nfrom typing import TypedDict\n\nimport jax\nfrom jax import dtypes\nfrom jax._src import core\nfrom jax._src import dispatch\nfrom jax._src.custom_partitioning import custom_partitioning\nfrom jax._src.interpreters import batching\nfrom jax._src.interpreters import mlir\nfrom jax._src.lib import cuda_versions\nfrom jax._src import xla_bridge\nfrom jax._src.lib.mlir import ir\nfrom jax._src.lib.mlir.dialects import hlo\nimport jax.numpy as jnp\nfrom jax.sharding import NamedSharding, PartitionSpec\n\nArray = jnp.ndarray\n\nclass FP8Params(TypedDict):\n amax_dQ: float # Amax of gradient of query\n amax_dK: float # Amax of gradient of key\n amax_dV: float # Amax of gradient of value\n amax_dP: float # Amax of gradient of state\n descale_q: float # Descaling factor of query\n descale_k: float # Descaling factor of key\n descale_v: float # Descaling factor of value\n descale_s: float # Descaling factor of attention score\n scale_s: float # Scale factor for S tensor\n scale_o: float # Scale factor for output\n descale_o: float # Descale factor for output (bwd)\n descale_dO: float # Descale factor for output gradient (bwd)\n descale_dP: float # Descale factor for P gradient tensor (bwd)\n scale_dQ: float # Scale factor for query gradient (bwd)\n scale_dK: float # Scale factor for key gradient (bwd)\n scale_dV: float # Scale factor for value gradient (bwd)\n scale_dP: float # Scale factor for state gradient (bwd)\n\n\nclass AttentionLayout(enum.Enum):\n BTNH = 0\n BNTH = 1\n\n\nclass MaskType(enum.Enum):\n NO_MASK = 0\n PADDING = 1\n CAUSAL = 2\n PADDING_CAUSAL = 3\n ALIBI = 4\n\n\ndef convert_mask_type_to_string(mask_type: MaskType) -> str:\n if mask_type == MaskType.NO_MASK:\n return "NO_MASK"\n elif mask_type == MaskType.PADDING:\n return "PADDING"\n elif mask_type == MaskType.CAUSAL:\n return "CAUSAL"\n elif mask_type == MaskType.PADDING_CAUSAL:\n return "PADDING_CAUSAL"\n elif mask_type == MaskType.ALIBI:\n return "ALIBI"\n else:\n raise ValueError(f"Unexpected mask type: {mask_type}")\n\ndef has_padding(mask_type: MaskType) -> bool:\n return mask_type == MaskType.PADDING or mask_type == MaskType.PADDING_CAUSAL\n\ndef should_export_dbias(bias_shape, query_shape, layout) -> bool:\n b_B, b_N, _, _ = bias_shape\n if layout == AttentionLayout.BNTH.value:\n _, q_N, _, _ = query_shape\n else:\n _, _, q_N, _ = query_shape\n return b_B == 1 and b_N == q_N\n\ndef get_large_negative_number(dtype):\n # temp WAR as cuDNN has a bug for subtraction between two large negative value\n if dtype == jnp.bfloat16:\n return jnp.asarray(-2 << 40, dtype=dtype)\n elif dtype == jnp.float16:\n return jnp.asarray(-2 << 14, dtype=dtype)\n else:\n raise ValueError("Unsupported dtype for inputs.")\n\ndef _normalize_layout(layout: str) -> AttentionLayout:\n layout_upper = layout.upper()\n if layout_upper in ["BSNH", "BNSH", "BTNH", "BNTH"]:\n return 
AttentionLayout[layout_upper.replace("S", "T")]\n else:\n raise ValueError(f"Unsupported qkv_layout: {layout}")\n\ndef element_type_to_backend_config_type_mapping(dtype):\n _element_type_to_backend_config_type_mapping = {\n ir.BF16Type.get(): "BF16",\n ir.F16Type.get(): "F16",\n }\n return _element_type_to_backend_config_type_mapping[dtype]\n\ndef default_layouts(*shapes):\n return [range(len(shape) - 1, -1, -1) for shape in shapes]\n\ndef get_max_seg_per_batch(q_offsets):\n return q_offsets.shape[1] - 1 if len(q_offsets.shape) == 2 else 1\n\ndef check_is_paged_attention(page_table_k):\n return len(page_table_k.shape) == 4\n\ndef create_dot_product_attention_backend_config_base(\n batch, num_heads, seq_q, seq_kv, dtype, fmha_scale, mask_type, layout, is_bwd\n):\n # Q, K, V: query, key, value in shape of BT(S)NH or BNT(S)H\n # P: BMM1 output in shape of BNTS\n # O: BMM2 output in the same shape with Q\n # BMM1: Q @ K -> P\n # BMM2: P @ V -> O\n # BMM1Grad1: dP @ Q -> dK\n # BMM1Grad2: dP @ K -> dQ\n # BMM2Grad1: P @ dO -> dV\n # BMM2Grad2: dO @ V -> dP\n cudnn_fmha_backend_config = {\n "algorithm": {\n "algo_id": "0",\n "math_type": "TENSOR_OP_MATH",\n "tuning_knobs": {"17": "1", "24": "0"},\n "is_cudnn_frontend": True,\n "workspace_size": "0",\n },\n "fmha_scale": fmha_scale,\n "intermediate_tensor_shape": {\n "element_type": element_type_to_backend_config_type_mapping(dtype),\n "dimensions": [str(batch), str(num_heads), str(seq_q), str(seq_kv)],\n "tuple_shapes": [],\n "layout": {\n "dim_level_types": [],\n "dim_unique": [],\n "dim_ordered": [],\n "minor_to_major": ["3", "2", "1", "0"],\n "tiles": [],\n "element_size_in_bits": "0",\n "memory_space": "0",\n "index_primitive_type": "PRIMITIVE_TYPE_INVALID",\n "pointer_primitive_type": "PRIMITIVE_TYPE_INVALID",\n "dynamic_shape_metadata_prefix_bytes": "0",\n },\n "is_dynamic_dimension": [False, False, False, False],\n },\n "is_flash_attention": True,\n "mask_type": convert_mask_type_to_string(mask_type),\n }\n\n # We define the contracting and batch dims in the format of\n # ((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims,\n # rhs_batch_dims)).\n if layout == AttentionLayout.BNTH.value:\n dims = [\n ((3, 3), ((0, 1), (0, 1))), # BMM1: BNTH,BNSH->BNTS\n ((3, 2), ((0, 1), (0, 1))), # BMM2: BNTS,BNSH->BNTH\n ((2, 2), ((0, 1), (0, 1))), # BMM1_grad_1: BNTS,BNTH->BNSH\n ((3, 2), ((0, 1), (0, 1))), # BMM1_grad_2: BNTS,BNSH->BNTH\n ((2, 2), ((0, 1), (0, 1))), # BMM2_grad_1: BNTS,BNTH->BNSH\n ((3, 3), ((0, 1), (0, 1))), # BMM2_grad_2: BNTH,BNSH->BNTS\n ]\n else:\n dims = [\n ((3, 3), ((0, 2), (0, 2))), # BMM1: BTNH,BSNH->BNTS\n ((3, 1), ((0, 1), (0, 2))), # BMM2: BNTS,BSNH->BTNH\n ((2, 1), ((0, 1), (0, 2))), # BMM1_grad_1: BNTS,BTNH->BSNH\n ((3, 1), ((0, 1), (0, 2))), # BMM1_grad_2: BNTS,BSNH->BTNH\n ((2, 1), ((0, 1), (0, 2))), # BMM2_grad_1: BNTS,BTNH->BSNH\n ((3, 3), ((0, 2), (0, 2))), # BMM2_grad_2: BTNH,BSNH->BNTS\n ]\n keys = [\n "bmm1_dot_dimension_numbers",\n "bmm2_dot_dimension_numbers",\n "bmm1_grad_gemm1_dot_dimension_numbers",\n "bmm1_grad_gemm2_dot_dimension_numbers",\n "bmm2_grad_gemm1_dot_dimension_numbers",\n "bmm2_grad_gemm2_dot_dimension_numbers",\n ]\n fwd_dot_number = {}\n bwd_dot_number = {}\n for idx, (key, ((lc, rc), (lb, rb))) in enumerate(zip(keys, dims)):\n dims_to_write = fwd_dot_number if idx < 2 else bwd_dot_number\n dims_to_write[key] = {\n "lhs_contracting_dimensions": [str(lc)],\n "rhs_contracting_dimensions": [str(rc)],\n "lhs_batch_dimensions": [str(i) for i in lb],\n "rhs_batch_dimensions": [str(i) 
for i in rb],\n }\n\n if is_bwd:\n cudnn_fmha_backend_config = {**cudnn_fmha_backend_config, **bwd_dot_number}\n else:\n cudnn_fmha_backend_config = {**cudnn_fmha_backend_config, **fwd_dot_number}\n backend_config = {\n "operation_queue_id":"0",\n "wait_on_operation_queues":[],\n "cudnn_fmha_backend_config": cudnn_fmha_backend_config\n }\n return backend_config\n\ndef create_dot_product_attention_backend_config(\n batch,\n num_heads,\n seq_q,\n seq_kv,\n dtype,\n fmha_scale,\n seed,\n dropout_rate,\n mask_type,\n layout,\n sliding_window_length,\n max_seg_per_batch,\n is_paged_attention,\n is_bwd\n):\n backend_config = create_dot_product_attention_backend_config_base(\n batch, num_heads, seq_q, seq_kv, dtype,\n fmha_scale, mask_type, layout, is_bwd\n )\n if sliding_window_length is None:\n sliding_window_length = 0\n backend_config['cudnn_fmha_backend_config']["dropout_rate"] = dropout_rate\n backend_config['cudnn_fmha_backend_config']["seed"] = seed\n backend_config['cudnn_fmha_backend_config']["sliding_window_length"] = sliding_window_length\n backend_config['cudnn_fmha_backend_config']["max_seg_per_batch"] = max_seg_per_batch\n backend_config['cudnn_fmha_backend_config']["is_paged_attention"] = is_paged_attention\n return json.dumps(backend_config)\n\ndef create_dot_product_attention_fp8_backend_config(\n batch, num_heads, seq_q, seq_kv, dtype, fmha_scale, mask_type, layout, is_bwd):\n backend_config = create_dot_product_attention_backend_config_base(\n batch, num_heads, seq_q, seq_kv, dtype, fmha_scale, mask_type, layout, is_bwd)\n return json.dumps(backend_config)\n\n# mapping from (is_bwd, has_dropout, has_bias) to custom call name\n_custom_name_maps = {\n # fMHA forward call targets.\n (False, False, False, False): "__cudnn$fmhaSoftmax",\n (False, False, True, False): "__cudnn$fmhaScaleBiasSoftmax",\n (False, True, False, False): "__cudnn$fmhaSoftmaxDropout",\n (False, True, True, False): "__cudnn$fmhaScaleBiasSoftmaxDropout",\n (False, False, False, True): "__cudnn$fmhaSoftmaxF8",\n # fMHA backward call targets.\n (True, False, False, False): "__cudnn$fmhaSoftmaxBackward",\n (True, False, True, False): "__cudnn$fmhaScaleBiasSoftmaxBackward",\n (True, True, False, False): "__cudnn$fmhaSoftmaxDropoutBackward",\n (True, True, True, False): "__cudnn$fmhaScaleBiasSoftmaxDropoutBackward",\n (True, False, False, True): "__cudnn$fmhaSoftmaxBackwardF8",\n}\n\ndef get_custom_call_name(has_bias, has_dropout, is_bwd, is_fp8=False):\n return _custom_name_maps[(is_bwd, has_dropout, has_bias, is_fp8)]\n\nget_fp8_custom_call_name = functools.partial(\n get_custom_call_name, has_bias=False, has_dropout=False, is_fp8=True\n)\n\ndef check_layout(query, key, value, bias, q_seqlen, kv_seqlen,\n q_offsets, kv_offsets, page_table_k, page_table_v, layout):\n def check_eq(a, b, c, msg):\n if not (a == b == c):\n raise ValueError(f"{msg} must be same, got {a}, {b}, {b}")\n\n q_rank, k_rank, v_rank = len(query.shape), len(key.shape), len(value.shape)\n if q_rank != 4:\n raise ValueError(f"Q must have a rank of 4, got {q_rank}")\n check_eq(q_rank, k_rank, v_rank, "QKV rank")\n\n q_dtype, k_dtype, v_dtype = query.dtype, key.dtype, value.dtype\n if q_dtype not in [jnp.bfloat16, jnp.float16, jnp.float8_e4m3fn, jnp.float8_e5m2]:\n raise NotImplementedError(f"Q must be fp16/bf16/fp8_e4m3fn/fp8_e5m2, got {q_dtype}")\n check_eq(q_dtype, k_dtype, v_dtype, "QKV dtype")\n\n if layout == AttentionLayout.BNTH:\n qB, qN, qT, qH = query.shape\n kB, kN, kS, kH = key.shape\n vB, vN, vS, vH = value.shape\n else:\n assert 
layout == AttentionLayout.BTNH\n qB, qT, qN, qH = query.shape\n kB, kS, kN, kH = key.shape\n vB, vS, vN, vH = value.shape\n\n if page_table_k is not None and page_table_v is not None:\n k_blocks, k_block_size = kB, kS\n v_blocks, v_block_size = vB, vS\n kB, _, k_blocks_per_batch, _ = page_table_k.shape\n vB, _, v_blocks_per_batch, _ = page_table_v.shape\n kS = k_blocks_per_batch * k_block_size\n vS = v_blocks_per_batch * v_block_size\n if kB * k_blocks_per_batch != k_blocks:\n raise ValueError(\n f"Key and page_table_k must have same number of blocks, "\n f"got {k_blocks} vs {kB * k_blocks_per_batch}")\n if vB * v_blocks_per_batch != v_blocks:\n raise ValueError(\n f"Value and page_table_v must have same number of blocks, "\n f"got {v_blocks} vs {vB * v_blocks_per_batch}")\n\n check_eq(qB, kB, vB, "QKV batch")\n check_eq(qH, kH, vH, "QKV dim_per_head")\n if kN != vN:\n raise ValueError(f"KV must have same number of heads, got {kN} vs {vN}")\n if kS != vS:\n raise ValueError(f"KV must have same seq length, got {kS} vs {vS}")\n\n # check bias\n if bias is not None:\n _, _, bT, bS = bias.shape\n if bT != qT or bS != vS:\n raise ValueError(\n f"Bias must have same seq length as QKV, got {bT} and {bS}")\n\n # check q_seqlen/kv_seqlen/q_offsets/kv_offsets\n expected_rank = 2 if q_offsets is not None else 1\n def check_seqlen_offsets(tensor, name):\n if tensor is not None:\n dtype = tensor.dtype\n rank = len(tensor.shape)\n if dtype != jnp.int32:\n raise ValueError(f"{name} must have int32 datatype, got {dtype}")\n if rank != expected_rank:\n raise ValueError(f"{name} must have a rank of {expected_rank}, got {rank}")\n b = tensor.shape[0]\n if b != qB:\n raise ValueError(f"{name} must have same batch as Q, got {b}")\n\n check_seqlen_offsets(q_seqlen, "q_seqlen")\n check_seqlen_offsets(kv_seqlen, "kv_seqlen")\n check_seqlen_offsets(q_offsets, "q_offsets")\n check_seqlen_offsets(kv_offsets, "kv_offsets")\n\n\ndef check_is_flash_attention(\n query, key, layout: int, cudnn_version, has_bias, is_training, is_packed=False,\n is_paged_attention=False, is_fp8=False):\n # Extract sequence length (T) and head dim (H) based on layout\n if layout == AttentionLayout.BNTH.value:\n _, _, T, H = query.shape\n _, _, S, _ = key.shape\n else:\n _, T, _, H = query.shape\n _, S, _, _ = key.shape\n\n # Flash attention conditions\n if is_fp8:\n # FP8 specific conditions\n if not ((is_training and H == 128 and T % 128 == 0 and S % 128 == 0) or\n (not is_training and H <= 256 and H % 16 == 0)):\n raise NotImplementedError(\n f"Unsupported sequence length Q {T}, KV {S} and head dim {H} for FP8."\n )\n else:\n # bf16/fp16 attention conditions\n # Check the head dim.\n is_on_hopper = is_cuda_compute_capability_equal("9.0")\n H_max = 256 if cudnn_version >= 90500 and is_on_hopper else 128\n if not (H <= H_max and H % 8 == 0):\n raise NotImplementedError(\n f"The head dim must be <= {H_max} and a multiple of 8, "\n f"but got {H}."\n )\n\n # Check patterns with bias, seqlen should be divisible by 2\n if (is_training and has_bias and (T % 2 != 0 or S % 2 != 0)):\n raise NotImplementedError(\n f"Unsupported sequence length Q {T}, KV {S}."\n )\n\n if is_packed and (cudnn_version < 90600 or not check_compute_capability("9.0")):\n raise NotImplementedError(\n "Packed layout requires cudnn version >= 9.6 and at least hopper arch.")\n if is_paged_attention and cudnn_version < 90500:\n raise NotImplementedError("Page attention requires cudnn version >= 9.5.")\n\ndef check_cudnn_version():\n # check if cuDNN is installed\n if 
cuda_versions is None:\n raise RuntimeError("cuDNN is not detected.")\n return cuda_versions.cudnn_get_version()\n\ndef check_compute_capability(capability):\n if not 'cuda' in xla_bridge.get_backend().platform_version:\n return False\n d, *_ = jax.local_devices(backend="gpu")\n target = tuple(int(x) for x in capability.split("."))\n current = tuple(int(x) for x in d.compute_capability.split("."))\n return current >= target\n\ndef is_cuda_compute_capability_equal(capability):\n if not 'cuda' in xla_bridge.get_backend().platform_version:\n return False\n d, *_ = jax.local_devices(backend="gpu")\n target = tuple(int(x) for x in capability.split("."))\n current = tuple(int(x) for x in d.compute_capability.split("."))\n return current == target\n\ndef _dot_product_attention_fwd(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v,\n scale, seed, dropout_rate, variadic_args, mask_type, layout,\n sliding_window_length, cudnn_version, return_residual):\n # check if flash attention is supported for this attention pattern\n check_is_flash_attention(\n query, key, layout, cudnn_version, bias is not None, False,\n get_max_seg_per_batch(q_offsets) > 1, check_is_paged_attention(page_table_k))\n outputs = _dot_product_attention_fwd_p_wrapper.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale=scale, seed=seed, dropout_rate=dropout_rate,\n variadic_args=variadic_args, mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length, is_training=False or return_residual)\n if return_residual:\n return tuple(outputs)\n else:\n return outputs[0]\n\ndef _dot_product_attention_fwd_rule(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale, seed, dropout_rate, variadic_args,\n mask_type, layout, sliding_window_length, cudnn_version,\n return_residual):\n # check if flash attention is supported for this attention pattern\n check_is_flash_attention(\n query, key, layout, cudnn_version, bias is not None, True,\n get_max_seg_per_batch(q_offsets) > 1)\n outputs = _dot_product_attention_fwd_p_wrapper.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale=scale, seed=seed, dropout_rate=dropout_rate,\n variadic_args=variadic_args, mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length, is_training=True)\n res = (query, key, value, bias, q_seqlen, kv_seqlen, q_offsets,\n kv_offsets, page_table_k, page_table_v, outputs[1], outputs[0])\n if return_residual:\n return tuple(outputs), res\n else:\n return outputs[0], res\n\ndef _dot_product_attention_bwd_rule(\n scale, seed, dropout_rate, variadic_args, mask_type, layout,\n sliding_window_length, is_training, return_residual, res, grad_output):\n (query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, activation, fwd_output) = res\n if return_residual:\n grad_output = grad_output[0]\n grads = _dot_product_attention_bwd_p_wrapper.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, activation, fwd_output, grad_output,\n scale=scale, seed=seed, dropout_rate=dropout_rate, variadic_args=variadic_args,\n mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length\n )\n grads = (*grads,) + (None,) * (10 - len(grads))\n return grads\n\ndef _fix_seqlen_offsets(q_seqlen, kv_seqlen, q_offsets, kv_offsets, 
query, key):\n # fix seqlen and offsets to what cuDNN expects in sequence packing.\n # cuDNN expects seqlen to have shape [S] where S is the total number of segments\n # while the SDPA API accetps seqlen with shape [B, M] where B is the batch and M\n # is the maximum number of segments of one batch. B x M is larger than S and seqlen\n # is filled with -1 for padded regions. Therefore, we need to shift all non negative\n # values to left side to form a correct seqlen. Similar layout is required for\n # offsets tensors.\n # cuDNN expects offsets to have offset for each segment starting from first segment\n # while SDPA API accetps offsets to have offset for each segment starting from\n # current batch, therefore we need to calculate accumulative offset of each segment\n # starting from first segment.\n def _shift_to_left(x, fill_value):\n # shift any non-negative value to left\n # [[1, 3, -1, -1], [2, 3, 4, -1]]\n # -> [[1, 3, 2, 3], [4, -1, -1, -1]]\n x_shape = x.shape\n x = x.flatten()\n size = x.size\n indices = jnp.nonzero(x >= 0, size=size, fill_value=size)[0]\n y = jnp.take(x, indices, fill_value=fill_value)\n return jnp.reshape(y, x_shape)\n\n def _cu_offset(offsets, max_seq):\n # calculate accumulative offset by batch\n # [[1, 3, 5, 7], [4, 5, -1, -1]], max_seq = 8\n # -> [[1, 3, 5, 7], [12, 13, -1, -1]]\n batch = offsets.shape[0]\n offsets = jnp.where(\n offsets >= 0,\n offsets + (jnp.arange(batch, dtype=offsets.dtype) * max_seq)[..., jnp.newaxis],\n offsets,\n )\n return offsets\n\n if get_max_seg_per_batch(q_offsets) > 1:\n B, T, N, H = query.shape\n _, S, _, _ = key.shape\n\n q_seqlen = _shift_to_left(q_seqlen, -1)\n kv_seqlen = _shift_to_left(kv_seqlen, -1)\n\n q_offsets = _cu_offset(q_offsets, T)\n kv_offsets = _cu_offset(kv_offsets, S)\n q_offsets = _shift_to_left(q_offsets, -1)\n kv_offsets = _shift_to_left(kv_offsets, -1)\n\n # mark any invalid entries as maximum offset\n q_offsets = jnp.where(q_offsets < 0, B * T, q_offsets)\n kv_offsets = jnp.where(kv_offsets < 0, B * S, kv_offsets)\n\n # multiply by stride_per_token to get correct offsets\n # do it here because real stride changes after sharding\n q_offsets = q_offsets * N * H\n kv_offsets = kv_offsets * N * H\n\n return q_seqlen, kv_seqlen, q_offsets, kv_offsets\n\ndef _dot_product_attention_fwd_impl(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale, seed, dropout_rate, variadic_args,\n mask_type, layout, sliding_window_length, is_training):\n # args: {Q, K, V, mask*, bias*}\n q_seqlen, kv_seqlen, q_offsets, kv_offsets = \\n _fix_seqlen_offsets(q_seqlen, kv_seqlen, q_offsets, kv_offsets, query, key)\n outputs = _dot_product_attention_fwd_p.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale=scale, seed=seed, dropout_rate=dropout_rate,\n variadic_args=variadic_args, mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length, is_training=is_training)\n return outputs\n\ndef _dot_product_attention_bwd_impl(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, activation, fwd_output, grad_output, scale,\n seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length):\n q_seqlen, kv_seqlen, q_offsets, kv_offsets = \\n _fix_seqlen_offsets(q_seqlen, kv_seqlen, q_offsets, kv_offsets, query, key)\n grads = _dot_product_attention_bwd_p.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, 
page_table_v, activation, fwd_output, grad_output,\n scale=scale, seed=seed,\n dropout_rate=dropout_rate, variadic_args=variadic_args,\n mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length)\n return grads\n\ndef _dot_product_attention_fwd_abstract(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, *, scale, seed, dropout_rate, variadic_args,\n mask_type, layout, sliding_window_length, is_training):\n query_dtype = dtypes.canonicalize_dtype(query.dtype)\n if layout == AttentionLayout.BNTH.value:\n B, N, T, _ = query.shape\n _, _, S, _ = key.shape\n else:\n B, T, N, _ = query.shape\n _, S, _, _ = key.shape\n output_shape = query.shape\n\n max_seg_per_batch = get_max_seg_per_batch(q_offsets)\n softmax_stat_shape = (B * max_seg_per_batch, N, T)\n\n if is_training:\n return (\n core.ShapedArray(output_shape, query_dtype), # output\n core.ShapedArray(softmax_stat_shape, jnp.float32), # softmax_stat\n )\n else:\n return (\n core.ShapedArray(output_shape, query_dtype), # output\n )\n\ndef _dot_product_attention_bwd_abstract(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, activation, fwd_output, grad_output, *,\n scale, seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length):\n query_dtype = dtypes.canonicalize_dtype(query.dtype)\n key_dtype = dtypes.canonicalize_dtype(key.dtype)\n value_dtype = dtypes.canonicalize_dtype(value.dtype)\n\n _, has_dbias = variadic_args\n if has_dbias:\n # cuDNN supports bias for this case\n bias_dtype = dtypes.canonicalize_dtype(bias.dtype)\n return (\n core.ShapedArray(\n query.shape, query_dtype\n ), # grad query\n core.ShapedArray(\n key.shape, key_dtype\n ), # grad key\n core.ShapedArray(\n value.shape, value_dtype\n ), # grad value\n core.ShapedArray(\n bias.shape, bias_dtype\n ), # grad bias\n )\n else:\n return (\n core.ShapedArray(\n query.shape, query_dtype\n ), # grad query\n core.ShapedArray(\n key.shape, key_dtype\n ), # grad key\n core.ShapedArray(\n value.shape, value_dtype\n ), # grad value\n )\n\ndef _dot_product_attention_fwd_cuda_lowering(\n ctx, query, key, value, bias, q_seqlen, kv_seqlen, q_offsets,\n kv_offsets, page_table_k, page_table_v, scale, seed, dropout_rate,\n variadic_args, mask_type, layout, sliding_window_length, is_training):\n query_type = ir.RankedTensorType(query.type)\n query_shape = query_type.shape\n key_type = ir.RankedTensorType(key.type)\n key_shape = key_type.shape\n\n if layout == AttentionLayout.BNTH.value:\n B, N, T, H = query_shape\n _, _, S, _ = key_shape\n output_layout = (3, 2, 1, 0)\n output_transpose_perm = mlir.dense_int_array((0, 1, 2, 3))\n else:\n B, T, N, H = query_shape\n _, S, _, _ = key_shape\n output_layout = (3, 1, 2, 0)\n output_transpose_perm = mlir.dense_int_array((0, 2, 1, 3))\n\n max_seg_per_batch = get_max_seg_per_batch(ir.RankedTensorType(q_offsets.type))\n is_paged_attention = check_is_paged_attention(ir.RankedTensorType(page_table_k.type))\n\n output_shape = (B, N, T, H)\n softmax_stat_shape = (B * max_seg_per_batch, N, T)\n workspace_shape = (0,)\n workspace_type = ir.IntegerType.get_unsigned(8)\n\n has_bias, _ = variadic_args\n backend_config = create_dot_product_attention_backend_config(\n B, N, T, S, query_type.element_type, scale, seed, dropout_rate,\n mask_type, layout, sliding_window_length, max_seg_per_batch,\n is_paged_attention, is_bwd=False)\n # {Q, K, V, bias*, q_seqlen*, kv_seqlen*, q_offsets*, kv_offsets*}}\n # {output, 
activation*, workspace}\n has_dropout = dropout_rate > 0\n operands = [query, key, value]\n if has_bias:\n operands.append(bias)\n if has_padding(mask_type) or max_seg_per_batch > 1 or is_paged_attention:\n operands.append(q_seqlen)\n operands.append(kv_seqlen)\n if max_seg_per_batch > 1:\n operands.append(q_offsets)\n operands.append(kv_offsets)\n if is_paged_attention:\n operands.append(page_table_k)\n operands.append(page_table_v)\n\n custom_call_name = get_custom_call_name(has_bias, has_dropout, False)\n\n if is_training:\n result_types = [\n ir.RankedTensorType.get(output_shape, query_type.element_type),\n ir.RankedTensorType.get(softmax_stat_shape, ir.F32Type.get()),\n ir.RankedTensorType.get(workspace_shape, workspace_type),\n ]\n result_layouts = [output_layout] + default_layouts(softmax_stat_shape, workspace_shape)\n else:\n result_types = [\n ir.RankedTensorType.get(output_shape, query_type.element_type),\n ir.RankedTensorType.get(workspace_shape, workspace_type)\n ]\n result_layouts = [output_layout] + default_layouts(workspace_shape)\n # create custom call here\n out = mlir.custom_call(\n custom_call_name,\n result_types=result_types,\n operands=operands,\n backend_config=backend_config,\n operand_layouts=default_layouts(\n *[ir.RankedTensorType(operand.type).shape for operand in operands]),\n result_layouts=result_layouts,\n )\n # drop workspace memory\n # output should be (B, T, N, H) instead of (B, N, T, H)\n if is_training:\n return [hlo.transpose(out.results[0], output_transpose_perm), out.results[1]]\n else:\n return [hlo.transpose(out.results[0], output_transpose_perm)]\n\ndef _dot_product_attention_bwd_cuda_lowering(\n ctx, query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, activation, fwd_output, grad_output,\n scale, seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length):\n query_type = ir.RankedTensorType(query.type)\n query_shape = query_type.shape\n key_type = ir.RankedTensorType(key.type)\n key_shape = key_type.shape\n value_type = ir.RankedTensorType(value.type)\n\n if layout == AttentionLayout.BNTH.value:\n B, q_N, T, H = query_shape\n _, k_N, S, _ = key_shape\n grad_layout = (3, 2, 1, 0)\n grad_transpose_perm = mlir.dense_int_array((0, 1, 2, 3))\n else:\n B, T, q_N, H = query_shape\n _, S, k_N, _ = key_shape\n grad_layout = (3, 1, 2, 0)\n grad_transpose_perm = mlir.dense_int_array((0, 2, 1, 3))\n\n workspace_shape = (0,)\n workspace_type = ir.IntegerType.get_unsigned(8)\n\n grad_query_shape = (B, q_N, T, H)\n grad_key_shape = (B, k_N, S, H)\n grad_value_shape = (B, k_N, S, H)\n\n has_bias, has_dbias = variadic_args\n max_seg_per_batch = get_max_seg_per_batch(ir.RankedTensorType(q_offsets.type))\n backend_config = create_dot_product_attention_backend_config(\n B, q_N, T, S, query_type.element_type, scale, seed, dropout_rate,\n mask_type, layout, sliding_window_length, max_seg_per_batch,\n False, is_bwd=True)\n # {Q, K, V, activation, dO, bias*, O, q_seqlen*, kv_seqlen*,\n # q_offsets*, kv_offsets*}\n # {dQ, dK, dV, dbias*, workspace}\n has_dropout = dropout_rate > 0\n # create operands\n operands = [query, key, value, activation, grad_output]\n if has_bias:\n # flash attention requires bias in the bwd for remat\n operands.append(bias)\n operands.append(fwd_output)\n if has_padding(mask_type) or max_seg_per_batch > 1:\n operands.append(q_seqlen)\n operands.append(kv_seqlen)\n if max_seg_per_batch > 1:\n operands.append(q_offsets)\n operands.append(kv_offsets)\n # get custom call name\n 
custom_call_name = get_custom_call_name(has_bias, has_dropout, True)\n\n # create output types and layouts\n # grad_query, grad_key, grad_value\n result_types = [\n ir.RankedTensorType.get(grad_query_shape, query_type.element_type),\n ir.RankedTensorType.get(grad_key_shape, key_type.element_type),\n ir.RankedTensorType.get(grad_value_shape, value_type.element_type),\n ]\n result_layouts = [grad_layout, grad_layout, grad_layout]\n bias_type = ir.RankedTensorType(bias.type)\n bias_shape = bias_type.shape\n if has_dbias:\n # cuDNN supports bias for this case\n result_types.append(\n ir.RankedTensorType.get(bias_shape, bias_type.element_type))\n result_layouts = result_layouts + default_layouts(bias_shape)\n # workspace\n result_types.append(ir.RankedTensorType.get(workspace_shape, workspace_type))\n result_layouts = result_layouts + default_layouts(workspace_shape)\n out = mlir.custom_call(\n custom_call_name,\n result_types=result_types,\n operands=operands,\n backend_config=backend_config,\n operand_layouts=default_layouts(\n *[ir.RankedTensorType(operand.type).shape for operand in operands]),\n result_layouts=result_layouts,\n )\n dqkv = (hlo.transpose(out.results[0], grad_transpose_perm),\n hlo.transpose(out.results[1], grad_transpose_perm),\n hlo.transpose(out.results[2], grad_transpose_perm))\n # Only keep dQ, dK, dV and dBias here\n if has_dbias:\n return dqkv + (out.results[3],)\n else:\n return dqkv\n\n# batcher\ndef _check_valid_batch_dims(bdims):\n for dim in bdims:\n if dim not in [0, None]:\n raise NotImplementedError(\n f"Currently only support batch_dim in [0, None], but got {dim=}")\n\ndef _dot_product_attention_fwd_batcher(\n batched_args, batch_dims, *, scale, seed, dropout_rate, variadic_args,\n mask_type, layout, sliding_window_length, is_training):\n _check_valid_batch_dims(batch_dims)\n query, key, value, bias, q_seqlen, kv_seqlen, \\n q_offsets, kv_offsets, page_table_k, page_table_v = batched_args\n query_bdim = batch_dims[0]\n if is_training:\n out_bdims = query_bdim, query_bdim\n else:\n out_bdims = (query_bdim,)\n\n if layout == AttentionLayout.BNTH.value:\n *Bs, N, T, _ = query.shape\n *_, _, S, _ = key.shape\n else:\n *Bs, T, N, _ = query.shape\n *_, S, _, _ = key.shape\n B = math.prod(Bs)\n has_bias, _ = variadic_args\n original_shape = query.shape\n # reshape to 4D shape\n query = jnp.reshape(query, (B,) + query.shape[-3:])\n key = jnp.reshape(key, (B,) + key.shape[-3:])\n value = jnp.reshape(value, (B,) + key.shape[-3:])\n if has_bias and batch_dims[3] is not None:\n bias = jnp.reshape(bias, (B, N, T, S))\n if has_padding(mask_type):\n q_seqlen = jnp.reshape(q_seqlen, (B, ))\n kv_seqlen = jnp.reshape(kv_seqlen, (B, ))\n\n outputs = _dot_product_attention_fwd_p_wrapper.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale=scale, seed=seed, dropout_rate=dropout_rate,\n variadic_args=variadic_args, mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length, is_training=is_training)\n\n # reshape to original shape\n output = outputs[0]\n output = jnp.reshape(output, original_shape)\n if is_training:\n activation = outputs[1]\n activation = jnp.reshape(activation, (*Bs, N, T))\n return (output, activation), out_bdims\n else:\n return (output,), out_bdims\n\ndef _dot_product_attention_bwd_batcher(\n batched_args, batch_dims, *, scale, seed, dropout_rate, variadic_args,\n mask_type, layout, sliding_window_length):\n _check_valid_batch_dims(batch_dims)\n query, key, value, bias, q_seqlen, 
kv_seqlen, q_offsets, kv_offsets, \\n page_table_k, page_table_v, activation, fwd_output, grad_output = batched_args\n query_bdim = batch_dims[0]\n out_bdims = query_bdim, query_bdim, query_bdim\n\n if layout == AttentionLayout.BNTH.value:\n *Bs, N, T, _ = query.shape\n *_, _, S, _ = key.shape\n else:\n *Bs, T, N, _ = query.shape\n *_, S, _, _ = key.shape\n B = math.prod(Bs)\n has_bias, has_dbias = variadic_args\n # Reset the has_dbias if the combined batch size is not 1, because cuDNN only\n # supports dbias with a single batch. In this case, an all-zero dbias will be\n # appended instead.\n if B > 1:\n variadic_args = (has_bias, False)\n original_query_shape = query.shape\n original_key_shape = key.shape\n original_value_shape = value.shape\n original_bias_shape = bias.shape if has_bias else None\n # reshape to 4D shape\n query = jnp.reshape(query, (B,) + query.shape[-3:])\n key = jnp.reshape(key, (B,) + key.shape[-3:])\n value = jnp.reshape(value, (B,) + key.shape[-3:])\n if has_bias and batch_dims[3] is not None:\n bias = jnp.reshape(bias, (B, N, T, S))\n if has_padding(mask_type):\n q_seqlen = jnp.reshape(q_seqlen, (B, ))\n kv_seqlen = jnp.reshape(kv_seqlen, (B, ))\n\n activation = jnp.reshape(activation, (B, N, T))\n fwd_output = jnp.reshape(fwd_output, (B,) + query.shape[-3:])\n grad_output = jnp.reshape(grad_output, (B,) + query.shape[-3:])\n\n grads = _dot_product_attention_bwd_p_wrapper.bind(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, activation, fwd_output, grad_output,\n scale=scale, seed=seed, dropout_rate=dropout_rate, variadic_args=variadic_args,\n mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length,\n )\n\n # reshape to original shape\n grads[0] = jnp.reshape(grads[0], original_query_shape)\n grads[1] = jnp.reshape(grads[1], original_key_shape)\n grads[2] = jnp.reshape(grads[2], original_value_shape)\n if has_dbias:\n assert has_bias\n if variadic_args[1]:\n grads[3] = jnp.reshape(grads[3], original_bias_shape)\n else:\n grads.append(jnp.zeros(original_bias_shape, bias.dtype))\n out_bdims += (batch_dims[3],)\n return grads, out_bdims\n\n# custom partitioning\ndef _get_padded_spec(arg_info):\n spec = None if arg_info.sharding is None else arg_info.sharding.spec\n ndim = arg_info.ndim\n if spec is None:\n return (None,) * ndim\n assert len(spec) <= ndim\n return spec + (None,) * (ndim - len(spec))\n\ndef _check_qkv_bias_mask_spec(\n query_spec, key_spec, value_spec, bias_spec, layout):\n # check qkv spec\n if not query_spec == key_spec == value_spec:\n raise ValueError("Query, key and value should have same sharding.")\n if layout == AttentionLayout.BNTH.value:\n *batch_spec, num_head_spec, q_seq_spec, head_spec = query_spec\n else:\n *batch_spec, q_seq_spec, num_head_spec, head_spec = query_spec\n if q_seq_spec is not None:\n raise ValueError("Sharding on sequence dim is not allowed.")\n if head_spec is not None:\n raise ValueError("Sharding on head dim is not allowed.")\n # check bias spec\n if bias_spec:\n *bias_batch_spec, bias_num_head_spec, bias_q_seq_spec, bias_kv_seq_spec = bias_spec\n if any(bias_batch_spec) and bias_batch_spec != batch_spec or \\n bias_num_head_spec is not None and bias_num_head_spec != num_head_spec:\n raise ValueError(\n "Query and bias should have same sharding on batch and num_head dim.")\n if bias_q_seq_spec is not None or bias_kv_seq_spec is not None:\n raise ValueError("Sharding on bias sequence dim is not allowed.")\n\n\n# fwd custom partition\ndef 
_infer_fwd_output_sharding(mesh, arg_shapes, variadic_args,is_training, layout):\n # only sharding on batch and num_head dim is allowed\n # (*batch, q_seq, num_head, head)\n query_spec = _get_padded_spec(arg_shapes[0])\n # (*batch, kv_seq, num_head, head)\n key_spec = _get_padded_spec(arg_shapes[1])\n value_spec = _get_padded_spec(arg_shapes[2])\n has_bias, _ = variadic_args\n bias_spec = _get_padded_spec(arg_shapes[3]) if has_bias else None\n\n _check_qkv_bias_mask_spec(\n query_spec, key_spec, value_spec, bias_spec, layout)\n # keep out sharding same as query sharding since they have same shape\n out_sharding = NamedSharding(mesh, PartitionSpec(*query_spec))\n if is_training:\n # activation sharding\n *batch_spec, q_seq_spec, num_head_spec, _ = query_spec\n activation_sharding = NamedSharding(\n mesh, PartitionSpec(*batch_spec, num_head_spec, q_seq_spec, None))\n return [out_sharding, activation_sharding]\n return [out_sharding]\n\n_dot_product_attention_fwd_lower = custom_partitioning(\n _dot_product_attention_fwd_impl, static_argnums=(10, 11, 12, 13, 14, 15, 16, 17))\n\ndef _dot_product_attention_fwd_infer_sharding_from_operands(\n scale, seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length,\n is_training, mesh, arg_shapes, result_shape):\n return _infer_fwd_output_sharding(mesh, arg_shapes, variadic_args, is_training, layout)\n\ndef _dot_product_attention_fwd_partition(\n scale, seed, dropout_rate, variadic_args, mask_type, layout, sliding_window_length,\n is_training, mesh, arg_shapes, result_shape):\n # args sharding\n arg_shardings = tuple(arg_i.sharding for arg_i in arg_shapes)\n out_shardings = _infer_fwd_output_sharding(\n mesh, arg_shapes, variadic_args, is_training, layout)\n impl = functools.partial(\n _dot_product_attention_fwd_impl,\n scale=scale,\n seed=seed,\n dropout_rate=dropout_rate,\n variadic_args=variadic_args,\n mask_type=mask_type,\n layout=layout,\n sliding_window_length=sliding_window_length,\n is_training=is_training,\n )\n return mesh, impl, out_shardings, arg_shardings\n\n# bwd custom partition\ndef _infer_bwd_output_sharding(mesh, arg_shapes, layout, variadic_args):\n # (*batch, q_seq, num_head, head)\n query_spec = _get_padded_spec(arg_shapes[0])\n # (*batch, kv_seq, num_head, head)\n key_spec = _get_padded_spec(arg_shapes[1])\n value_spec = _get_padded_spec(arg_shapes[2])\n has_bias, has_dbias = variadic_args\n bias_spec = _get_padded_spec(arg_shapes[3]) if has_bias else None\n _check_qkv_bias_mask_spec(\n query_spec, key_spec, value_spec, bias_spec, layout)\n # keep grad query sharding same as query sharding\n grad_query_sharding = NamedSharding(mesh, PartitionSpec(*query_spec))\n grad_key_sharding = NamedSharding(mesh, PartitionSpec(*key_spec))\n grad_value_sharding = NamedSharding(mesh, PartitionSpec(*key_spec))\n out_shardings = [grad_query_sharding, grad_key_sharding, grad_value_sharding]\n if has_dbias:\n grad_bias_sharding = NamedSharding(mesh, PartitionSpec(*bias_spec))\n out_shardings = out_shardings + [grad_bias_sharding]\n return out_shardings\n\n_dot_product_attention_bwd_lower = custom_partitioning(\n _dot_product_attention_bwd_impl, static_argnums=(13, 14, 15, 16, 17, 18, 19)\n)\n\ndef _dot_product_attention_bwd_infer_sharding_from_operands(\n scale, seed, dropout_rate, variadic_args, mask_type, layout,\n sliding_window_length, mesh, arg_shapes, result_shape):\n return _infer_bwd_output_sharding(mesh, arg_shapes, layout, variadic_args)\n\ndef _dot_product_attention_bwd_partition(\n scale, seed, dropout_rate, 
variadic_args, mask_type, layout,\n sliding_window_length, mesh, arg_shapes, result_shape):\n out_shardings = _infer_bwd_output_sharding(mesh, arg_shapes, layout, variadic_args)\n # args sharding\n arg_shardings = tuple(arg_i.sharding for arg_i in arg_shapes)\n def sharded_impl(*args):\n impl = functools.partial(\n _dot_product_attention_bwd_impl,\n scale=scale,\n seed=seed,\n dropout_rate=dropout_rate,\n variadic_args=variadic_args,\n mask_type=mask_type,\n layout=layout,\n sliding_window_length=sliding_window_length,\n )\n grads = impl(*args)\n _, has_dbias = variadic_args\n if has_dbias:\n query_spec = arg_shardings[0].spec\n batch_spec = query_spec[0]\n local_dbias = grads[3]\n global_dbias = jax.lax.psum(local_dbias, batch_spec)\n grads = grads[:3] + [global_dbias]\n return grads\n return mesh, sharded_impl, out_shardings, arg_shardings\n\n# Create dot_product_attention_fwd_p for forward operation.\n_dot_product_attention_fwd_p = core.Primitive("dot_product_attention_fwd")\n_dot_product_attention_fwd_p.multiple_results = True\n_dot_product_attention_fwd_p.def_impl(\n functools.partial(dispatch.apply_primitive, _dot_product_attention_fwd_p)\n)\n_dot_product_attention_fwd_p.def_abstract_eval(\n _dot_product_attention_fwd_abstract\n)\n\nmlir.register_lowering(\n _dot_product_attention_fwd_p,\n _dot_product_attention_fwd_cuda_lowering,\n platform="cuda",\n)\n\n_dot_product_attention_fwd_p_wrapper = core.Primitive(\n "dot_product_attention_fwd_wrapper"\n)\n_dot_product_attention_fwd_p_wrapper.multiple_results = True\n_dot_product_attention_fwd_p_wrapper.def_impl(_dot_product_attention_fwd_impl)\n_dot_product_attention_fwd_p_wrapper.def_abstract_eval(\n _dot_product_attention_fwd_abstract\n)\n\n# Create dot_product_attention_bwd_p for backward operation.\n_dot_product_attention_bwd_p = core.Primitive("dot_product_attention_bwd")\n_dot_product_attention_bwd_p.multiple_results = True\n_dot_product_attention_bwd_p.def_impl(\n functools.partial(dispatch.apply_primitive, _dot_product_attention_bwd_p)\n)\n_dot_product_attention_bwd_p.def_abstract_eval(\n _dot_product_attention_bwd_abstract\n)\n\nmlir.register_lowering(\n _dot_product_attention_bwd_p,\n _dot_product_attention_bwd_cuda_lowering,\n platform="cuda",\n)\n\n_dot_product_attention_bwd_p_wrapper = core.Primitive(\n "dot_product_attention_bwd_wrapper"\n)\n_dot_product_attention_bwd_p_wrapper.multiple_results = True\n_dot_product_attention_bwd_p_wrapper.def_impl(_dot_product_attention_bwd_impl)\n_dot_product_attention_bwd_p_wrapper.def_abstract_eval(\n _dot_product_attention_bwd_abstract\n)\n\nbatching.primitive_batchers[\n _dot_product_attention_fwd_p_wrapper\n] = _dot_product_attention_fwd_batcher\nbatching.primitive_batchers[\n _dot_product_attention_bwd_p_wrapper\n] = _dot_product_attention_bwd_batcher\n\ndef not_implemented_sharding_rule(*args, **kwargs):\n return NotImplementedError("Sharding rule not implemented.")\n\n_dot_product_attention_fwd_lower.def_partition(\n infer_sharding_from_operands=_dot_product_attention_fwd_infer_sharding_from_operands,\n partition=_dot_product_attention_fwd_partition,\n sharding_rule=not_implemented_sharding_rule)\n\nmlir.register_lowering(_dot_product_attention_fwd_p_wrapper,\n mlir.lower_fun(_dot_product_attention_fwd_lower, multiple_results=True))\n\n_dot_product_attention_bwd_lower.def_partition(\n infer_sharding_from_operands=_dot_product_attention_bwd_infer_sharding_from_operands,\n partition=_dot_product_attention_bwd_partition,\n 
sharding_rule=not_implemented_sharding_rule)\n\nmlir.register_lowering(_dot_product_attention_bwd_p_wrapper,\n mlir.lower_fun(_dot_product_attention_bwd_lower, multiple_results=True))\n\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_fwd_p\n)\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_fwd_p_wrapper\n)\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_bwd_p\n)\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_bwd_p_wrapper\n)\n\n@functools.partial(jax.custom_vjp, nondiff_argnums=(10, 11, 12, 13, 14, 15, 16, 17, 18))\ndef _dot_product_attention(query: Array,\n key: Array,\n value: Array,\n bias: Array,\n q_seqlen: Array,\n kv_seqlen: Array,\n q_offsets: Array,\n kv_offsets: Array,\n page_table_k: Array,\n page_table_v: Array,\n scale: float,\n seed: int,\n dropout_rate: float,\n variadic_args: tuple[bool, ...],\n mask_type: bool,\n layout: int,\n sliding_window_length: int | None,\n cudnn_version: int,\n return_residual: bool):\n output = _dot_product_attention_fwd(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n page_table_k, page_table_v, scale=scale, seed=seed, dropout_rate=dropout_rate,\n variadic_args=variadic_args, mask_type=mask_type, layout=layout,\n sliding_window_length=sliding_window_length,\n cudnn_version=cudnn_version, return_residual=return_residual)\n return output\n\n_dot_product_attention.defvjp(\n _dot_product_attention_fwd_rule, _dot_product_attention_bwd_rule\n)\n\nfp8_params_keys = [\n 'amax_dQ', 'amax_dK', 'amax_dV', 'amax_dP', # place holder for bwd output\n 'descale_q', 'descale_k', 'descale_v', 'descale_s',\n 'scale_s', 'scale_o', 'descale_o', 'descale_dO',\n 'descale_dP', 'scale_dQ', 'scale_dK', 'scale_dV',\n 'scale_dP'\n]\n\nfp8_params_keys_fwd = [\n 'descale_q', 'descale_k', 'descale_v', 'descale_s', 'scale_s', 'scale_o'\n]\nfp8_params_keys_bwd = [\n 'descale_q', 'descale_k', 'descale_v', 'descale_o', 'descale_dO', 'descale_s',\n 'descale_dP', 'scale_s', 'scale_dQ', 'scale_dK', 'scale_dV', 'scale_dP',\n]\nparams_from_keys = lambda params, keys: [params[key] for key in keys]\n\ndef check_fp8_params(params):\n # Check if all required keys are present\n missing_keys = set(fp8_params_keys) - set(params)\n if missing_keys:\n raise ValueError(f"The following keys are missing from fp8_params: {', '.join(missing_keys)}")\n\ncheck_is_flash_attention_fp8 = functools.partial(\n check_is_flash_attention,\n has_bias=False,\n is_fp8=True\n)\n\ndef _dot_product_attention_fp8_fwd(\n query, key, value,\n fp8_params_fwd,\n scale, use_causal_mask, layout, cudnn_version):\n check_is_flash_attention_fp8(\n query, key, layout, cudnn_version, is_training=False)\n descale_q, descale_k, descale_v, descale_s, scale_s, scale_o = fp8_params_fwd\n outputs = _dot_product_attention_fp8_fwd_p_wrapper.bind(\n query, key, value,\n descale_q, descale_k, descale_v, descale_s,\n scale_s, scale_o,\n scale=scale, use_causal_mask=use_causal_mask, layout=layout, is_training=False)\n return outputs\n\ndef _dot_product_attention_fp8_fwd_rule(\n query, key, value,\n fp8_params,\n scale, use_causal_mask, layout, cudnn_version):\n check_is_flash_attention_fp8(\n query, key, layout, cudnn_version, is_training=True)\n\n outputs = _dot_product_attention_fp8_fwd_p_wrapper.bind(\n query, key, value, *params_from_keys(fp8_params, fp8_params_keys_fwd),\n scale=scale, use_causal_mask=use_causal_mask, layout=layout, is_training=True)\n res = (query, key, value, outputs[3], 
outputs[0], params_from_keys(fp8_params, fp8_params_keys_bwd))\n return (outputs[0], outputs[1], outputs[2]), res\n\ndef _dot_product_attention_fp8_bwd_rule(\n scale, use_causal_mask, layout, cudnn_version, res, g):\n (query, key, value, activation, fwd_output, aux_params) = res\n grad_output = g[0]\n grads = _dot_product_attention_fp8_bwd_p_wrapper.bind(\n query,\n key,\n value,\n fwd_output,\n grad_output,\n activation,\n *aux_params,\n scale=scale,\n use_causal_mask=use_causal_mask,\n layout=layout,\n )\n\n fp8_params_grads = dict.fromkeys(fp8_params_keys)\n keys_to_grad_indices = ['amax_dQ', 'amax_dK', 'amax_dV', 'amax_dP']\n # grads structure: (dQ, dK, dV, amax_dq, amax_dk, amax_dv, amax_dp)\n for i, key in enumerate(keys_to_grad_indices, start=3):\n fp8_params_grads[key] = grads[i]\n\n return (grads[0], grads[1], grads[2], fp8_params_grads)\n\ndef _dot_product_attention_fp8_fwd_impl(\n query, key, value,\n descale_q, descale_k, descale_v, descale_s, scale_s, scale_o,\n scale, use_causal_mask, layout, is_training):\n outputs = _dot_product_attention_fp8_fwd_p.bind(\n query,\n key,\n value,\n descale_q,\n descale_k,\n descale_v,\n descale_s,\n scale_s,\n scale_o,\n scale=scale,\n use_causal_mask=use_causal_mask,\n layout=layout,\n is_training=is_training,\n )\n return outputs\n\ndef _dot_product_attention_fp8_bwd_impl(\n query, key, value, fwd_output, grad_output, activation,\n descale_q, descale_k, descale_v, descale_o, descale_dO, descale_s,\n descale_dP, scale_s, scale_dQ, scale_dK, scale_dV, scale_dP,\n scale, use_causal_mask, layout):\n grads = _dot_product_attention_fp8_bwd_p.bind(\n query, key, value, fwd_output, grad_output, activation,\n descale_q, descale_k, descale_v, descale_o, descale_dO, descale_s,\n descale_dP, scale_s, scale_dQ, scale_dK, scale_dV, scale_dP,\n scale=scale, use_causal_mask=use_causal_mask, layout=layout)\n return grads\n\n\ndef _dot_product_attention_fp8_fwd_abstract(\n query, key, value,\n descale_q, descale_k, descale_v, descale_s, scale_s, scale_o,\n scale, use_causal_mask, layout, is_training):\n query_dtype = dtypes.canonicalize_dtype(query.dtype)\n if layout == AttentionLayout.BNTH.value:\n B, N, T, _ = query.shape\n _, _, S, _ = key.shape\n else:\n B, T, N, _ = query.shape\n _, S, _, _ = key.shape\n output_shape = query.shape\n softmax_stat_shape = (B, N, T)\n\n # output, amax_s, amax_o[, softmax_stat]\n if is_training:\n return (\n core.ShapedArray(output_shape, query_dtype),\n core.ShapedArray((1,1,1,1), jnp.float32),\n core.ShapedArray((1,1,1,1), jnp.float32),\n core.ShapedArray(softmax_stat_shape, jnp.float32),\n )\n else:\n return (\n core.ShapedArray(output_shape, query_dtype),\n core.ShapedArray((1,1,1,1), jnp.float32),\n core.ShapedArray((1,1,1,1), jnp.float32),\n )\n\ndef _dot_product_attention_fp8_bwd_abstract(\n query, key, value, fwd_output, grad_output, activation,\n descale_q, descale_k, descale_v, descale_o, descale_dO, descale_s,\n descale_dP, scale_s, scale_dQ, scale_dK, scale_dV, scale_dP,\n scale, use_causal_mask, layout):\n query_dtype = dtypes.canonicalize_dtype(query.dtype)\n key_dtype = dtypes.canonicalize_dtype(key.dtype)\n value_dtype = dtypes.canonicalize_dtype(value.dtype)\n\n amax_shape = (1,1,1,1)\n\n return (\n core.ShapedArray(query.shape, query_dtype),\n core.ShapedArray(key.shape, key_dtype),\n core.ShapedArray(value.shape, value_dtype),\n core.ShapedArray(amax_shape, jnp.float32),\n core.ShapedArray(amax_shape, jnp.float32),\n core.ShapedArray(amax_shape, jnp.float32),\n core.ShapedArray(amax_shape, 
jnp.float32),\n )\n\ndef _dot_product_attention_fp8_fwd_cuda_lowering(\n ctx, query, key, value,\n descale_q, descale_k, descale_v, descale_s, scale_s, scale_o,\n scale, use_causal_mask, layout, is_training):\n query_type = ir.RankedTensorType(query.type)\n query_shape = query_type.shape\n key_type = ir.RankedTensorType(key.type)\n key_shape = key_type.shape\n\n if layout == AttentionLayout.BNTH.value:\n B, N, T, H = query_shape\n _, _, S, _ = key_shape\n output_layout = (3, 2, 1, 0)\n output_transpose_perm = mlir.dense_int_array((0, 1, 2, 3))\n else:\n B, T, N, H = query_shape\n _, S, _, _ = key_shape\n output_layout = (3, 1, 2, 0)\n output_transpose_perm = mlir.dense_int_array((0, 2, 1, 3))\n\n output_shape = (B, N, T, H)\n softmax_stat_shape = (B, N, T)\n workspace_shape = (0,)\n amax_shape = (1,1,1,1)\n workspace_type = ir.IntegerType.get_unsigned(8)\n mask_type = MaskType.CAUSAL if use_causal_mask else MaskType.NO_MASK\n backend_config = create_dot_product_attention_fp8_backend_config(\n B, N, T, S, ir.BF16Type.get(), # query_type.element_type,\n scale, mask_type, layout, is_bwd=False,\n )\n\n operands = [query, key, value, descale_q, descale_k, descale_v, descale_s, scale_s, scale_o]\n custom_call_name = get_fp8_custom_call_name(is_bwd=False)\n\n if is_training:\n result_types = [\n ir.RankedTensorType.get(output_shape, query_type.element_type),\n ir.RankedTensorType.get((1,1,1,1), ir.F32Type.get()),\n ir.RankedTensorType.get((1,1,1,1), ir.F32Type.get()),\n ir.RankedTensorType.get(softmax_stat_shape, ir.F32Type.get()),\n ir.RankedTensorType.get(workspace_shape, workspace_type),\n ]\n result_layouts = [output_layout] + default_layouts(amax_shape, amax_shape, softmax_stat_shape, workspace_shape)\n else:\n result_types = [\n ir.RankedTensorType.get(output_shape, query_type.element_type),\n ir.RankedTensorType.get((1,1,1,1), ir.F32Type.get()),\n ir.RankedTensorType.get((1,1,1,1), ir.F32Type.get()),\n ir.RankedTensorType.get(workspace_shape, workspace_type)\n ]\n result_layouts = [output_layout] + default_layouts(amax_shape, amax_shape, workspace_shape)\n\n operand_shapes = [ir.RankedTensorType(operand.type).shape for operand in operands[:3]]\n operand_shapes += [[1, 1, 1, 1]] * 6\n operand_layouts = default_layouts(*operand_shapes)\n out = mlir.custom_call(\n custom_call_name,\n result_types=result_types,\n operands=operands,\n backend_config=backend_config,\n operand_layouts=operand_layouts,\n result_layouts=result_layouts,\n )\n\n if is_training:\n return [hlo.transpose(out.results[0], output_transpose_perm), out.results[1], out.results[2], out.results[3]]\n else:\n return [hlo.transpose(out.results[0], output_transpose_perm), out.results[1], out.results[2]]\n\n\n\ndef _dot_product_attention_fp8_bwd_cuda_lowering(\n ctx, query, key, value, fwd_output, grad_output, activation,\n descale_q, descale_k, descale_v, descale_o, descale_dO, descale_s,\n descale_dP, scale_s, scale_dQ, scale_dK, scale_dV, scale_dP, scale,\n use_causal_mask, layout):\n query_type = ir.RankedTensorType(query.type)\n query_shape = query_type.shape\n key_type = ir.RankedTensorType(key.type)\n key_shape = key_type.shape\n value_type = ir.RankedTensorType(value.type)\n\n if layout == AttentionLayout.BNTH.value:\n B, q_N, T, H = query_shape\n _, k_N, S, _ = key_shape\n grad_layout = (3, 2, 1, 0)\n grad_transpose_perm = mlir.dense_int_array((0, 1, 2, 3))\n else:\n B, T, q_N, H = query_shape\n _, S, k_N, _ = key_shape\n grad_layout = (3, 1, 2, 0)\n grad_transpose_perm = mlir.dense_int_array((0, 2, 1, 3))\n\n 
workspace_shape = (0,)\n workspace_type = ir.IntegerType.get_unsigned(8)\n amax_shape = (1,1,1,1)\n\n grad_query_shape = (B, q_N, T, H)\n grad_key_shape = (B, k_N, S, H)\n grad_value_shape = (B, k_N, S, H)\n mask_type = MaskType.CAUSAL if use_causal_mask else MaskType.NO_MASK\n\n backend_config = create_dot_product_attention_fp8_backend_config(\n B, q_N, T, S, ir.BF16Type.get(),\n scale, mask_type, layout, is_bwd=True,\n )\n\n operands = [\n query,\n key,\n value,\n fwd_output,\n grad_output,\n activation,\n descale_q,\n descale_k,\n descale_v,\n descale_o,\n descale_dO,\n descale_s,\n descale_dP,\n scale_s,\n scale_dQ,\n scale_dK,\n scale_dV,\n scale_dP,\n ]\n\n custom_call_name = get_fp8_custom_call_name(is_bwd=True)\n\n result_types = [\n ir.RankedTensorType.get(grad_query_shape, query_type.element_type),\n ir.RankedTensorType.get(grad_key_shape, key_type.element_type),\n ir.RankedTensorType.get(grad_value_shape, value_type.element_type),\n ir.RankedTensorType.get(amax_shape, ir.F32Type.get()),\n ir.RankedTensorType.get(amax_shape, ir.F32Type.get()),\n ir.RankedTensorType.get(amax_shape, ir.F32Type.get()),\n ir.RankedTensorType.get(amax_shape, ir.F32Type.get()),\n ]\n result_layouts = [grad_layout, grad_layout, grad_layout] + default_layouts(amax_shape, amax_shape, amax_shape, amax_shape)\n\n result_types.append(ir.RankedTensorType.get(workspace_shape, workspace_type))\n result_layouts = result_layouts + default_layouts(workspace_shape)\n out = mlir.custom_call(\n custom_call_name,\n result_types=result_types,\n operands=operands,\n backend_config=backend_config,\n operand_layouts=default_layouts(\n *[ir.RankedTensorType(operand.type).shape for operand in operands]),\n result_layouts=result_layouts,\n )\n dqkv_amaxs = (hlo.transpose(out.results[0], grad_transpose_perm),\n hlo.transpose(out.results[1], grad_transpose_perm),\n hlo.transpose(out.results[2], grad_transpose_perm),\n out.results[3], out.results[4], out.results[5], out.results[6])\n # Only keep dQ, dK, dV, amax_dQ, amax_dK, amax_dV, amax_dP here\n return dqkv_amaxs\n\ndef _dot_product_attention_fp8_fwd_batcher(\n batched_args, batch_dims, *, scale, use_causal_mask, layout, is_training):\n _check_valid_batch_dims(batch_dims)\n query, key, value,\\n descale_q, descale_k, descale_v, descale_s, scale_s, scale_o, = batched_args\n query_bdim = batch_dims[0]\n if is_training:\n out_bdims = query_bdim, query_bdim\n else:\n out_bdims = (query_bdim,)\n\n if layout == AttentionLayout.BNTH.value:\n *Bs, N, T, _ = query.shape\n *_, _, S, _ = key.shape\n else:\n *Bs, T, N, _ = query.shape\n *_, S, _, _ = key.shape\n B = math.prod(Bs)\n\n # reshape to 4D shape\n query = jnp.reshape(query, (B,) + query.shape[-3:])\n key = jnp.reshape(key, (B,) + key.shape[-3:])\n value = jnp.reshape(value, (B,) + key.shape[-3:])\n\n outputs = _dot_product_attention_fp8_fwd_p_wrapper.bind(\n query, key, value, descale_q, descale_k, descale_v, descale_s, scale_s, scale_o,\n scale=scale, use_causal_mask=use_causal_mask, layout=layout, is_training=is_training)\n\n # reshape to original shape\n output, amax_s, amax_o = outputs[0], outputs[1], outputs[2]\n output = jnp.reshape(output, query.shape)\n if is_training:\n activation = outputs[3]\n activation = jnp.reshape(activation, (*Bs, N, T))\n return (output, amax_s, amax_o, activation), out_bdims\n else:\n return (output, amax_s, amax_o), out_bdims\n\ndef _dot_product_attention_fp8_bwd_batcher(\n batched_args, batch_dims, *, scale, use_causal_mask, layout):\n _check_valid_batch_dims(batch_dims)\n query, key, 
value, fwd_output, grad_output, activation,\\n descale_q, descale_k, descale_v, descale_o, descale_dO, descale_s, descale_dP,\\n scale_s, scale_dQ, scale_dK, scale_dV, scale_dP = batched_args\n query_bdim = batch_dims[0]\n out_bdims = query_bdim, query_bdim, query_bdim\n\n if layout == AttentionLayout.BNTH.value:\n *Bs, N, T, _ = query.shape\n *_, _, S, _ = key.shape\n else:\n *Bs, T, N, _ = query.shape\n *_, S, _, _ = key.shape\n B = math.prod(Bs)\n\n # reshape to 4D shape\n query = jnp.reshape(query, (B,) + query.shape[-3:])\n key = jnp.reshape(key, (B,) + key.shape[-3:])\n value = jnp.reshape(value, (B,) + key.shape[-3:])\n\n activation = jnp.reshape(activation, (B, N, T))\n fwd_output = jnp.reshape(fwd_output, (B,) + query.shape[-3:])\n grad_output = jnp.reshape(grad_output, (B,) + query.shape[-3:])\n\n grads = _dot_product_attention_fp8_bwd_p_wrapper.bind(\n query, key, value, fwd_output, grad_output, activation,\n descale_q, descale_k, descale_v, descale_o, descale_dO, descale_s, descale_dP, scale_s, scale_dQ, scale_dK, scale_dV, scale_dP,\n scale=scale, use_causal_mask=use_causal_mask, layout=layout,\n )\n\n grad_query, grad_key, grad_value = grads[:3]\n # reshape to original shape\n grad_query = jnp.reshape(grad_query, query.shape)\n grad_key = jnp.reshape(grad_key, key.shape)\n grad_value = jnp.reshape(grad_value, value.shape)\n\n return grads, out_bdims\n\ndef _infer_fp8_fwd_output_sharding(mesh, arg_shapes, is_training, layout):\n # Prepare variadic_args for the original function\n has_bias = False # Adjust as needed\n variadic_args = (has_bias, None) # Dummy value, adjust as necessary\n\n # Call the original function with the required parameters\n output_sharding = _infer_fwd_output_sharding(mesh, arg_shapes, variadic_args, is_training, layout)\n amax_sharding = NamedSharding(mesh, PartitionSpec())\n if is_training:\n out_sharding, activation_sharding = output_sharding[0], output_sharding[1]\n return [out_sharding, amax_sharding, amax_sharding, activation_sharding]\n return output_sharding + [amax_sharding, amax_sharding]\n\n_dot_product_attention_fp8_fwd_lower = custom_partitioning(\n _dot_product_attention_fp8_fwd_impl, static_argnums=(9, 10, 11, 12))\n\ndef _dot_product_attention_fp8_fwd_infer_sharding_from_operands(\n scale, use_causal_mask, layout, is_training,\n mesh, arg_shapes, result_shape):\n return _infer_fp8_fwd_output_sharding(mesh, arg_shapes, is_training, layout)\n\ndef _dot_product_attention_fp8_fwd_partition(\n scale, use_causal_mask, layout, is_training,\n mesh, arg_shapes, result_shape):\n # args sharding\n arg_shardings = tuple(arg_i.sharding for arg_i in arg_shapes)\n out_shardings = _infer_fp8_fwd_output_sharding(\n mesh, arg_shapes, is_training, layout)\n impl = functools.partial(\n _dot_product_attention_fp8_fwd_impl, scale=scale, use_causal_mask=use_causal_mask,\n layout=layout, is_training=is_training)\n return mesh, impl, out_shardings, arg_shardings\n\ndef _infer_fp8_bwd_output_sharding(mesh, arg_shapes, layout):\n # Prepare variadic_args for the original function\n has_bias = False # Adjust as needed\n has_dbias = False # Adjust as needed\n variadic_args = (has_bias, has_dbias) # Dummy value, adjust as necessary\n\n # Call the original function with the required parameters\n output_shardings = _infer_bwd_output_sharding(mesh, arg_shapes, layout, variadic_args)\n\n # Prepare amax_sharding\n amax_sharding = NamedSharding(mesh, PartitionSpec()) # Use a default spec or adjust as needed\n\n # Append amax_sharding for each output sharding\n 
out_shardings_with_amax = output_shardings + [amax_sharding] * 4\n\n return out_shardings_with_amax\n\n_dot_product_attention_fp8_bwd_lower = custom_partitioning(\n _dot_product_attention_fp8_bwd_impl, static_argnums=(18,19,20)\n)\n\ndef _dot_product_attention_fp8_bwd_infer_sharding_from_operands(\n scale, use_causal_mask, layout, mesh,\n arg_shapes, result_shape):\n return _infer_fp8_bwd_output_sharding(mesh, arg_shapes, layout)\n\ndef _dot_product_attention_fp8_bwd_partition(\n scale, use_causal_mask, layout, mesh,\n arg_shapes, result_shape):\n out_shardings = _infer_fp8_bwd_output_sharding(mesh, arg_shapes, layout)\n # args sharding\n arg_shardings = tuple(arg_i.sharding for arg_i in arg_shapes)\n impl = functools.partial(\n _dot_product_attention_fp8_bwd_impl, scale=scale,\n use_causal_mask=use_causal_mask, layout=layout\n )\n return mesh, impl, out_shardings, arg_shardings\n\n# Create dot_product_attention_fp8_fwd_p for forward operation.\n_dot_product_attention_fp8_fwd_p = core.Primitive("dot_product_attention_fp8_fwd")\n_dot_product_attention_fp8_fwd_p.multiple_results = True\n_dot_product_attention_fp8_fwd_p.def_impl(\n functools.partial(dispatch.apply_primitive, _dot_product_attention_fp8_fwd_p)\n)\n_dot_product_attention_fp8_fwd_p.def_abstract_eval(\n _dot_product_attention_fp8_fwd_abstract\n)\n\nmlir.register_lowering(\n _dot_product_attention_fp8_fwd_p,\n _dot_product_attention_fp8_fwd_cuda_lowering,\n platform="cuda",\n)\n\n_dot_product_attention_fp8_fwd_p_wrapper = core.Primitive(\n "dot_product_attention_fp8_fwd_wrapper"\n)\n_dot_product_attention_fp8_fwd_p_wrapper.multiple_results = True\n_dot_product_attention_fp8_fwd_p_wrapper.def_impl(_dot_product_attention_fp8_fwd_impl)\n_dot_product_attention_fp8_fwd_p_wrapper.def_abstract_eval(\n _dot_product_attention_fp8_fwd_abstract\n)\n\n# Create dot_product_attention_bwd_p for backward operation.\n_dot_product_attention_fp8_bwd_p = core.Primitive("dot_product_attention_fp8_bwd")\n_dot_product_attention_fp8_bwd_p.multiple_results = True\n_dot_product_attention_fp8_bwd_p.def_impl(\n functools.partial(dispatch.apply_primitive, _dot_product_attention_fp8_bwd_p)\n)\n_dot_product_attention_fp8_bwd_p.def_abstract_eval(\n _dot_product_attention_fp8_bwd_abstract\n)\n\nmlir.register_lowering(\n _dot_product_attention_fp8_bwd_p,\n _dot_product_attention_fp8_bwd_cuda_lowering,\n platform="cuda",\n)\n\n_dot_product_attention_fp8_bwd_p_wrapper = core.Primitive(\n "dot_product_attention_fp8_bwd_wrapper"\n)\n_dot_product_attention_fp8_bwd_p_wrapper.multiple_results = True\n_dot_product_attention_fp8_bwd_p_wrapper.def_impl(_dot_product_attention_fp8_bwd_impl)\n_dot_product_attention_fp8_bwd_p_wrapper.def_abstract_eval(\n _dot_product_attention_fp8_bwd_abstract\n)\n\nbatching.primitive_batchers[\n _dot_product_attention_fp8_fwd_p_wrapper\n] = _dot_product_attention_fp8_fwd_batcher\nbatching.primitive_batchers[\n _dot_product_attention_fp8_bwd_p_wrapper\n] = _dot_product_attention_fp8_bwd_batcher\n\n_dot_product_attention_fp8_fwd_lower.def_partition(\n infer_sharding_from_operands=_dot_product_attention_fp8_fwd_infer_sharding_from_operands,\n partition=_dot_product_attention_fp8_fwd_partition)\n\nmlir.register_lowering(_dot_product_attention_fp8_fwd_p_wrapper,\n mlir.lower_fun(_dot_product_attention_fp8_fwd_lower, multiple_results=True))\n\n_dot_product_attention_fp8_bwd_lower.def_partition(\n infer_sharding_from_operands=_dot_product_attention_fp8_bwd_infer_sharding_from_operands,\n 
partition=_dot_product_attention_fp8_bwd_partition)\n\nmlir.register_lowering(_dot_product_attention_fp8_bwd_p_wrapper,\n mlir.lower_fun(_dot_product_attention_fp8_bwd_lower, multiple_results=True))\n\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_fp8_fwd_p\n)\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_fp8_fwd_p_wrapper\n)\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_fp8_bwd_p\n)\ndispatch.prim_requires_devices_during_lowering.add(\n _dot_product_attention_fp8_bwd_p_wrapper\n)\n\n@functools.partial(jax.custom_vjp, nondiff_argnums=(4, 5, 6, 7))\ndef _dot_product_attention_fp8(query: Array,\n key: Array,\n value: Array,\n fp8_params: dict[str, Array],\n scale: float,\n use_causal_mask: bool,\n layout: int,\n cudnn_version: int):\n output, amax_s, amax_o = _dot_product_attention_fp8_fwd(\n query, key, value, params_from_keys(fp8_params, fp8_params_keys_fwd),\n scale, use_causal_mask, layout, cudnn_version\n )\n return output, amax_s, amax_o\n\n_dot_product_attention_fp8.defvjp(_dot_product_attention_fp8_fwd_rule, _dot_product_attention_fp8_bwd_rule)\n\ndef combine_bias_and_mask(bias, mask, dtype):\n if bias is not None:\n # reshape bias to have 4D shape\n bias = bias.reshape((1,) * (4 - len(bias.shape)) + bias.shape)\n\n if mask is not None:\n if mask.dtype == jnp.bool:\n large_negative_number = get_large_negative_number(dtype)\n mask = jnp.where(mask, jnp.asarray(0, dtype), large_negative_number)\n # reshape mask to have 4D shape\n mask = mask.reshape((1,) * (4 - len(mask.shape)) + mask.shape) # type: ignore[union-attr]\n\n # combine bias and mask\n if bias is None:\n bias = mask\n else:\n if mask is not None:\n # should be broadcast to same shape\n bias = bias + mask\n return bias\n\n# User interface\ndef paged_attention(\n query: Array,\n key: Array,\n value: Array,\n q_seqlen: Array,\n kv_seqlen: Array,\n page_table_k: Array,\n page_table_v: Array,\n bias: Array | None = None,\n mask: Array | None = None,\n fp8_params: FP8Params | None = None,\n *,\n scale: float = 1.0,\n mask_type: MaskType = MaskType.NO_MASK,\n seed: int = 42,\n dropout_rate: float = 0.,\n qkv_layout: str = "BTNH",\n sliding_window_length: int | None = None,\n use_fp8: bool = False,\n return_residual: bool = False\n):\n """Computes paged attention described in https://arxiv.org/pdf/2309.06180.\n\n B = batch size\n S = length of the key/value (source)\n T = length of the query (target)\n N = number of attention heads\n H = dimensions of each attention head.\n\n Args:\n query: Queries for attention calculation with a shape of BTNH or BNTH.\n key: Keys for attention calculation with a shape of\n [num_blocks, block_size, N, H] or [num_blocks, N, block_size, H] where\n num_blocks = B * Ceil(S / block_size).\n value: Values to be used in attention with a shape of\n [num_blocks, block_size, N, H] or [num_blocks, N, block_size, H] where\n num_blocks = B * Ceil(S / block_size).\n q_seqlen: Non padded sequence length of query with a shape of B.\n kv_seqlen: Non padded sequence length of key and value with a shape of B.\n page_table_k: page table for key of shape [B, 1, num_blocks_per_batch, 1]\n where num_blocks_per_batch = Ceil(S / block_size).\n page_table_v: page table for value of shape [B, 1, num_blocks_per_batch, 1]\n where num_blocks_per_batch = Ceil(S / block_size).\n bias: Bias to be added to logits with a shape of BNTS.\n mask: Mask used to filter out logits with a shape of BNTS.\n scale: Scale for the query.\n qkv_layout: 
Layout string, with supported formats being BTNH, BNTH, BSNH,\n BNSH.\n sliding_window_length: Window size to make attention only attend to each\n token's left local window (pos - sliding_window_length, pos] where `pos`\n is the index of each token. E.g., if sliding_window_length == 3 and the\n sequence is [0, 1, 2, 3, c, 4, 5], token `c` can attend to [4, 5, c].\n use_fp8: Whether to use FP8 attention mechanism.\n return_residual: Whether to return the logsumexp tensor of shape BTN\n or BNT to users. See section 3.1.1 in the FlashAttention-2 paper:\n https://arxiv.org/pdf/2307.08691 to find the definition of logsumexp.\n Returns:\n output: the same shape as the query.\n residual: the logsumexp tensor if return_residual=True. (non fp8)\n """\n cudnn_version = check_cudnn_version()\n layout = _normalize_layout(qkv_layout)\n if use_fp8:\n raise ValueError("Paged attention doesn't support fp8 for now.")\n if has_padding(mask_type) and (q_seqlen is None or kv_seqlen is None):\n raise ValueError("Require q_seqlen and kv_seqlen to generate padding mask.")\n if sliding_window_length is not None and sliding_window_length <= 0:\n raise ValueError(\n f"Require sliding_window_length > 0, got {sliding_window_length}.")\n\n bias = combine_bias_and_mask(bias, mask, query.dtype)\n # check if input shape and data type is compatiable\n check_layout(query, key, value, bias, q_seqlen, kv_seqlen, None, None,\n page_table_k, page_table_v, layout)\n has_bias = bias is not None\n has_dbias = has_bias and \\n should_export_dbias(bias.shape, query.shape, layout) # type: ignore[union-attr]\n variadic_args = (has_bias, has_dbias)\n\n _not_used = jnp.zeros(0, dtype=query.dtype)\n if bias is None:\n bias = _not_used\n\n output = _dot_product_attention(\n query, key, value, bias, q_seqlen, kv_seqlen, _not_used, _not_used,\n page_table_k, page_table_v, scale, seed, dropout_rate, variadic_args,\n mask_type, layout.value, sliding_window_length, cudnn_version,\n return_residual)\n return output\n\n\ndef dot_product_attention(\n query: Array,\n key: Array,\n value: Array,\n bias: Array | None = None,\n mask: Array | None = None,\n q_seqlen: Array | None = None,\n kv_seqlen: Array | None = None,\n q_offsets: Array | None = None,\n kv_offsets: Array | None = None,\n fp8_params: FP8Params | None = None,\n *,\n scale: float = 1.0,\n mask_type: MaskType = MaskType.NO_MASK,\n seed: int = 42,\n dropout_rate: float = 0.,\n qkv_layout: str = "BTNH",\n sliding_window_length: int | None = None,\n use_fp8: bool = False,\n return_residual: bool = False\n):\n """Computes dot-product attention given query (Q), key (K), and value (V).\n\n This function serves as the core operation for applying attention\n mechanisms as described in the paper [https://arxiv.org/abs/1706.03762].\n Initially, it determines the attention weights by processing Q and K,\n subsequently combining the outcomes using K. Throughout this function, we\n utilize the following uppercase letters to represent specific parameters of\n array:\n\n B = batch size\n S = length of the key/value (source)\n T = length of the query (target)\n N = number of attention heads\n H = dimensions of each attention head.\n\n The supported layouts for Q, K, V are either BT(S)NH or BNT(S)H, and they must\n adhere to the same layout. 
The output layout remains consistent with Q,\n defaulting to BT(S)NH.\n\n Args:\n query: Queries for attention calculation with a shape of BTNH or BNTH.\n key: Keys for attention calculation with a shape of BSNH or BNSH.\n value: Values to be used in attention with a shape of BSNH or BNSH.\n bias: Bias to be added to logits with a shape of BNTS.\n mask: Mask used to filter out logits with a shape of BNTS.\n q_seqlen: Non padded sequence length of query with a shape of B.\n If q_offsets is set, q_seqlen should have shape [B,M] where M is the\n maximum number of segments per batch. For batch that has less segments\n than maximum segments, fill the padded entries with -1.\n kv_seqlen: Non padded sequence length of key and value with a shape of B.\n If kv_offsets is set, kv_seqlen should have shape [B,M] where M is the\n maximum number of segments per batch. For batch that has less segments\n than maximum segments, fill the padded entries with -1.\n q_offsets: offset of each segment packed in query with a shape of [B,M+1]\n where M is the maximum number of segments per batch. For batch that has\n less segments than maximum segments, fill the padded entries with -1.\n E.g, if 2 batches has 3 and 2 segments respectively, each segment has\n size 1, q_offsets = [[0,1,2,-1], [0,1,-1,-1]]. q_seqlen should be set\n to indicate the size of each segment.\n kv_offsets: offset of each segment packed in key with a shape of [B,M+1]\n where M is the maximum number of segments per batch. For batch that has\n less segments than maximum segments, fill the padded entries with -1.\n E.g, if 2 batches has 3 and 2 segments respectively, each segment has\n size 1, kv_offsets = [[0,1,2,-1], [0,1,-1,-1]]. kv_seqlen should be set\n to indicate the size of each segment.\n scale: Scale for the query.\n dropout_rate: Dropout rate.\n qkv_layout: Layout string, with supported formats being BTNH, BNTH, BSNH,\n BNSH.\n sliding_window_length: Window size to make attention only attend to each\n token's left local window (pos - sliding_window_length, pos] where `pos`\n is the index of each token. E.g., if sliding_window_length == 3 and the\n sequence is [0, 1, 2, 3, c, 4, 5], token `c` can attend to [4, 5, c].\n use_fp8: Whether to use FP8 attention mechanism.\n return_residual: Whether to return the logsumexp tensor of shape BTN\n or BNT to users. See section 3.1.1 in the FlashAttention-2 paper:\n https://arxiv.org/pdf/2307.08691 to find the definition of logsumexp.\n Returns:\n output: the same shape as the query.\n residual: the logsumexp tensor if return_residual=True. (non fp8)\n amax_s: amax of state. (fp8 only)\n amax_o: amax of output. 
(fp8 only)\n """\n # TODO(b/380898464): Check the compute capability, e.g., require GPU device,\n # in the kernel implementation (c++) code.\n cudnn_version = check_cudnn_version()\n layout = _normalize_layout(qkv_layout)\n\n if use_fp8:\n if fp8_params is None:\n raise ValueError("fp8_params should not be None.")\n if mask_type not in (MaskType.NO_MASK, MaskType.CAUSAL):\n raise ValueError("Only NO_MASK or CAUSAL masks are supported for fp8.")\n if not all(x is None for x in [bias, mask, q_seqlen, kv_seqlen]):\n raise ValueError(\n f"Expected 'None' for bias, mask, q_seqlen, and kv_seqlen, "\n f"but got: bias={bias}, mask={mask}, q_seqlen={q_seqlen}, kv_seqlen={kv_seqlen}"\n )\n check_fp8_params(fp8_params)\n check_layout(query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n None, None, layout)\n output, amax_s, amax_o = _dot_product_attention_fp8(\n query, key, value, fp8_params,\n scale, mask_type == MaskType.CAUSAL, layout.value, cudnn_version\n )\n return output, amax_s, amax_o\n else:\n if has_padding(mask_type) and (q_seqlen is None or kv_seqlen is None):\n raise ValueError("Require q_seqlen and kv_seqlen to generate padding mask")\n if sliding_window_length is not None and sliding_window_length <= 0:\n raise ValueError(\n f"Require sliding_window_length > 0, got {sliding_window_length}")\n if q_offsets is not None and (q_seqlen is None or kv_seqlen is None):\n raise ValueError("Require q_seqlen and kv_seqlen to use packed layout")\n\n bias = combine_bias_and_mask(bias, mask, query.dtype)\n # check if input shape and data type is compatiable\n check_layout(query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n None, None, layout)\n has_bias = bias is not None\n has_dbias = has_bias and \\n should_export_dbias(bias.shape, query.shape, layout) # type: ignore[union-attr]\n variadic_args = (has_bias, has_dbias)\n\n _not_used = jnp.zeros(0, dtype=query.dtype)\n if bias is None:\n bias = _not_used\n if q_seqlen is None:\n q_seqlen = _not_used\n if kv_seqlen is None:\n kv_seqlen = _not_used\n if q_offsets is None:\n q_offsets = _not_used\n if kv_offsets is None:\n kv_offsets = _not_used\n\n output = _dot_product_attention(\n query, key, value, bias, q_seqlen, kv_seqlen, q_offsets, kv_offsets,\n _not_used, _not_used, scale, seed, dropout_rate, variadic_args,\n mask_type, layout.value, sliding_window_length, cudnn_version,\n return_residual)\n return output\n
| python | tab
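
The Text cell of the row above ends a verbatim dump of `.venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py`. As a reading aid, here is a minimal, hypothetical usage sketch of the public `dot_product_attention` entry point that the dump defines. The import path, the `MaskType` enum, and the keyword-only arguments are taken directly from the dumped source; the shapes, the dtype, and the availability of a CUDA build of JAX with a recent cuDNN are illustrative assumptions, not something this log confirms.

# Hypothetical sketch: drive the cuDNN fused attention captured above.
# Assumes a CUDA-enabled JAX build; import path and MaskType come from the dump.
import jax.numpy as jnp
from jax._src.cudnn.fused_attention_stablehlo import (
    MaskType,
    dot_product_attention,
)

B, T, S, N, H = 2, 128, 128, 8, 64  # batch, query len, kv len, heads, head dim
query = jnp.zeros((B, T, N, H), jnp.bfloat16)  # BTNH, the default qkv_layout
key   = jnp.zeros((B, S, N, H), jnp.bfloat16)
value = jnp.zeros((B, S, N, H), jnp.bfloat16)

# Causal flash attention; bias/mask stay None, so no dense mask is materialized.
out = dot_product_attention(
    query, key, value,
    scale=1.0 / (H ** 0.5),
    mask_type=MaskType.CAUSAL,
    qkv_layout="BTNH",
)
assert out.shape == query.shape  # the output keeps the query layout
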
223 | 1,234,663 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 72,206 | 0 | null | python | selection_mouse
224 | 1,234,804 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 72,206 | 4 | mask | python | selection_mouse
225 | 1,237,725 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 72,503 | 0 | null | python | selection_mouse
226 | 1,238,320 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 72,458 | 0 | null | python | selection_mouse
227 | 1,240,202 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 76,532 | 0 | null | python | selection_command
228 | 1,247,417 | .venv/lib/python3.10/site-packages/jax/_src/cudnn/fused_attention_stablehlo.py | 77,184 | 0 | null | python | selection_command
229 | 1,300,024 | models/dynamics.py | 0 | 0 | null | python | tab
230 | 1,302,269 | utils/nn.py | 0 | 0 | null | python | tab
231 | 1,311,996 | utils/nn.py | 10,604 | 0 | null | python | selection_mouse
232 | 1,312,156 | utils/nn.py | 10,592 | 14 | attention_mask | python | selection_mouse
233 | 1,314,807 | utils/nn.py | 10,600 | 0 | null | python | selection_mouse
234 | 1,315,484 | utils/nn.py | 10,516 | 0 | null | python | selection_mouse
235 | 1,315,628 | utils/nn.py | 10,507 | 14 | attention_mask | python | selection_mouse
236 | 1,317,548 | utils/nn.py | 11,133 | 0 | null | python | selection_mouse
237 | 1,317,559 | utils/nn.py | 11,132 | 0 | null | python | selection_command
238 | 1,346,807 | utils/nn.py | 5,825 | 0 | null | python | selection_mouse
239 | 1,347,918 | utils/nn.py | 5,825 | 0 | , | python | content
240 | 1,347,920 | utils/nn.py | 5,826 | 0 | null | python | selection_keyboard
241 | 1,348,041 | utils/nn.py | 5,826 | 0 |   | python | content
242 | 1,348,041 | utils/nn.py | 5,827 | 0 | null | python | selection_keyboard
243 | 1,348,200 | utils/nn.py | 5,827 | 0 | c | python | content
244 | 1,348,201 | utils/nn.py | 5,828 | 0 | null | python | selection_keyboard
245 | 1,348,368 | utils/nn.py | 5,828 | 0 | a | python | content
246 | 1,348,370 | utils/nn.py | 5,829 | 0 | null | python | selection_keyboard
247 | 1,348,508 | utils/nn.py | 5,829 | 0 | u | python | content
248 | 1,348,510 | utils/nn.py | 5,830 | 0 | null | python | selection_keyboard
249 | 1,349,159 | utils/nn.py | 5,830 | 0 | s | python | content
250 | 1,349,160 | utils/nn.py | 5,831 | 0 | null | python | selection_keyboard
251 | 1,349,344 | utils/nn.py | 5,831 | 0 | a | python | content
252 | 1,349,345 | utils/nn.py | 5,832 | 0 | null | python | selection_keyboard
253 | 1,349,428 | utils/nn.py | 5,832 | 0 | l | python | content
254 | 1,349,429 | utils/nn.py | 5,833 | 0 | null | python | selection_keyboard
255 | 1,349,742 | utils/nn.py | 5,833 | 0 | _ | python | content
256 | 1,349,743 | utils/nn.py | 5,834 | 0 | null | python | selection_keyboard
257 | 1,350,058 | utils/nn.py | 5,834 | 0 | m | python | content
258 | 1,350,059 | utils/nn.py | 5,835 | 0 | null | python | selection_keyboard
259 | 1,350,139 | utils/nn.py | 5,835 | 0 | a | python | content
260 | 1,350,140 | utils/nn.py | 5,836 | 0 | null | python | selection_keyboard
261 | 1,350,140 | utils/nn.py | 5,836 | 0 | s | python | content
262 | 1,350,140 | utils/nn.py | 5,837 | 0 | null | python | selection_keyboard
263 | 1,350,191 | utils/nn.py | 5,837 | 0 | k | python | content
264 | 1,350,192 | utils/nn.py | 5,838 | 0 | null | python | selection_keyboard
265 | 1,352,407 | utils/nn.py | 5,827 | 0 | m | python | content
266 | 1,352,408 | utils/nn.py | 5,828 | 0 | null | python | selection_keyboard
267 | 1,352,549 | utils/nn.py | 5,828 | 0 | a | python | content
268 | 1,352,550 | utils/nn.py | 5,829 | 0 | null | python | selection_keyboard
269 | 1,352,652 | utils/nn.py | 5,829 | 0 | s | python | content
270 | 1,352,656 | utils/nn.py | 5,830 | 0 | null | python | selection_keyboard
271 | 1,352,704 | utils/nn.py | 5,830 | 0 | k | python | content
272 | 1,352,705 | utils/nn.py | 5,831 | 0 | null | python | selection_keyboard
273 | 1,353,117 | utils/nn.py | 5,831 | 0 | ? | python | content
274 | 1,353,118 | utils/nn.py | 5,832 | 0 | null | python | selection_keyboard
275 | 1,353,570 | utils/nn.py | 5,831 | 1 | null | python | content
276 | 1,353,790 | utils/nn.py | 5,831 | 0 | = | python | content
277 | 1,353,791 | utils/nn.py | 5,832 | 0 | null | python | selection_keyboard
278 | 1,353,924 | utils/nn.py | 5,831 | 0 | null | python | selection_command
279 | 1,355,661 | utils/nn.py | 5,731 | 0 | null | python | selection_command
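
Rows 239 through 276 above are a keystroke trace in `utils/nn.py`: a `content` row appears to replace `RangeLength` characters at `RangeOffset` with `Text` (length 0 is a pure insertion; row 275's length 1 with a null `Text` is a single-character deletion), while the `selection_*` rows only move the caret. Under exactly that inferred semantics, and with a hypothetical `Event` record that is not part of the log, a trace like this can be replayed to reconstruct the edited buffer:

# Sketch of replaying the event rows; the replace-at-offset semantics is
# inferred from the data above, not documented anywhere in this log.
from dataclasses import dataclass

@dataclass
class Event:            # hypothetical container mirroring the table columns
    offset: int         # RangeOffset: where the edit lands
    length: int         # RangeLength: how many characters are replaced
    text: str           # Text: replacement text ("" for a deletion)
    type: str           # Type: only "content" rows mutate the buffer

def replay(buffer: str, events: list[Event]) -> str:
    for e in events:
        if e.type == "content":
            buffer = buffer[:e.offset] + e.text + buffer[e.offset + e.length:]
    return buffer

# Miniature version of the logged edit (inserting a keyword argument):
buf = "attn(q, k, v)"
edits = [Event(12, 0, ",", "content"),
         Event(13, 0, " ", "content"),
         Event(14, 0, "mask=None", "content")]
print(replay(buf, edits))   # -> attn(q, k, v, mask=None)

Applied to the real offsets (5,825 onward), the same loop reproduces the `, causal_mask` insertion recorded in rows 239-263 and the later `?` to `=` fix-up in rows 273-276.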