Column schema:

| Column | Dtype | Values |
| --- | --- | --- |
| Sequence | int64 | 1 – 25.2k |
| Time | int64 | 1 – 858M |
| File | string (class) | 830 values |
| RangeOffset | int64 | 0 – 2.21M |
| RangeLength | int64 | 0 – 168k |
| Text | string | lengths 1 – 4.7M |
| Language | string (class) | 20 values |
| Type | string (class) | 9 values |
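The columns above form a simple per-event record. The snippet below is a minimal, illustrative sketch rather than part of the dataset's own tooling: it assumes the rows are exported as JSON Lines with exactly these column names, and the file name `events.jsonl` is hypothetical.

```python
# Minimal sketch for working with rows that follow the schema above.
# Assumptions (not from the source): rows are stored as JSON Lines in a
# hypothetical file "events.jsonl", one object per row, using the same
# column names as the table and JSON null for empty Text values.
import json
from dataclasses import dataclass
from typing import Optional


@dataclass
class Event:
    sequence: int        # Sequence: event index (increases across the rows shown)
    time: int            # Time: timestamp in the dataset's own units
    file: str            # File: path of the file the event applies to
    range_offset: int    # RangeOffset
    range_length: int    # RangeLength (0 for the null-Text rows shown below)
    text: Optional[str]  # Text: text associated with the event, or None
    language: str        # Language, e.g. "python"
    type: str            # Type, e.g. "selection_mouse", "selection_command", "tab"


def load_events(path: str = "events.jsonl") -> list[Event]:
    events = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            events.append(Event(
                sequence=row["Sequence"],
                time=row["Time"],
                file=row["File"],
                range_offset=row["RangeOffset"],
                range_length=row["RangeLength"],
                text=row["Text"],
                language=row["Language"],
                type=row["Type"],
            ))
    return events
```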
Rows 80–169:

| Sequence | Time | File | RangeOffset | RangeLength | Text | Language | Type |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 80 | 988,022 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,382 | 0 | null | python | selection_mouse |
| 81 | 998,299 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,135 | 0 | null | python | selection_mouse |
| 82 | 998,423 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 21 | dot_product_attention | python | selection_mouse |
| 83 | 998,926 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 22 | dot_product_attention | python | selection_mouse |
| 84 | 998,941 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 24 | dot_product_attention or | python | selection_mouse |
| 85 | 998,984 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 25 | dot_product_attention or | python | selection_mouse |
| 86 | 999,025 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 35 | dot_product_attention or compatible | python | selection_mouse |
| 87 | 999,244 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 36 | dot_product_attention or compatible | python | selection_mouse |
| 88 | 999,261 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 44 | dot_product_attention or compatible function | python | selection_mouse |
| 89 | 999,286 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,134 | 118 | dot_product_attention or compatible function. Accepts query,\n      key, value, and returns output of shape ``[bs, dim1 | python | selection_mouse |
| 90 | 999,700 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,251 | 0 | null | python | selection_mouse |
| 91 | 1,000,643 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,202 | 0 | null | python | selection_mouse |
| 92 | 1,000,800 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 3 | key | python | selection_mouse |
| 93 | 1,001,007 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 4 | key, | python | selection_mouse |
| 94 | 1,001,039 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 86 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads | python | selection_mouse |
| 95 | 1,001,077 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 88 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, | python | selection_mouse |
| 96 | 1,001,090 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 102 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels | python | selection_mouse |
| 97 | 1,001,277 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 103 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels] | python | selection_mouse |
| 98 | 1,001,306 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 104 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]` | python | selection_mouse |
| 99 | 1,001,313 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 105 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 100 | 1,001,526 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 34 | key, value, and returns output of | python | selection_mouse |
| 101 | 1,001,602 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 39 | key, value, and returns output of shape | python | selection_mouse |
| 102 | 1,002,077 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,238 | 0 | null | python | selection_mouse |
| 103 | 1,002,241 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,235 | 5 | shape | python | selection_mouse |
| 104 | 1,002,439 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,232 | 8 | of shape | python | selection_mouse |
| 105 | 1,002,485 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,225 | 15 | output of shape | python | selection_mouse |
| 106 | 1,002,527 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,224 | 16 | output of shape | python | selection_mouse |
| 107 | 1,002,527 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,217 | 23 | returns output of shape | python | selection_mouse |
| 108 | 1,002,564 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,216 | 24 | returns output of shape | python | selection_mouse |
| 109 | 1,002,642 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,213 | 27 | and returns output of shape | python | selection_mouse |
| 110 | 1,002,690 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,212 | 28 | and returns output of shape | python | selection_mouse |
| 111 | 1,002,690 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,211 | 29 | , and returns output of shape | python | selection_mouse |
| 112 | 1,002,724 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,206 | 34 | value, and returns output of shape | python | selection_mouse |
| 113 | 1,002,767 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,205 | 35 | value, and returns output of shape | python | selection_mouse |
| 114 | 1,002,809 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,204 | 36 | , value, and returns output of shape | python | selection_mouse |
| 115 | 1,002,851 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 39 | key, value, and returns output of shape | python | selection_mouse |
| 116 | 1,003,211 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,203 | 0 | null | python | selection_mouse |
| 117 | 1,003,211 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 3 | key | python | selection_mouse |
| 118 | 1,003,785 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,125 | 0 | null | python | selection_mouse |
| 119 | 1,003,940 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 12 | attention_fn | python | selection_mouse |
| 120 | 1,004,127 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 85 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, | python | selection_mouse |
| 121 | 1,004,167 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 91 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, value | python | selection_mouse |
| 122 | 1,004,208 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 168 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, | python | selection_mouse |
| 123 | 1,004,209 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 183 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels | python | selection_mouse |
| 124 | 1,004,465 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 184 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels] | python | selection_mouse |
| 125 | 1,004,511 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 185 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]` | python | selection_mouse |
| 126 | 1,004,553 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 186 | attention_fn: dot_product_attention or compatible function. Accepts query,\n      key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 127 | 1,005,063 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,306 | 0 | null | python | selection_mouse |
| 128 | 1,005,069 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,305 | 0 | null | python | selection_command |
| 129 | 1,005,206 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,306 | 0 | null | python | selection_mouse |
| 130 | 1,005,207 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,305 | 0 | null | python | selection_command |
| 131 | 1,005,361 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,305 | 1 | ` | python | selection_mouse |
| 132 | 1,005,361 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,306 | 0 | null | python | selection_command |
| 133 | 1,005,384 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,304 | 2 | `` | python | selection_mouse |
| 134 | 1,005,400 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,289 | 17 | value_channels]`` | python | selection_mouse |
| 135 | 1,005,491 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,278 | 28 | num_heads, value_channels]`` | python | selection_mouse |
| 136 | 1,005,575 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 105 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 137 | 1,005,576 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,200 | 106 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 138 | 1,005,577 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,199 | 107 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 139 | 1,005,594 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,198 | 108 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 140 | 1,005,610 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,197 | 109 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 141 | 1,005,641 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,196 | 110 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 142 | 1,005,725 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,195 | 111 | key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n      num_heads, value_channels]`` | python | selection_mouse |
| 143 | 1,006,534 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,122 | 0 | null | python | selection_mouse |
| 144 | 1,008,573 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 16,197 | 0 | null | python | selection_mouse |
| 145 | 1,008,717 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 16,190 | 8 | use_bias | python | selection_mouse |
| 146 | 1,009,218 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 16,221 | 0 | null | python | selection_mouse |
| 147 | 1,009,373 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 16,214 | 12 | attention_fn | python | selection_mouse |
| 148 | 1,043,588 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 16,221 | 0 | null | python | selection_mouse |
| 149 | 1,046,145 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 25,962 | 0 | null | python | selection_command |
| 150 | 1,056,862 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,550 | 0 | null | python | selection_command |
| 151 | 1,059,253 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,616 | 0 | null | python | selection_command |
| 152 | 1,060,204 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,694 | 0 | null | python | selection_command |
| 153 | 1,060,984 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 31,038 | 0 | null | python | selection_command |
| 154 | 1,064,980 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,120 | 0 | null | python | selection_command |
| 155 | 1,065,730 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,201 | 0 | null | python | selection_command |
| 156 | 1,067,579 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,122 | 0 | null | python | selection_command |
| 157 | 1,068,189 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 16,214 | 0 | null | python | selection_command |
| 158 | 1,070,354 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 25,962 | 0 | null | python | selection_command |
| 159 | 1,071,394 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,550 | 0 | null | python | selection_command |
| 160 | 1,072,995 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,616 | 0 | null | python | selection_command |
| 161 | 1,085,387 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,047 | 0 | null | python | selection_mouse |
| 162 | 1,086,092 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,046 | 4 | mask | python | selection_mouse |
| 163 | 1,092,526 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 26,022 | 0 | null | python | selection_mouse |
| 164 | 1,094,810 | models/dynamics.py | 0 | 0 | null | python | tab |
| 165 | 1,099,763 | utils/nn.py | 0 | 0 | null | python | tab |
| 166 | 1,100,959 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 0 | 0 | null | python | tab |
| 167 | 1,102,085 | utils/nn.py | 0 | 0 | null | python | tab |
| 168 | 1,104,064 | utils/nn.py | 4,976 | 0 | null | python | selection_mouse |
| 169 | 1,110,991 | utils/nn.py | 11,043 | 0 | null | python | selection_mouse |
Sequence 170: Time 1,111,395 · File .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py · RangeOffset 0 · RangeLength 0 · Text (the full contents of the file, reproduced verbatim below):
# Copyright 2019 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""Shared neural network activations and other functions."""\n\nfrom __future__ import annotations\n\nfrom collections.abc import Sequence\nfrom functools import partial\nimport operator\nimport math\nimport numpy as np\nfrom typing import Any, List, Literal\nimport warnings\n\nimport jax\nimport jax.numpy as jnp\nfrom jax import custom_jvp\nfrom jax import lax\nfrom jax._src import config\nfrom jax._src import core\nfrom jax._src import deprecations\nfrom jax._src import dtypes\nfrom jax._src import util\nfrom jax._src.core import AxisName\nfrom jax._src.sharding_impls import NamedSharding, PartitionSpec as P\nfrom jax._src.cudnn.fused_attention_stablehlo import (\n dot_product_attention as cudnn_dot_product_attention, MaskType)\nfrom jax._src.cudnn.scaled_matmul_stablehlo import (\n scaled_matmul_wrapper as cudnn_scaled_matmul,\n scaled_dot_general_wrapper as cudnn_scaled_dot_general,\n BlockScaleConfig)\nfrom jax._src.interpreters import batching\nfrom jax._src.interpreters import mlir\nfrom jax._src.numpy import util as numpy_util\nfrom jax._src.typing import Array, ArrayLike, DType, DTypeLike\nfrom jax._src.ops.special import logsumexp as _logsumexp\n\n\n# activations\n@jax.jit\ndef identity(x: ArrayLike) -> Array:\n r"""Identity activation function.\n\n Returns the argument unmodified.\n\n Args:\n x : input array\n\n Returns:\n The argument `x` unmodified.\n\n Examples:\n >>> jax.nn.identity(jax.numpy.array([-2., -1., -0.5, 0, 0.5, 1., 2.]))\n Array([-2. , -1. , -0.5, 0. , 0.5, 1. , 2. ], dtype=float32)\n\n """\n numpy_util.check_arraylike("identity", x)\n return jnp.asarray(x)\n\n@custom_jvp\n@jax.jit\ndef relu(x: ArrayLike) -> Array:\n r"""Rectified linear unit activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{relu}(x) = \max(x, 0)\n\n except under differentiation, we take:\n\n .. math::\n \nabla \mathrm{relu}(0) = 0\n\n For more information see\n `Numerical influence of ReLU’(0) on backpropagation\n <https://dl.acm.org/doi/10.5555/3540261.3540297>`_.\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n Examples:\n >>> jax.nn.relu(jax.numpy.array([-2., -1., -0.5, 0, 0.5, 1., 2.]))\n Array([0. , 0. , 0. , 0. , 0.5, 1. , 2. ], dtype=float32)\n\n See also:\n :func:`relu6`\n\n """\n return jnp.maximum(x, 0)\n# For behavior at 0, see https://dl.acm.org/doi/10.5555/3540261.3540297\nrelu.defjvps(lambda g, ans, x: lax.select(x > 0, g, lax.full_like(g, 0)))\n\n@jax.jit\ndef squareplus(x: ArrayLike, b: ArrayLike = 4) -> Array:\n r"""Squareplus activation function.\n\n Computes the element-wise function\n\n .. 
math::\n \mathrm{squareplus}(x) = \frac{x + \sqrt{x^2 + b}}{2}\n\n as described in https://arxiv.org/abs/2112.11687.\n\n Args:\n x : input array\n b : smoothness parameter\n """\n numpy_util.check_arraylike("squareplus", x)\n numpy_util.check_arraylike("squareplus", b)\n x = jnp.asarray(x)\n b = jnp.asarray(b)\n y = x + jnp.sqrt(jnp.square(x) + b)\n return y / 2\n\n@jax.jit\ndef softplus(x: ArrayLike) -> Array:\n r"""Softplus activation function.\n\n Computes the element-wise function\n\n .. math::\n \mathrm{softplus}(x) = \log(1 + e^x)\n\n Args:\n x : input array\n """\n return jnp.logaddexp(x, 0)\n\n@jax.jit\ndef sparse_plus(x: ArrayLike) -> Array:\n r"""Sparse plus function.\n\n Computes the function:\n\n .. math::\n\n \mathrm{sparse\_plus}(x) = \begin{cases}\n 0, & x \leq -1\\\n \frac{1}{4}(x+1)^2, & -1 < x < 1 \\\n x, & 1 \leq x\n \end{cases}\n\n This is the twin function of the softplus activation ensuring a zero output\n for inputs less than -1 and a linear output for inputs greater than 1,\n while remaining smooth, convex, monotonic by an adequate definition between\n -1 and 1.\n\n Args:\n x: input (float)\n """\n numpy_util.check_arraylike("sparse_plus", x)\n x = jnp.asarray(x)\n return jnp.where(x <= -1.0, 0.0, jnp.where(x >= 1.0, x, (x + 1.0)**2/4))\n\n@jax.jit\ndef soft_sign(x: ArrayLike) -> Array:\n r"""Soft-sign activation function.\n\n Computes the element-wise function\n\n .. math::\n \mathrm{soft\_sign}(x) = \frac{x}{|x| + 1}\n\n Args:\n x : input array\n """\n numpy_util.check_arraylike("soft_sign", x)\n x_arr = jnp.asarray(x)\n return x_arr / (jnp.abs(x_arr) + 1)\n\n@partial(jax.jit, inline=True)\ndef sigmoid(x: ArrayLike) -> Array:\n r"""Sigmoid activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{sigmoid}(x) = \frac{1}{1 + e^{-x}}\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`log_sigmoid`\n\n """\n return lax.logistic(x)\n\n@jax.jit\ndef sparse_sigmoid(x: ArrayLike) -> Array:\n r"""Sparse sigmoid activation function.\n\n Computes the function:\n\n .. math::\n\n \mathrm{sparse\_sigmoid}(x) = \begin{cases}\n 0, & x \leq -1\\\n \frac{1}{2}(x+1), & -1 < x < 1 \\\n 1, & 1 \leq x\n \end{cases}\n\n This is the twin function of the ``sigmoid`` activation ensuring a zero output\n for inputs less than -1, a 1 output for inputs greater than 1, and a linear\n output for inputs between -1 and 1. It is the derivative of ``sparse_plus``.\n\n For more information, see `Learning with Fenchel-Young Losses (section 6.2)\n <https://arxiv.org/abs/1901.02324>`_.\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`sigmoid`\n """\n return 0.5 * jnp.clip(x + 1.0, 0.0, 2.0)\n\n@jax.jit\ndef silu(x: ArrayLike) -> Array:\n r"""SiLU (aka swish) activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{silu}(x) = x \cdot \mathrm{sigmoid}(x) = \frac{x}{1 + e^{-x}}\n\n :func:`swish` and :func:`silu` are both aliases for the same function.\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`sigmoid`\n """\n numpy_util.check_arraylike("silu", x)\n x_arr = jnp.asarray(x)\n return x_arr * sigmoid(x_arr)\n\nswish = silu\n\n@jax.jit\ndef mish(x: ArrayLike) -> Array:\n r"""Mish activation function.\n\n Computes the element-wise function:\n\n .. 
math::\n \mathrm{mish}(x) = x \cdot \mathrm{tanh}(\mathrm{softplus}(x))\n\n For more information, see\n `Mish: A Self Regularized Non-Monotonic Activation Function\n <https://arxiv.org/abs/1908.08681>`_.\n\n Args:\n x : input array\n\n Returns:\n An array.\n """\n numpy_util.check_arraylike("mish", x)\n x_arr = jnp.asarray(x)\n return x_arr * jnp.tanh(softplus(x_arr))\n\n@jax.jit\ndef log_sigmoid(x: ArrayLike) -> Array:\n r"""Log-sigmoid activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{log\_sigmoid}(x) = \log(\mathrm{sigmoid}(x)) = -\log(1 + e^{-x})\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`sigmoid`\n """\n numpy_util.check_arraylike("log_sigmoid", x)\n x_arr = jnp.asarray(x)\n return -softplus(-x_arr)\n\n@jax.jit\ndef elu(x: ArrayLike, alpha: ArrayLike = 1.0) -> Array:\n r"""Exponential linear unit activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{elu}(x) = \begin{cases}\n x, & x > 0\\\n \alpha \left(\exp(x) - 1\right), & x \le 0\n \end{cases}\n\n Args:\n x : input array\n alpha : scalar or array of alpha values (default: 1.0)\n\n Returns:\n An array.\n\n See also:\n :func:`selu`\n """\n numpy_util.check_arraylike("elu", x)\n x_arr = jnp.asarray(x)\n return jnp.where(x_arr > 0,\n x_arr,\n alpha * jnp.expm1(jnp.where(x_arr > 0, 0., x_arr)))\n\n@jax.jit\ndef leaky_relu(x: ArrayLike, negative_slope: ArrayLike = 1e-2) -> Array:\n r"""Leaky rectified linear unit activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{leaky\_relu}(x) = \begin{cases}\n x, & x \ge 0\\\n \alpha x, & x < 0\n \end{cases}\n\n where :math:`\alpha` = :code:`negative_slope`.\n\n Args:\n x : input array\n negative_slope : array or scalar specifying the negative slope (default: 0.01)\n\n Returns:\n An array.\n\n See also:\n :func:`relu`\n """\n numpy_util.check_arraylike("leaky_relu", x)\n x_arr = jnp.asarray(x)\n return jnp.where(x_arr >= 0, x_arr, negative_slope * x_arr)\n\n@jax.jit\ndef hard_tanh(x: ArrayLike) -> Array:\n r"""Hard :math:`\mathrm{tanh}` activation function.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{hard\_tanh}(x) = \begin{cases}\n -1, & x < -1\\\n x, & -1 \le x \le 1\\\n 1, & 1 < x\n \end{cases}\n\n Args:\n x : input array\n\n Returns:\n An array.\n """\n numpy_util.check_arraylike("hard_tanh", x)\n x_arr = jnp.asarray(x)\n return jnp.where(x_arr > 1, 1, jnp.where(x_arr < -1, -1, x_arr))\n\n@jax.jit\ndef celu(x: ArrayLike, alpha: ArrayLike = 1.0) -> Array:\n r"""Continuously-differentiable exponential linear unit activation.\n\n Computes the element-wise function:\n\n .. math::\n \mathrm{celu}(x) = \begin{cases}\n x, & x > 0\\\n \alpha \left(\exp(\frac{x}{\alpha}) - 1\right), & x \le 0\n \end{cases}\n\n For more information, see\n `Continuously Differentiable Exponential Linear Units\n <https://arxiv.org/abs/1704.07483>`_.\n\n Args:\n x : input array\n alpha : array or scalar (default: 1.0)\n\n Returns:\n An array.\n """\n return jnp.maximum(x, 0.0) + alpha * jnp.expm1(jnp.minimum(x, 0.0) / alpha)\n\n@jax.jit\ndef selu(x: ArrayLike) -> Array:\n r"""Scaled exponential linear unit activation.\n\n Computes the element-wise function:\n\n .. 
math::\n \mathrm{selu}(x) = \lambda \begin{cases}\n x, & x > 0\\\n \alpha e^x - \alpha, & x \le 0\n \end{cases}\n\n where :math:`\lambda = 1.0507009873554804934193349852946` and\n :math:`\alpha = 1.6732632423543772848170429916717`.\n\n For more information, see\n `Self-Normalizing Neural Networks\n <https://arxiv.org/abs/1706.02515>`_.\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`elu`\n """\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n return scale * elu(x, alpha)\n\n# TODO(phawkins): this jit was found to change numerics in a test. Debug this.\n# @partial(jax.jit, static_argnames=("approximate",))\ndef gelu(x: ArrayLike, approximate: bool = True) -> Array:\n r"""Gaussian error linear unit activation function.\n\n If ``approximate=False``, computes the element-wise function:\n\n .. math::\n \mathrm{gelu}(x) = \frac{x}{2} \left(\mathrm{erfc} \left(\n \frac{-x}{\sqrt{2}} \right) \right)\n\n If ``approximate=True``, uses the approximate formulation of GELU:\n\n .. math::\n \mathrm{gelu}(x) = \frac{x}{2} \left(1 + \mathrm{tanh} \left(\n \sqrt{\frac{2}{\pi}} \left(x + 0.044715 x^3 \right) \right) \right)\n\n For more information, see `Gaussian Error Linear Units (GELUs)\n <https://arxiv.org/abs/1606.08415>`_, section 2.\n\n Args:\n x: input array\n approximate: whether to use the approximate or exact formulation.\n """\n [x_arr] = numpy_util.promote_args_inexact("gelu", x)\n\n if approximate:\n sqrt_2_over_pi = np.sqrt(2 / np.pi).astype(x_arr.dtype)\n cdf = 0.5 * (1.0 + jnp.tanh(sqrt_2_over_pi * (x_arr + 0.044715 * (x_arr ** 3))))\n return x_arr * cdf\n else:\n sqrt_half = np.sqrt(0.5).astype(x_arr.dtype)\n return jnp.array(\n 0.5 * x_arr * (lax.erfc(-x_arr * sqrt_half)), dtype=x_arr.dtype\n )\n\n@partial(jax.jit, static_argnames=("axis",))\ndef glu(x: ArrayLike, axis: int = -1) -> Array:\n r"""Gated linear unit activation function.\n\n Computes the function:\n\n .. math::\n \mathrm{glu}(x) = x\left[\ldots, 0:\frac{n}{2}, \ldots\right] \cdot\n \mathrm{sigmoid} \left( x\left[\ldots, \frac{n}{2}:n, \ldots\right]\n \right)\n\n where the array is split into two along ``axis``. The size of the ``axis``\n dimension must be divisible by two.\n\n Args:\n x : input array\n axis: the axis along which the split should be computed (default: -1)\n\n Returns:\n An array.\n\n See also:\n :func:`sigmoid`\n """\n numpy_util.check_arraylike("glu", x)\n x_arr = jnp.asarray(x)\n size = x_arr.shape[axis]\n assert size % 2 == 0, "axis size must be divisible by 2"\n x1, x2 = jnp.split(x_arr, 2, axis)\n return x1 * sigmoid(x2)\n\n# other functions\n\nlogsumexp = _logsumexp\n\n\n@partial(jax.jit, static_argnames=("axis",))\ndef log_softmax(x: ArrayLike,\n axis: int | tuple[int, ...] | None = -1,\n where: ArrayLike | None = None) -> Array:\n r"""Log-Softmax function.\n\n Computes the logarithm of the :code:`softmax` function, which rescales\n elements to the range :math:`[-\infty, 0)`.\n\n .. math ::\n \mathrm{log\_softmax}(x)_i = \log \left( \frac{\exp(x_i)}{\sum_j \exp(x_j)}\n \right)\n\n Args:\n x : input array\n axis: the axis or axes along which the :code:`log_softmax` should be\n computed. 
Either an integer or a tuple of integers.\n where: Elements to include in the :code:`log_softmax`.\n\n Returns:\n An array.\n\n Note:\n If any input values are ``+inf``, the result will be all ``NaN``: this reflects the\n fact that ``inf / inf`` is not well-defined in the context of floating-point math.\n\n See also:\n :func:`softmax`\n """\n numpy_util.check_arraylike("log_softmax", x)\n x_arr = jnp.asarray(x)\n x_max = jnp.max(x_arr, axis, where=where, initial=-jnp.inf, keepdims=True)\n x_safe = x_arr if where is None else jnp.where(where, x_arr, -jnp.inf)\n shifted = x_safe - lax.stop_gradient(x_max)\n shifted_logsumexp = jnp.log(\n jnp.sum(jnp.exp(shifted), axis, where=where, keepdims=True))\n result = shifted - shifted_logsumexp\n if where is not None:\n return jnp.where(where, result, -jnp.inf)\n return result\n\n\n# TODO(phawkins): this jit was found to change numerics in a test. Debug this.\n# @partial(jax.jit, static_argnames=("axis",))\ndef softmax(x: ArrayLike,\n axis: int | tuple[int, ...] | None = -1,\n where: ArrayLike | None = None) -> Array:\n r"""Softmax function.\n\n Computes the function which rescales elements to the range :math:`[0, 1]`\n such that the elements along :code:`axis` sum to :math:`1`.\n\n .. math ::\n \mathrm{softmax}(x) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}\n\n Args:\n x : input array\n axis: the axis or axes along which the softmax should be computed. The\n softmax output summed across these dimensions should sum to :math:`1`.\n Either an integer or a tuple of integers.\n where: Elements to include in the :code:`softmax`.\n\n Returns:\n An array.\n\n Note:\n If any input values are ``+inf``, the result will be all ``NaN``: this reflects the\n fact that ``inf / inf`` is not well-defined in the context of floating-point math.\n\n See also:\n :func:`log_softmax`\n """\n if config.softmax_custom_jvp.value:\n # mypy is confused by the `functools.partial` application in the definition\n # of `_softmax` and incorrectly concludes that `_softmax` returns\n # `ReturnValue` -- the unsubstituted type parameter of `custom_jvp`.\n return _softmax(x, axis, where)\n else:\n return _softmax_deprecated(x, axis, where)\n\n# TODO(mattjj): replace softmax with _softmax when deprecation flag is removed\n@partial(jax.custom_jvp, nondiff_argnums=(1,))\ndef _softmax(\n x: ArrayLike,\n axis: int | tuple[int, ...] | None = -1,\n where: ArrayLike | None = None,\n initial: ArrayLike | None = -jnp.inf) -> Array:\n x_max = jnp.max(x, axis, where=where, initial=initial, keepdims=True)\n x_safe = x if where is None else jnp.where(where, x, initial)\n unnormalized = jnp.exp(x_safe - x_max)\n result = unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)\n if where is not None:\n result = jnp.where(where, result, 0)\n return result\n\n@_softmax.defjvp\ndef _softmax_jvp(axis, primals, tangents):\n (x, where, initial), (x_dot, _, _) = primals, tangents\n y = _softmax(x, axis, where, initial)\n return y, y * (x_dot - (y * x_dot).sum(axis, where=where, keepdims=True))\n\ndef _softmax_deprecated(\n x: ArrayLike,\n axis: int | tuple[int, ...] 
| None = -1,\n where: ArrayLike | None = None,\n initial: ArrayLike | None = -jnp.inf) -> Array:\n x_max = jnp.max(x, axis, where=where, initial=initial, keepdims=True)\n x_safe = x if where is None else jnp.where(where, x, initial)\n unnormalized = jnp.exp(x_safe - lax.stop_gradient(x_max))\n result = unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)\n if where is not None:\n result = jnp.where(where, result, 0)\n return result\n\n\n@partial(jax.jit, static_argnames=("axis",))\ndef standardize(x: ArrayLike,\n axis: int | tuple[int, ...] | None = -1,\n mean: ArrayLike | None = None,\n variance: ArrayLike | None = None,\n epsilon: ArrayLike = 1e-5,\n where: ArrayLike | None = None) -> Array:\n r"""Standardizes input to zero mean and unit variance.\n\n The standardization is given by:\n\n .. math::\n\n x_{std} = \frac{x - \langle x\rangle}{\sqrt{\langle(x - \langle x\rangle)^2\rangle + \epsilon}}\n\n where :math:`\langle x\rangle` indicates the mean of :math:`x`, and :math:`\epsilon` is\n a small correction factor introduced to avoid division by zero.\n\n Args:\n x: input array to be standardized.\n axis: integer or tuple of integers representing the axes along which\n to standardize. Defaults to the last axis (``-1``).\n mean: optionally specify the mean used for standardization. If not specified,\n then ``x.mean(axis, where=where)`` will be used.\n variance: optionally specify the variance used for standardization. If not\n specified, then ``x.var(axis, where=where)`` will be used.\n epsilon: correction factor added to variance to avoid division by zero; defaults\n to ``1E-5``.\n where: optional boolean mask specifying which elements to use when computing\n the mean and variance.\n\n Returns:\n An array of the same shape as ``x`` containing the standardized input.\n """\n numpy_util.check_arraylike("standardize", x)\n numpy_util.check_arraylike_or_none("standardize", mean, variance, where)\n if mean is None:\n mean = jnp.mean(x, axis, keepdims=True, where=where)\n if variance is None:\n # this definition is traditionally seen as less accurate than jnp.var's\n # mean((x - mean(x))**2) but may be faster and even, given typical\n # activation distributions and low-precision arithmetic, more accurate\n # when used in neural network normalization layers\n variance = jnp.mean(\n jnp.square(x), axis, keepdims=True, where=where) - jnp.square(mean)\n return jnp.subtract(x, jnp.asarray(mean)) * lax.rsqrt(jnp.asarray(variance) + epsilon)\n\n# TODO(slebedev): Change the type of `x` to `ArrayLike`.\n@partial(jax.jit, static_argnames=("num_classes", "dtype", "axis"))\ndef _one_hot(x: Array, num_classes: int, *,\n dtype: Any, axis: int | AxisName) -> Array:\n num_classes = core.concrete_dim_or_error(\n num_classes,\n "The error arose in jax.nn.one_hot argument `num_classes`.")\n dtype = dtypes.canonicalize_dtype(dtype)\n try:\n output_pos_axis = util.canonicalize_axis(axis, x.ndim + 1) # type: ignore[arg-type]\n except TypeError:\n axis_size = lax.axis_size(axis)\n if num_classes != axis_size:\n raise ValueError(f"Expected num_classes to match the size of axis {axis}, "\n f"but {num_classes} != {axis_size}") from None\n axis_idx = lax.axis_index(axis)\n return jnp.asarray(_dot_product_attention_xla == axis_idx, dtype=dtype)\n axis = operator.index(axis) # type: ignore[arg-type]\n lhs = lax.expand_dims(x, (axis,))\n rhs_shape = [1] * x.ndim\n rhs_shape.insert(output_pos_axis, num_classes)\n # TODO(yashkatariya): Maybe expose `out_sharding` on `one_hot` too?\n rhs_sharding = 
NamedSharding(x.aval.sharding.mesh, P(*[None] * len(rhs_shape))) # pytype: disable=attribute-error\n rhs = lax.broadcasted_iota(x.dtype, rhs_shape, output_pos_axis,\n out_sharding=rhs_sharding)\n return (lhs == rhs).astype(dtype)\n\n# TODO(slebedev): Change the type of `x` to `ArrayLike`.\ndef one_hot(x: Any, num_classes: int, *,\n dtype: Any = jnp.float_, axis: int | AxisName = -1) -> Array:\n """One-hot encodes the given indices.\n\n Each index in the input ``x`` is encoded as a vector of zeros of length\n ``num_classes`` with the element at ``index`` set to one::\n\n >>> jax.nn.one_hot(jnp.array([0, 1, 2]), 3)\n Array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], dtype=float32)\n\n Indices outside the range [0, num_classes) will be encoded as zeros::\n\n >>> jax.nn.one_hot(jnp.array([-1, 3]), 3)\n Array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)\n\n Args:\n x: A tensor of indices.\n num_classes: Number of classes in the one-hot dimension.\n dtype: optional, a float dtype for the returned values (default :obj:`jnp.float_`).\n axis: the axis or axes along which the function should be\n computed.\n """\n num_classes = core.concrete_dim_or_error(\n num_classes,\n "The error arose in jax.nn.one_hot argument `num_classes`.")\n x_arr = jnp.asarray(x)\n if not jnp.isdtype(x_arr.dtype, "integral"):\n # Deprecated 2024-12-18\n deprecations.warn(\n 'jax-nn-one-hot-float-input',\n f"jax.nn.one_hot input should be integer-typed; got dtype={x_arr.dtype}",\n stacklevel=1)\n return _one_hot(x_arr, num_classes, dtype=dtype, axis=axis)\n\n\n@jax.custom_jvp\n@jax.jit\ndef relu6(x: ArrayLike) -> Array:\n r"""Rectified Linear Unit 6 activation function.\n\n Computes the element-wise function\n\n .. math::\n \mathrm{relu6}(x) = \min(\max(x, 0), 6)\n\n except under differentiation, we take:\n\n .. math::\n \nabla \mathrm{relu}(0) = 0\n\n and\n\n .. math::\n \nabla \mathrm{relu}(6) = 0\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`relu`\n """\n return jnp.minimum(jnp.maximum(x, 0), 6.)\nrelu6.defjvps(lambda g, ans, x:\n lax.select((x > 0) & (x < 6), g, lax.full_like(g, 0)))\n\n@jax.jit\ndef hard_sigmoid(x: ArrayLike) -> Array:\n r"""Hard Sigmoid activation function.\n\n Computes the element-wise function\n\n .. math::\n \mathrm{hard\_sigmoid}(x) = \frac{\mathrm{relu6}(x + 3)}{6}\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`relu6`\n """\n return relu6(x + 3.) / 6.\n\n@jax.jit\ndef hard_silu(x: ArrayLike) -> Array:\n r"""Hard SiLU (swish) activation function\n\n Computes the element-wise function\n\n .. 
math::\n \mathrm{hard\_silu}(x) = x \cdot \mathrm{hard\_sigmoid}(x)\n\n Both :func:`hard_silu` and :func:`hard_swish` are aliases for the same\n function.\n\n Args:\n x : input array\n\n Returns:\n An array.\n\n See also:\n :func:`hard_sigmoid`\n """\n numpy_util.check_arraylike("hard_silu", x)\n x_arr = jnp.asarray(x)\n return x_arr * hard_sigmoid(x_arr)\n\nhard_swish = hard_silu\n\ndef _get_large_negative(dtype):\n dtype_max = jnp.finfo(dtype).max\n return jnp.asarray(-0.7 * dtype_max, dtype=dtype)\n\ndef _get_causal_mask(T, S):\n mask = jnp.tril(jnp.ones((T, S), dtype=jnp.bool_))\n return mask[None, None, :, :]\n\ndef _get_window_mask(T: int, S: int, local_window_size: tuple[int, int]):\n query_pos = jnp.array(range(T))\n key_pos = jnp.array(range(S))\n left_window, right_window = local_window_size\n left_mask = query_pos[..., None] <= key_pos[..., None, :] + left_window\n right_mask = query_pos[..., None] >= key_pos[..., None, :] - right_window\n return jnp.logical_and(right_mask, left_mask)[None, None, :, :]\n\ndef _get_padding_mask_logits(T, S, q_seqlen, kv_seqlen):\n q_mask = True\n kv_mask = True\n if q_seqlen is not None:\n q_indices = jnp.arange(0, T)[None, :, None]\n q_mask = q_indices < q_seqlen[:, None, None]\n if kv_seqlen is not None:\n kv_indices = jnp.arange(0, S)[None, None, :]\n kv_mask = kv_indices < kv_seqlen[:, None, None]\n mask = jnp.logical_and(q_mask, kv_mask)\n return mask[:, None, :, :]\n\ndef _get_padding_mask_encoded(T, q_seqlen):\n q_indices = jnp.arange(0, T)[None, :]\n mask = q_indices < q_seqlen[:, None]\n return mask[:, :, None, None]\n\ndef _apply_masks(logits, mask, is_causal, q_seqlen, kv_seqlen,\n local_window_size):\n if mask is None and not is_causal and q_seqlen is None and kv_seqlen is None:\n return logits\n\n combined_mask = jnp.ones_like(logits, dtype=jnp.bool_)\n if mask is not None:\n assert mask.dtype == jnp.bool_\n combined_mask = jnp.logical_and(combined_mask, mask)\n\n T, S = logits.shape[2], logits.shape[3]\n\n if is_causal:\n mask = _get_causal_mask(T, S)\n combined_mask = jnp.logical_and(combined_mask, mask)\n\n if local_window_size is not None:\n mask = _get_window_mask(T, S, local_window_size)\n combined_mask = jnp.logical_and(combined_mask, mask)\n\n if q_seqlen is not None or kv_seqlen is not None:\n mask = _get_padding_mask_logits(T, S, q_seqlen, kv_seqlen)\n combined_mask = jnp.logical_and(combined_mask, mask)\n\n large_negative_number = _get_large_negative(logits.dtype)\n padded_logits = jnp.where(combined_mask, logits, large_negative_number)\n return padded_logits\n\ndef _dot_product_attention_core(query, key, value, bias, mask, is_causal,\n scale, q_seqlen, kv_seqlen, local_window_size):\n logits_dtype = jnp.promote_types(query.dtype, jnp.float32)\n\n # If the query and logits dtypes are different, then the default precision\n # can use inconsistent types in the backwards pass\n # (see https://github.com/jax-ml/jax/issues/24047).\n if query.dtype == jnp.bfloat16:\n precision = jax.lax.DotAlgorithmPreset.BF16_BF16_F32\n elif query.dtype == jnp.float16:\n precision = jax.lax.DotAlgorithmPreset.F16_F16_F32\n # TODO(sbodenstein): Implement this fix for all dtypes.\n else:\n precision = None\n\n # Explicit precision will fail on platforms that don't support it. 
For example,\n # some GPUs do not support BF16_BF16_F32, and TPU does not support F16_F16_F32.\n # Use the default precision as a fallback in these cases.\n try:\n logits = jnp.einsum(\n "BTNH,BSNH->BNTS",\n query,\n key,\n precision=precision,\n preferred_element_type=logits_dtype,\n )\n except: # pylint: disable=bare-except\n logits = jnp.einsum(\n "BTNH,BSNH->BNTS",\n query,\n key,\n precision=None,\n preferred_element_type=logits_dtype,\n )\n\n logits *= jnp.array(scale, dtype=logits.dtype)\n\n if bias is not None:\n logits = (logits + bias).astype(logits.dtype)\n\n padded_logits = _apply_masks(logits, mask, is_causal, q_seqlen, kv_seqlen,\n local_window_size)\n\n # Softmax and it is always carried out in fp32.\n padded_logits = padded_logits.astype(jnp.float32)\n probs = jax.nn.softmax(padded_logits, axis=-1).astype(key.dtype)\n\n encoded = jnp.einsum('BNTS,BSNH->BTNH', probs, value)\n if q_seqlen is not None and kv_seqlen is not None:\n mask = _get_padding_mask_encoded(encoded.shape[1], q_seqlen)\n encoded *= mask.astype(encoded.dtype)\n return encoded\n\ndef _dot_product_attention_xla(\n query: Array,\n key: Array,\n value: Array,\n bias: Array | None,\n mask: Array | None,\n is_causal: bool,\n scale: float,\n q_seqlen: Array | None,\n kv_seqlen: Array | None,\n local_window_size: tuple[int, int] | None):\n\n B, T, N, H = query.shape\n _, S, K, _ = key.shape\n G = N // K\n\n query = jnp.reshape(query, (B, T, K, G, H))\n def _reshape_to_grouped(t):\n if t is not None:\n tB, tN, tT, tS = t.shape\n if tN == 1:\n t = jnp.broadcast_to(t[:, :, None, :, :], (tB, tN, G, tT, tS))\n else:\n assert tN == N\n t = jnp.reshape(t, (tB, K, G, tT, tS))\n return t\n bias = _reshape_to_grouped(bias)\n mask = _reshape_to_grouped(mask)\n vmapped_fn = jax.vmap(\n _dot_product_attention_core,\n in_axes=(3, None, None, 2, 2, None, None, None, None, None),\n out_axes=3,\n )\n encoded = vmapped_fn(query, key, value, bias, mask, is_causal, scale,\n q_seqlen, kv_seqlen, local_window_size)\n encoded = jnp.reshape(encoded, (B, T, N, H))\n return encoded\n\ndef bias_fwd_rule(a, query_head_num):\n return bias_fwd_p.bind(a, query_head_num), a\ndef bias_bwd_rule(query_head_num, res, g):\n a = res\n if a.shape[0] > 1 or a.shape[-3] != query_head_num:\n raise ValueError("cuDNN only supports bias gradient when the batch size is "\n f"1 and the head number matches the query, but got "\n f"B={a.shape[0]}, N={a.shape[-3]}.")\n return (bias_bwd_p.bind(g, a, query_head_num),)\n\n# This function uses two custom primitives, `bias_fwd` and `bias_bwd`, to work\n# around a cuDNN issue where bias gradients are only supported when the batch\n# size is 1 and the number of heads matches the query.\n# TODO(kaixih@nvidia): Remove this workaround once cuDNN resolves the issue.\n@partial(jax.custom_vjp, nondiff_argnums=(1,))\ndef check_valid_bias_batch(x, query_head_num):\n output, _ = bias_fwd_rule(x, query_head_num)\n return output\ncheck_valid_bias_batch.defvjp(bias_fwd_rule, bias_bwd_rule)\n\nbias_fwd_p = core.Primitive('bias_fwd')\nbias_fwd_p.multiple_results = False\nbias_bwd_p = core.Primitive('bias_bwd')\nbias_bwd_p.multiple_results = False\n\ndef bias_fwd_impl(a, query_head_num):\n return a\ndef bias_bwd_impl(g, a, query_head_num):\n return g\nbias_fwd_p.def_impl(bias_fwd_impl)\nbias_bwd_p.def_impl(bias_bwd_impl)\n\ndef bias_fwd_abstract_eval(a, query_head_num):\n return core.ShapedArray(a.shape, a.dtype)\ndef bias_bwd_abstract_eval(g, a, query_head_num):\n return core.ShapedArray(g.shape, 
g.dtype)\nbias_fwd_p.def_abstract_eval(bias_fwd_abstract_eval)\nbias_bwd_p.def_abstract_eval(bias_bwd_abstract_eval)\n\ndef bias_fwd_lowering(ctx, a, query_head_num):\n return [a]\ndef bias_bwd_lowering(ctx, g, a, query_head_num):\n return [g]\nmlir.register_lowering(bias_fwd_p, bias_fwd_lowering)\nmlir.register_lowering(bias_bwd_p, bias_bwd_lowering)\n\ndef bias_fwd_batch_rule(batched_args, batch_dims):\n x, query_head_num = batched_args\n a = batch_dims[0]\n output, _ = bias_fwd_rule(x, query_head_num)\n return output, a\ndef bias_bwd_batch_rule(batched_args, batch_dims):\n g, x, query_head_num = batched_args\n b = batch_dims[0]\n *Bs, _, _, _ = x.shape\n B = math.prod(Bs)\n x = jnp.reshape(x, (B,) + x.shape[-3:])\n output, = bias_bwd_rule(query_head_num, x, g)\n return output, b\nbatching.primitive_batchers[bias_fwd_p] = bias_fwd_batch_rule\nbatching.primitive_batchers[bias_bwd_p] = bias_bwd_batch_rule\n\ndef dot_product_attention(\n query: ArrayLike,\n key: ArrayLike,\n value: ArrayLike,\n bias: ArrayLike | None = None,\n mask: ArrayLike | None = None,\n *,\n scale: float | None = None,\n is_causal: bool = False,\n query_seq_lengths: ArrayLike | None = None,\n key_value_seq_lengths: ArrayLike | None = None,\n local_window_size: int | tuple[int, int] | None = None,\n implementation: Literal['xla', 'cudnn'] | None = None) -> Array:\n r"""Scaled dot product attention function.\n\n Computes the attention function on Query, Key, and Value tensors:\n\n .. math::\n\n \mathrm{Attention}(Q, K, V)=\mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V\n\n If we define :code:`logits` as the output of :math:`QK^T` and the\n :code:`probs` as the output of :math:`softmax`.\n\n Throughout this function, we utilize the following uppercase letters to\n represent the shape of array::\n\n B = batch size\n S = length of the key/value (source)\n T = length of the query (target)\n N = number of attention heads\n H = dimensions of each attention head\n K = number of key/value heads\n G = number of groups, which equals to N // K\n\n Args:\n query: query array; shape :code:`(BTNH|TNH)`\n key: key array: shape :code:`(BSKH|SKH)`. When `K` equals `N`, multi-headed\n attention (MHA https://arxiv.org/abs/1706.03762) is performed. Otherwise,\n grouped query attention (GQA https://arxiv.org/abs/2305.13245) is\n performed if `N` is a multiple of `K`, and multi-query attention (MQA\n https://arxiv.org/abs/1911.02150) is performed if `K == 1` (a special case\n of GQA).\n value: value array, should have the same shape as the `key` array.\n bias: optional, bias array to be added to logits; The shape must be 4D and\n be broadcastable to :code:`(BNTS|NTS)`.\n mask: optional, mask array used to filter out logits. It is a boolean mask\n where `True` indicates the element should take part in attention. For an\n additive mask, users should pass it to `bias`. The shape must be 4D and be\n broadcastable to :code:`(BNTS|NTS)`.\n scale: scale for the logits. If None, the scale will be set to 1 divided by\n the square root of query's head dimension (i.e. H).\n is_causal: If true, causal attention will be applied. 
Note, some\n implementations like `xla` will generate a mask tensor and apply it to the\n logits to mask out the non-causal parts of the attention matrix, but other\n implementations like `cudnn` will avoid computing the non-causal regions,\n providing speedups.\n query_seq_lengths: `int32` array of sequence lengths for query; shape\n :code:`(B)`\n key_value_seq_lengths: `int32` array of sequence lengths for key and value;\n shape :code:`(B)`\n local_window_size: Window sizes to make self attention to attend to each\n token's local window. If set, this specifies the (left_window_size,\n right_window_size) for each token. E.g., if local_window_size == (3, 2)\n and the sequence is [0, 1, 2, 3, 4, 5, c, 7, 8, 9], token `c` can attend\n to [3, 4, 5, c, 7, 8]. If a single int is given, it will be interpreted as\n a symmetric window (window_size, window_size).\n implementation: A string to control which implementation backend to use.\n Supported strings are `xla`, `cudnn` (cuDNN flash attention). It defaults\n to `None`, which will automatically select the best available backend.\n Note, `cudnn` supports only a subset of shapes/dtypes, and an exception\n will be thrown if its not supported.\n\n Returns:\n An array of the attention output with the same shape as :code:`query`.\n """\n output_shape = jnp.asarray(query).shape\n def _ensure_4d(t):\n t = jnp.asarray(t)\n dims_to_add = 4 - t.ndim\n if dims_to_add > 0:\n return jnp.expand_dims(t, axis=tuple(range(dims_to_add)))\n return t\n\n query_arr = _ensure_4d(query)\n key_arr = _ensure_4d(key)\n value_arr = _ensure_4d(value)\n bias = _ensure_4d(bias) if bias is not None else None\n mask = _ensure_4d(mask) if mask is not None else None\n if query_seq_lengths is not None:\n query_seq_lengths = jnp.asarray(query_seq_lengths)\n if key_value_seq_lengths is not None:\n key_value_seq_lengths = jnp.asarray(key_value_seq_lengths)\n if isinstance(local_window_size, int):\n local_window_size = (local_window_size, local_window_size)\n\n def _check_shape_and_dtype(t: Array | None, shape: Sequence[int],\n dtype: DType | None, name: str) -> None:\n if t is None:\n return\n if t.ndim != len(shape):\n raise ValueError(f"{name} ndim should be {len(shape)}, but got {t.ndim}")\n if dtype is not None and t.dtype != dtype:\n raise ValueError(f"{name} dtype should be {dtype}, but got {t.dtype}")\n for i in range(t.ndim):\n if shape[i] != -1 and t.shape[i] != shape[i]:\n raise ValueError(f"{name} shape should be {shape}: but got {t.shape}")\n\n B, S, K, H = key_arr.shape\n _check_shape_and_dtype(value_arr, [B, S, K, H], key_arr.dtype, 'value')\n _check_shape_and_dtype(query_arr, [B, -1, -1, H], key_arr.dtype, 'query')\n _check_shape_and_dtype(mask, [-1] * 4, jnp.bool_, 'mask')\n _check_shape_and_dtype(bias, [-1] * 4, None, 'bias')\n _check_shape_and_dtype(query_seq_lengths, [B], jnp.int32,\n 'query_seq_lengths')\n _check_shape_and_dtype(key_value_seq_lengths, [B], jnp.int32,\n 'key_value_seq_lengths')\n if query_arr.shape[-2] % K != 0:\n raise ValueError(f"The number of query heads must be a multiple of "\n f"key/value heads, but got {query_arr.shape[-2]} vs {K}")\n\n scale_val = (1.0 / np.sqrt(H)) if scale is None else scale\n\n match implementation:\n case 'xla':\n out = _dot_product_attention_xla(\n query_arr, key_arr, value_arr, bias, mask, is_causal=is_causal,\n scale=scale_val, q_seqlen=query_seq_lengths,\n kv_seqlen=key_value_seq_lengths,\n local_window_size=local_window_size,\n )\n case 'cudnn':\n if bias is not None:\n bias = check_valid_bias_batch(bias, 
query_arr.shape[-2])\n bias = jnp.asarray(bias)\n use_padding = (\n query_seq_lengths is not None or key_value_seq_lengths is not None\n )\n if use_padding:\n if query_seq_lengths is None:\n T = query_arr.shape[1]\n query_seq_lengths = jnp.full((B,), T, dtype=jnp.int32)\n if key_value_seq_lengths is None:\n key_value_seq_lengths = jnp.full((B,), S, dtype=jnp.int32)\n\n mask_type = MaskType.NO_MASK\n if use_padding and is_causal:\n mask_type = MaskType.PADDING_CAUSAL\n elif is_causal:\n mask_type = MaskType.CAUSAL\n elif use_padding:\n mask_type = MaskType.PADDING\n # CuDNN supports only the left window with an exclusive boundary when\n # causal mask is enabled.\n sliding_window = None\n if local_window_size is not None:\n l_window, r_window = local_window_size\n if r_window == 0 or mask_type == MaskType.CAUSAL:\n sliding_window = l_window + 1\n else:\n raise ValueError(f"cuDNN doesn't support right window: {r_window} "\n "when causal mask is not used.")\n\n out = cudnn_dot_product_attention(\n query_arr, key_arr, value_arr, bias, mask, query_seq_lengths,\n key_value_seq_lengths, scale=scale_val, mask_type=mask_type,\n sliding_window_length=sliding_window,\n )\n case None:\n # TODO(kaixih@nvidia) Defaults to XLA for now. Will automatically select\n # best backend.\n out = _dot_product_attention_xla(\n query_arr, key_arr, value_arr, bias, mask, is_causal=is_causal,\n scale=scale_val, q_seqlen=query_seq_lengths,\n kv_seqlen=key_value_seq_lengths,\n local_window_size=local_window_size,\n )\n case _:\n raise ValueError(f"Unsupported implementation option: {implementation}")\n\n return jnp.reshape(out, output_shape)\n\ndef scaled_matmul(\n lhs: Array,\n rhs: Array,\n lhs_scales: Array,\n rhs_scales: Array,\n preferred_element_type: DTypeLike = jnp.float32,\n) -> Array:\n r"""Scaled matrix multiplication function.\n\n Performs block-scaled matmul of `a` and `b` using `a_scales` and `b_scales`.\n The last dim is the contracting dim, and block size is inferred.\n\n Mathematically, this operation is equivalent to::\n\n a_block_size = a.shape[-1] // a_scales.shape[-1]\n b_block_size = b.shape[-1] // b_scales.shape[-1]\n a_scaled = a * jnp.repeat(a_scales, a_block_size, axis=-1)\n b_scaled = b * jnp.repeat(b_scales, b_block_size, axis=-1)\n jnp.einsum('BMK,BNK->BMN', a_scaled, b_scaled)\n\n Args:\n lhs (Array): Operand a, shape (B, M, K).\n rhs (Array): Operand b, shape (B, N, K).\n lhs_scales (Array): Shape (B, M, K_a), where `K % K_a == 0`.\n rhs_scales (Array): Shape (B, N, K_b), where `K % K_b == 0`.\n preferred_element_type (DTypeLike, optional): Defaults to `jnp.float32`.\n\n Returns:\n Array of shape (B, M, N).\n\n Notes:\n - We currently do not support user-defined `precision` for customizing the\n compute data type. 
It is fixed to `jnp.float32`.\n - Block size is inferred as `K // K_a` for `a` and `K // K_b` for `b`.\n - To use cuDNN with Nvidia Blackwell GPUs, inputs must match::\n\n # mxfp8\n a, b: jnp.float8_e4m3fn | jnp.float8_e5m2\n a_scales, b_scales: jnp.float8_e8m0fnu\n block_size: 32\n # nvfp4\n a, b: jnp.float4_e2m1fn\n a_scales, b_scales: jnp.float8_e4m3fn\n block_size: 16\n\n Examples:\n\n Basic case:\n\n >>> a = jnp.array([1, 2, 3]).reshape((1, 1, 3))\n >>> b = jnp.array([4, 5, 6]).reshape((1, 1, 3))\n >>> a_scales = jnp.array([0.5]).reshape((1, 1, 1))\n >>> b_scales = jnp.array([0.5]).reshape((1, 1, 1))\n >>> scaled_matmul(a, b, a_scales, b_scales) # doctest: +SKIP\n Array([[[8.]]], dtype=float32)\n\n Using fused cuDNN call on Blackwell GPUs:\n\n >>> dtype = jnp.float8_e4m3fn\n >>> a = jax.random.normal(jax.random.PRNGKey(1), (3, 128, 64), dtype=dtype)\n >>> b = jax.random.normal(jax.random.PRNGKey(2), (3, 128, 64), dtype=dtype)\n >>> a_scales = jnp.ones((3, 128, 4), dtype=jnp.float8_e8m0fnu)\n >>> b_scales = jnp.ones((3, 128, 4), dtype=jnp.float8_e8m0fnu)\n >>> scaled_matmul(a, b, a_scales, b_scales) # doctest: +SKIP\n """\n a, b, a_scales, b_scales = lhs, rhs, lhs_scales, rhs_scales\n if not all(x.ndim == 3 for x in (a, b, a_scales, b_scales)):\n raise ValueError(\n "scaled_matmul requires all inputs to be 3-dimensional arrays"\n )\n\n B_a, M_a, K_a = a.shape\n B_b, N_b, K_b = b.shape\n if K_a != K_b or B_a != B_b:\n raise ValueError(\n "scaled_matmul requires inputs a and b to have matching batch (B) "\n f"and contract (K) dimensions, but got shapes {a.shape} and "\n f"{b.shape}"\n )\n\n B_as, M_as, K_as = a_scales.shape\n B_bs, N_bs, K_bs = b_scales.shape\n if K_as != K_bs or B_as != B_bs:\n raise ValueError(\n "scaled_matmul requires scales to have matching batch (B) and "\n f"contract (K) dimensions, but got shapes {a_scales.shape} and "\n f"{b_scales.shape}"\n )\n\n if M_as != M_a or N_bs != N_b:\n raise ValueError(\n "scaled_matmul requires scales to match non-contract dimensions of "\n f"inputs, but got shapes a: {a.shape}, b: {b.shape}, a_scales: "\n f"{a_scales.shape}, b_scales: {b_scales.shape}"\n )\n\n preferred_element_type = dtypes.canonicalize_dtype(\n np.dtype(preferred_element_type)\n )\n out = cudnn_scaled_matmul(\n a,\n b,\n a_scales,\n b_scales,\n preferred_element_type=preferred_element_type,\n )\n return out\n\ndef get_scaled_dot_general_config(mode: Literal['nvfp4', 'mxfp8'],\n global_scale: Array | None = None):\n r"""Get quantization configs for scaled_dot_general.\n\n Create quantization configs for the `jax.nn.scaled_dot_general`.\n\n See Also:\n - :func:`jax.nn.scaled_dot_general`: Scaled dot general function.\n """\n\n if mode == 'nvfp4':\n one = jnp.ones((1,), dtype=jnp.float32)\n return BlockScaleConfig(\n mode='nvfp4',\n block_size=16,\n data_type=jnp.float4_e2m1fn,\n scale_type=jnp.float8_e4m3fn,\n global_scale=one if global_scale is None else global_scale,\n infer_only=False\n )\n elif mode == 'mxfp8':\n return BlockScaleConfig(\n mode='mxfp8',\n block_size=32,\n data_type=jnp.float8_e4m3fn,\n scale_type=jnp.float8_e8m0fnu,\n global_scale=None,\n infer_only=False\n )\n else:\n raise ValueError(f"Unsupported mode: {mode}")\n\ndef scaled_dot_general(\n lhs, rhs,\n dimension_numbers,\n preferred_element_type=jnp.float32,\n configs: List[BlockScaleConfig] | None = None,\n implementation: Literal['cudnn'] | None = None,\n ):\n r"""Scaled dot general operation.\n\n Performs a generalized dot product with block-scaled quantization on the\n lhs and rhs inputs. 
This operation extends `lax.dot_general` to support\n user-defined scaling configurations.\n\n Essentially, the operation follows::\n\n a, a_scales = quantize(lhs, configs[0])\n b, b_scales = quantize(rhs, configs[1])\n c = jax.nn.scaled_matmul(a, b, a_scales, b_scales)\n\n Args:\n lhs (ArrayLike): Input array.\n rhs (ArrayLike): Input array.\n dimension_numbers (DotDimensionNumbers): A tuple of two tuples specifying\n the contraction and batch dimensions:\n `((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))`.\n preferred_element_type (DTypeLike, optional): Output data type of the dot\n product. Defaults to `jnp.float32`. Other valid types include\n `jnp.bfloat16` and `jnp.float16`.\n configs (list of BlockScaleConfig, optional): Scaling configurations for\n lhs, rhs, and gradients. Users can obtain valid configurations via\n `jax.nn.get_scaled_dot_general_config`. Currently, `nvfp4` and `mxfp8`\n are supported. If `None`, falls back to `lax.dot_general`.\n implementation: str\n (Deprecated) Backend selector, now ignored. The system chooses the backend\n automatically. Scheduled for removal in future releases.\n\n Returns:\n Array: The resulting tensor, with batch dimensions first, followed by\n non-contracting/non-batch dimensions of lhs, and then those of rhs.\n\n See Also:\n - :func:`jax.nn.scaled_matmul`: Scaled matmul function.\n - :func:`jax.lax.dot_general`: General dot product operator.\n\n Notes:\n - Unlike `nn.scaled_matmul`, which assumes quantized low-precision\n inputs with explicit scaling factors, this operator takes high-precision\n inputs, applies quantization internally, and handles the backward pass.\n\n Examples:\n\n Creating config for mxfp8:\n\n >>> configs = [jax.nn.get_scaled_dot_general_config('mxfp8')] * 3\n\n Creating config for nvfp4:\n\n >>> global_scale = jnp.array([0.5], jnp.float32)\n >>> configs = [jax.nn.get_scaled_dot_general_config('nvfp4', global_scale)] * 3\n\n Using scaled_dot_general with the configs:\n\n >>> import functools\n >>> scaled_dot_general_fn = functools.partial(jax.nn.scaled_dot_general, configs=configs)\n >>> lhs = jax.random.normal(jax.random.PRNGKey(1), (3, 128, 64))\n >>> rhs = jax.random.normal(jax.random.PRNGKey(2), (3, 128, 64))\n >>> out = scaled_dot_general_fn(lhs, rhs, (((2,), (2,)), ((0,), (0,)))) # doctest: +SKIP\n """\n if implementation is not None:\n warnings.warn("Backend selector, now ignored. The system chooses the "\n "backend automatically.", DeprecationWarning)\n\n if configs is None:\n return lax.dot_general(lhs, rhs, dimension_numbers,\n preferred_element_type=preferred_element_type)\n\n out = cudnn_scaled_dot_general(\n lhs, rhs, dimension_numbers,\n preferred_element_type=preferred_element_type,\n configs=configs\n )\n\n return out\n
Language: python · Type: tab
Rows 171–179:

| Sequence | Time | File | RangeOffset | RangeLength | Text | Language | Type |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 171 | 1,124,230 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,697 | 0 | null | python | selection_mouse |
| 172 | 1,124,377 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,681 | 17 | query_seq_lengths | python | selection_mouse |
| 173 | 1,124,929 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,643 | 0 | null | python | selection_mouse |
| 174 | 1,125,084 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,640 | 4 | mask | python | selection_mouse |
| 175 | 1,127,473 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,691 | 0 | null | python | selection_mouse |
| 176 | 1,128,425 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,700 | 0 | null | python | selection_mouse |
| 177 | 1,129,089 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,642 | 0 | null | python | selection_mouse |
| 178 | 1,129,239 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 34,640 | 4 | mask | python | selection_mouse |
| 179 | 1,135,834 | .venv/lib/python3.10/site-packages/jax/_src/nn/functions.py | 32,840 | 0 | null | python | selection_mouse |
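In the selection rows above, Text looks like the span of the file at [RangeOffset, RangeOffset + RangeLength): Sequence 82 has RangeOffset 15,134, RangeLength 21 and Text `dot_product_attention`, and Sequence 120 starts 14 characters earlier (offset 15,120) with its Text gaining the prefix `attention_fn: `. The sketch below checks that reading; the zero-based character-offset interpretation is an assumption, not something the dump states.

```python
# Sketch only. Assumption (not stated in the dump): RangeOffset/RangeLength
# are zero-based character offsets into the current file's text, such as the
# full contents captured by a "tab" event's Text field.
from typing import Optional


def selected_span(file_text: str, range_offset: int, range_length: int) -> Optional[str]:
    if range_length == 0:
        return None  # zero-length ranges carry a null Text in the rows above
    return file_text[range_offset:range_offset + range_length]


# Against Sequence 82 (flax/linen/attention.py, offset 15,134, length 21), this
# should return "dot_product_attention" if the interpretation above is correct:
# selected_span(attention_py_text, 15_134, 21)
```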