Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- llava_next/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc +3 -0
- llava_next/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc +3 -0
- llava_next/lib/python3.10/site-packages/torch/_prims_common/__init__.py +1844 -0
- llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/amp/autocast_mode.py +435 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py +171 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/normalize.py +162 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/sparse/__init__.py +596 -0
- llava_next/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_conversions.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/allocator.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/config_v2.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py +32 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h +17 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h +640 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h +58 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h +1115 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h +778 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py +13 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp +53 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h +21 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py +50 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh +22 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h +20 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py +52 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu +86 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp +59 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.h +20 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu +169 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/moe_scatter.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp +67 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cuh +23 -0
.gitattributes
CHANGED
|
@@ -1197,3 +1197,6 @@ llava_next/lib/python3.10/site-packages/torch/lib/libtorch.so filter=lfs diff=lf
|
|
| 1197 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1198 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1199 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 1197 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1198 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1199 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1200 |
+
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1201 |
+
llava_next/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1202 |
+
llava_next/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a88bd6b47dd78d2cc24564f4753bf056eb237eb387952d7313935ece5fa25c0e
|
| 3 |
+
size 128905
|
llava_next/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:91e0dfb601d47b09e5c2fa500972e50c528df8cd610d1a9e81dd0f7417d553dd
|
| 3 |
+
size 401263
|
llava_next/lib/python3.10/site-packages/torch/_prims_common/__init__.py
ADDED
|
@@ -0,0 +1,1844 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import operator
|
| 4 |
+
import warnings
|
| 5 |
+
import weakref
|
| 6 |
+
|
| 7 |
+
from contextlib import nullcontext
|
| 8 |
+
from enum import Enum
|
| 9 |
+
from functools import cmp_to_key, reduce
|
| 10 |
+
from typing import (
|
| 11 |
+
Any,
|
| 12 |
+
Callable,
|
| 13 |
+
cast,
|
| 14 |
+
List,
|
| 15 |
+
Optional,
|
| 16 |
+
overload,
|
| 17 |
+
Sequence,
|
| 18 |
+
Tuple,
|
| 19 |
+
Type,
|
| 20 |
+
Union,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
import sympy
|
| 24 |
+
|
| 25 |
+
import torch
|
| 26 |
+
from torch import sym_float, sym_int, sym_max
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
ShapeType = Union[torch.Size, List[int], Tuple[int, ...]]
|
| 30 |
+
StrideType = Union[List[int], Tuple[int, ...]]
|
| 31 |
+
DimsType = Union[int, List[int], Tuple[int, ...]]
|
| 32 |
+
DimsSequenceType = Union[List[int], Tuple[int, ...]]
|
| 33 |
+
# TODO: Type[torch.SymInt], Type[torch.SymFloat]
|
| 34 |
+
NumberTypeType = Union[Type[bool], Type[int], Type[float], Type[complex]]
|
| 35 |
+
# TODO: This needs a lot more type annotations
|
| 36 |
+
# NumberType = Union[bool, int, float, complex, torch.SymInt, torch.SymFloat]
|
| 37 |
+
NumberType = Union[bool, int, float, complex]
|
| 38 |
+
RealNumberType = Union[bool, int, float]
|
| 39 |
+
|
| 40 |
+
Number = (bool, int, float, complex, torch.SymInt, torch.SymFloat)
|
| 41 |
+
# I don't call it Integral because numbers.Integral includes bool, but IntLike
|
| 42 |
+
# does not
|
| 43 |
+
Dim = int
|
| 44 |
+
IntLike = (int, torch.SymInt)
|
| 45 |
+
FloatLike = (float, torch.SymFloat)
|
| 46 |
+
IntWithoutSymInt = int
|
| 47 |
+
FloatWithoutSymFloat = float
|
| 48 |
+
DeviceLikeType = Union[str, torch.device]
|
| 49 |
+
Tensor = torch.Tensor
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
torch_function_passthrough = {
|
| 53 |
+
torch.device,
|
| 54 |
+
torch.Tensor.dim,
|
| 55 |
+
torch.Tensor.ndim.__get__, # type: ignore[attr-defined]
|
| 56 |
+
torch.Tensor.numel,
|
| 57 |
+
torch.Tensor.size,
|
| 58 |
+
torch.Tensor.storage_offset,
|
| 59 |
+
torch.Tensor.stride,
|
| 60 |
+
torch.Tensor.dtype.__get__, # type: ignore[attr-defined]
|
| 61 |
+
torch.Tensor.is_sparse.__get__, # type: ignore[attr-defined]
|
| 62 |
+
torch.Tensor.shape.__get__, # type: ignore[attr-defined]
|
| 63 |
+
torch.Tensor.device.__get__, # type: ignore[attr-defined]
|
| 64 |
+
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
|
| 65 |
+
torch.Tensor.layout.__get__, # type: ignore[attr-defined]
|
| 66 |
+
torch.Tensor.is_contiguous,
|
| 67 |
+
# For TorchRefsMode only
|
| 68 |
+
torch.Tensor.__format__,
|
| 69 |
+
torch.Tensor.__repr__,
|
| 70 |
+
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
TensorLikeType = torch.Tensor
|
| 75 |
+
TensorLike = torch.Tensor
|
| 76 |
+
TensorSequenceType = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]]
|
| 77 |
+
TensorOrNumberLikeType = Union[TensorLikeType, NumberType]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def same_shape(a: ShapeType, b: ShapeType) -> bool:
|
| 81 |
+
if len(a) != len(b):
|
| 82 |
+
return False
|
| 83 |
+
|
| 84 |
+
for x, y in zip(a, b):
|
| 85 |
+
if x != y:
|
| 86 |
+
return False
|
| 87 |
+
|
| 88 |
+
return True
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# TODO: look at using torch.testing.assert_close instead with an option
|
| 92 |
+
# to just compare metadata
|
| 93 |
+
def compare_tensor_meta(a: TensorLikeType, b: TensorLikeType, check_strides=False):
|
| 94 |
+
"""
|
| 95 |
+
Checks that two tensor likes have the same shape,
|
| 96 |
+
dtype and device.
|
| 97 |
+
|
| 98 |
+
In the future this will validate additional metadata, like
|
| 99 |
+
strides.
|
| 100 |
+
"""
|
| 101 |
+
assert isinstance(a, TensorLike)
|
| 102 |
+
assert isinstance(b, TensorLike)
|
| 103 |
+
|
| 104 |
+
if not same_shape(a.shape, b.shape):
|
| 105 |
+
msg = f"Shapes {a.shape} and {b.shape} are not equal!"
|
| 106 |
+
raise AssertionError(msg)
|
| 107 |
+
|
| 108 |
+
if a.dtype != b.dtype:
|
| 109 |
+
msg = f"Dtypes {a.dtype} and {b.dtype} are not equal!"
|
| 110 |
+
raise AssertionError(msg)
|
| 111 |
+
|
| 112 |
+
if a.device != b.device:
|
| 113 |
+
# Handles special cuda:0 vs cuda case
|
| 114 |
+
# TODO: we should review why this happens and see about fixing it
|
| 115 |
+
if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and (
|
| 116 |
+
str(b.device) == "cuda:0" or str(b.device) == "cuda"
|
| 117 |
+
):
|
| 118 |
+
pass
|
| 119 |
+
else:
|
| 120 |
+
msg = f"Devices {a.device} and {b.device} are not equal!"
|
| 121 |
+
raise AssertionError(msg)
|
| 122 |
+
|
| 123 |
+
# Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050
|
| 124 |
+
if check_strides:
|
| 125 |
+
same_strides, idx = check_significant_strides(a, b)
|
| 126 |
+
if not same_strides:
|
| 127 |
+
msg = f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!"
|
| 128 |
+
raise RuntimeError(msg)
|
| 129 |
+
|
| 130 |
+
if a.storage_offset() != b.storage_offset():
|
| 131 |
+
msg = f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!"
|
| 132 |
+
raise RuntimeError(msg)
|
| 133 |
+
|
| 134 |
+
if a.is_conj() != b.is_conj():
|
| 135 |
+
raise RuntimeError(
|
| 136 |
+
f"Conj mismatch! is_conj is set to {a.is_conj()} and {b.is_conj()}"
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
if a.is_neg() != b.is_neg():
|
| 140 |
+
raise RuntimeError(
|
| 141 |
+
f"Neg mismatch! is_neg is set to {a.is_neg()} and {b.is_neg()}"
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _check_strides_helper(
|
| 146 |
+
a: TensorLikeType, b: TensorLikeType, *, only_cuda=True, significant_only=True
|
| 147 |
+
) -> Tuple[bool, Optional[int]]:
|
| 148 |
+
# NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch
|
| 149 |
+
# See https://github.com/pytorch/pytorch/issues/77553
|
| 150 |
+
# Only compares strides that are "meaningful" -- strides for dimensions with length > 1
|
| 151 |
+
# and for tensors with more than one element
|
| 152 |
+
if (
|
| 153 |
+
not only_cuda or a.device.type == "cuda" or b.device.type == "cuda"
|
| 154 |
+
) and a.numel() > 0:
|
| 155 |
+
for idx in range(a.ndim):
|
| 156 |
+
check = not significant_only or a.shape[idx] > 1
|
| 157 |
+
if a.stride()[idx] != b.stride()[idx] and check:
|
| 158 |
+
return False, idx
|
| 159 |
+
|
| 160 |
+
return True, None
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def check_significant_strides(
|
| 164 |
+
a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
|
| 165 |
+
) -> Tuple[bool, Optional[int]]:
|
| 166 |
+
return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=True)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def check_all_strides(
|
| 170 |
+
a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
|
| 171 |
+
) -> Tuple[bool, Optional[int]]:
|
| 172 |
+
return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=False)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
# This function is equivalent to compute_contiguous() from TensorImpl.cpp
|
| 176 |
+
def is_contiguous(a: TensorLikeType) -> bool:
|
| 177 |
+
"""
|
| 178 |
+
Tests whether a tensor is contiguous or not.
|
| 179 |
+
|
| 180 |
+
Tensors are contiguous when they have no elements,
|
| 181 |
+
one element, or when they have "nested" strides.
|
| 182 |
+
"""
|
| 183 |
+
if a.numel() < 2:
|
| 184 |
+
return True
|
| 185 |
+
|
| 186 |
+
expected_stride = 1
|
| 187 |
+
for x, y in reversed(tuple(zip(a.shape, a.stride()))):
|
| 188 |
+
# Skips checking strides when a dimension has length 1
|
| 189 |
+
if x == 1:
|
| 190 |
+
continue
|
| 191 |
+
|
| 192 |
+
if y != expected_stride:
|
| 193 |
+
return False
|
| 194 |
+
expected_stride = expected_stride * x
|
| 195 |
+
|
| 196 |
+
return True
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp
|
| 200 |
+
def is_channels_last_contiguous_2d(a: Tensor) -> bool:
|
| 201 |
+
# NHWC or not channels last 2D contiguous
|
| 202 |
+
if a.ndim != 4:
|
| 203 |
+
return False
|
| 204 |
+
|
| 205 |
+
expected_stride = 1
|
| 206 |
+
for idx in (1, 3, 2, 0):
|
| 207 |
+
length = a.shape[idx]
|
| 208 |
+
if length == 1:
|
| 209 |
+
continue
|
| 210 |
+
|
| 211 |
+
stride = a.stride()[idx]
|
| 212 |
+
if stride != expected_stride:
|
| 213 |
+
return False
|
| 214 |
+
|
| 215 |
+
expected_stride *= length
|
| 216 |
+
|
| 217 |
+
return True
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def is_channels_last_contiguous_3d(a: Tensor) -> bool:
|
| 221 |
+
# NDHWC or not channels last 3D contiguous
|
| 222 |
+
if a.ndim != 5:
|
| 223 |
+
return False
|
| 224 |
+
|
| 225 |
+
expected_stride = 1
|
| 226 |
+
for idx in (1, 4, 3, 2, 0):
|
| 227 |
+
length = a.shape[idx]
|
| 228 |
+
if length == 1:
|
| 229 |
+
continue
|
| 230 |
+
|
| 231 |
+
stride = a.stride()[idx]
|
| 232 |
+
if stride != expected_stride:
|
| 233 |
+
return False
|
| 234 |
+
|
| 235 |
+
expected_stride *= length
|
| 236 |
+
|
| 237 |
+
return True
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
_memory_formats = {
|
| 241 |
+
torch.contiguous_format,
|
| 242 |
+
torch.preserve_format,
|
| 243 |
+
torch.channels_last,
|
| 244 |
+
torch.channels_last_3d,
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def validate_memory_format(memory_format: torch.memory_format):
|
| 249 |
+
torch._check(
|
| 250 |
+
memory_format in _memory_formats,
|
| 251 |
+
lambda: f"Received unknown memory format {memory_format}!",
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def is_contiguous_for_memory_format( # type: ignore[return]
|
| 256 |
+
a: Tensor, *, memory_format: torch.memory_format
|
| 257 |
+
) -> bool:
|
| 258 |
+
validate_memory_format(memory_format)
|
| 259 |
+
|
| 260 |
+
if memory_format == torch.contiguous_format:
|
| 261 |
+
return is_contiguous(a)
|
| 262 |
+
if memory_format == torch.channels_last:
|
| 263 |
+
return is_channels_last_contiguous_2d(a)
|
| 264 |
+
if memory_format == torch.channels_last_3d:
|
| 265 |
+
return is_channels_last_contiguous_3d(a)
|
| 266 |
+
|
| 267 |
+
torch._check(
|
| 268 |
+
False,
|
| 269 |
+
lambda: f"is_contiguous received unsupported memory format {memory_format}",
|
| 270 |
+
)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
# NOTE: that tensors with no elements and channels last is ???
|
| 274 |
+
def is_channels_last_contiguous(a: Tensor) -> bool:
|
| 275 |
+
"""
|
| 276 |
+
True when a tensor is channels-last contiguous.
|
| 277 |
+
|
| 278 |
+
This requires that:
|
| 279 |
+
|
| 280 |
+
- the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions
|
| 281 |
+
- if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the
|
| 282 |
+
stride of the 'C' dimension (Cs) is 1 and the strides corresponding to
|
| 283 |
+
each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are
|
| 284 |
+
"nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension,
|
| 285 |
+
for example.
|
| 286 |
+
"""
|
| 287 |
+
return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def is_non_overlapping_and_dense(a: Tensor) -> bool:
|
| 291 |
+
"""
|
| 292 |
+
True when a tensor is non-overlapping and dense.
|
| 293 |
+
|
| 294 |
+
A tensor is non-overlapping and dense when there exists a permutation of
|
| 295 |
+
its dimensions that is contiguous.
|
| 296 |
+
"""
|
| 297 |
+
|
| 298 |
+
if a.is_sparse:
|
| 299 |
+
return False
|
| 300 |
+
|
| 301 |
+
# Short-circuits if the tensor is already contiguous or channels-last contiguous
|
| 302 |
+
if is_contiguous(a) or is_channels_last_contiguous(a):
|
| 303 |
+
return True
|
| 304 |
+
|
| 305 |
+
# The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp
|
| 306 |
+
|
| 307 |
+
# Short-circuits for tensors of rank one, which are
|
| 308 |
+
# non-overlapping and "dense" if their stride is one
|
| 309 |
+
if a.ndim == 1:
|
| 310 |
+
return a.stride()[0] == 1
|
| 311 |
+
|
| 312 |
+
# Checks that there exists a permutation of the strides s.t. the tensor would be contiguous
|
| 313 |
+
# Sorts (length, stride) pairs by stride
|
| 314 |
+
lengths_and_strides = sorted(zip(a.shape, a.stride()), key=operator.itemgetter(1))
|
| 315 |
+
|
| 316 |
+
expected_stride = 1
|
| 317 |
+
for length, stride in lengths_and_strides:
|
| 318 |
+
if length == 1:
|
| 319 |
+
continue
|
| 320 |
+
|
| 321 |
+
if stride != expected_stride:
|
| 322 |
+
return False
|
| 323 |
+
|
| 324 |
+
expected_stride *= length
|
| 325 |
+
|
| 326 |
+
return True
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
# NOTE: Based on the implementation in TensorIterator.cpp, but note that
|
| 330 |
+
# the note [Computing output strides] is incorrect, because it
|
| 331 |
+
# says that strides will be preserved even if they are not
|
| 332 |
+
# "non overlapping and dense", but this is incorrect. The
|
| 333 |
+
# output of elementwise operations are always given
|
| 334 |
+
# non overlapping and dense strides.
|
| 335 |
+
# This is also INCORRECT because it does not model TensorIterator's
|
| 336 |
+
# short-circuit, which can cause different strides.
|
| 337 |
+
def compute_elementwise_output_logical_to_physical_perm(
    *tensors, _skip_checks=False
) -> List[int]:
    """
    Computes the logical-to-physical dimension permutation for the output of an
    elementwise operation over ``tensors``, mirroring TensorIterator's stride
    ordering. ``_skip_checks`` bypasses validation/filtering when the caller has
    already done it.
    """
    if not _skip_checks and len(tensors) == 0:
        msg = "Can't compute elementwise output strides for zero tensors!"
        raise ValueError(msg)

    if not _skip_checks:
        check_same_shape(*tensors, allow_cpu_scalar_tensors=True)

    # Filters the tensors to actual tensors
    if not _skip_checks:
        tensors = tuple(
            a
            for a in tensors
            if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
        )

    # Short-circuits for CPU scalar case
    if len(tensors) == 0:
        return []

    # Short-circuits for shapes with zero or one dimensions
    # TODO: are these necessary?
    ndim = tensors[0].ndim
    if ndim == 0:
        return []
    if ndim == 1:
        return [0]

    # Short-circuits if contiguous, following the fake fast path.
    # This reduces the number of guards we end up making
    # TODO: do channels last too
    is_contiguous = True
    for t in tensors:
        is_contiguous = is_contiguous and t.is_contiguous(
            memory_format=torch.contiguous_format
        )

    if is_contiguous:
        return list(range(ndim))

    shape = tensors[0].shape

    def should_swap(idx_a, idx_b):
        # Three-way comparator over dims: -1 keeps order, 1 swaps, 0 is ambiguous.
        for tensor in tensors:
            stride_a = tensor.stride()[idx_a]
            stride_b = tensor.stride()[idx_b]

            if stride_a == 0 or stride_b == 0:
                # Broadcast dimensions don't constrain the ordering
                continue

            if stride_a < stride_b:
                return -1

            if stride_a > stride_b:
                return 1

            # stride_a == stride_b
            if shape[idx_a] > shape[idx_b]:
                return 1

        # Note: this case is hit if all strides are zero,
        # or all strides are equal and all dimensions have the same length
        return 0

    # The "sort" order for the permutation is back-to-front, but
    # the natural order for permutations is front-to-back. Do the
    # sorting back-to-front and then reverse it on output.
    #
    # also, note this returns the logical to physical shape permutation
    perm = list(reversed(range(ndim)))

    # insertion sort with support for ambiguous comparisons
    for i in range(1, ndim):
        dim1 = i
        for dim0 in reversed(range(i)):
            comparison = should_swap(perm[dim0], perm[dim1])
            if comparison > 0:
                perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
                dim1 = dim0
            elif comparison < 0:
                break

    return list(reversed(perm))
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]:
    """
    Computes the output strides for elementwise operations.
    """
    if len(tensors) == 0:
        msg = "Can't compute elementwise output strides for zero tensors!"
        raise ValueError(msg)

    check_same_shape(*tensors, allow_cpu_scalar_tensors=True)

    # Filters the tensors to actual tensors
    tensors = tuple(
        a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
    )

    # Short-circuits for CPU scalar case
    if len(tensors) == 0:
        return ()

    ndim = tensors[0].ndim
    shape = tensors[0].shape

    if ndim == 0:
        return ()
    if ndim == 1:
        return (1,)

    # Finds the physical dim ordering, lays out contiguous strides in that
    # order, then maps the strides back to the logical order.
    logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm(
        *tensors, _skip_checks=True
    )
    permuted_shape = apply_perm(shape, logical_to_physical_perm)  # to physical

    new_strides = make_contiguous_strides_for(permuted_shape)
    permuted_strides = apply_perm(
        new_strides, invert_perm(logical_to_physical_perm)
    )  # to logical

    return tuple(permuted_strides)
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
# Identity permutation is [0, 1, 2]
|
| 465 |
+
def apply_perm(inp, perm):
    """Returns a list where position idx holds inp[perm[idx]] (identity perm is [0, 1, 2, ...])."""
    return [inp[source] for source in perm]
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
def invert_perm(perm):
    """Returns the inverse of the permutation ``perm``."""
    inverse = [-1] * len(perm)
    for position, source in enumerate(perm):
        inverse[source] = position
    return inverse
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
#
|
| 482 |
+
# Common helper functions
|
| 483 |
+
#
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def validate_dim_length(length: int):
    """
    Validates that an object represents a valid
    dimension length.
    """

    # Dimension lengths must be non-negative
    assert length >= 0
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def validate_shape(shape: ShapeType):
    """
    Validates that a sequence represents a valid shape.
    """

    assert isinstance(shape, Sequence), type(shape)
    # Every dimension length must itself be valid (non-negative)
    for l in shape:
        validate_dim_length(l)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def validate_strides(strides: StrideType):
    """
    Verifies the object specifies valid strides.
    """

    assert isinstance(strides, Sequence)
    # Strides must be non-negative
    for stride in strides:
        assert stride >= 0
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
def validate_idx(rank: int, idx: int):
    """
    Validates that idx is a valid index for the given shape.
    Assumes the index is already canonicalized.
    """

    assert isinstance(idx, Dim)
    assert isinstance(rank, Dim)

    # Parses as (idx >= 0 and idx < rank) or idx == 0, so idx 0 is accepted
    # even for rank 0 (scalar tensors)
    assert idx >= 0 and idx < rank or idx == 0
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def validate_dimension_indices(rank: int, indices: DimsSequenceType):
    # Validates each (already canonicalized) index against the rank
    for idx in indices:
        validate_idx(rank, idx)
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def validate_exclusive_idx(rank: int, ex_idx: int):
    """
    Validates that ex_idx is a valid exclusive index
    for the given shape.
    """

    assert isinstance(ex_idx, Dim)
    assert isinstance(rank, Dim)
    # Exclusive (one-past-the-end) indices range over (0, rank]
    assert ex_idx > 0 and ex_idx <= rank
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
# "Wraps" a dim (up to one time) for the given rank, allowing dims to be
|
| 544 |
+
# specified using negative indices. If `wrap_scalar` is true then scalar
|
| 545 |
+
# tensors of rank 0 will allow dimensions in the range [-1, 0]. Otherwise,
|
| 546 |
+
# idx should be in the range [-rank, rank-1].
|
| 547 |
+
def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int:
    """
    "Wraps" a (possibly negative) dimension index into [0, rank).

    When ``wrap_scalar`` is True, rank-0 (scalar) tensors are treated as rank 1,
    so idx may be -1 or 0; otherwise any idx for a rank-0 tensor raises.

    Raises IndexError for a negative rank or an out-of-range idx.
    """
    if rank < 0:
        raise IndexError(f"Rank cannot be negative but got {rank}")

    if rank == 0:
        if not wrap_scalar:
            raise IndexError(
                f"Dimension specified as {idx} but tensor has no dimensions"
            )
        rank = 1

    if 0 <= idx < rank:
        return idx

    wrapped = idx + rank if idx < 0 else idx

    if not (0 <= wrapped < rank):
        # Same error message as in aten/src/ATen/WrapDimUtils.h:49
        raise IndexError(
            f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})"
        )

    return wrapped
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
# Takes a dimension or sequence of dimensions and "wraps" them,
|
| 575 |
+
# mapping negative offsets to positive ones
|
| 576 |
+
@overload
def canonicalize_dims(
    rank: int, indices: Sequence[int], wrap_scalar: bool = True
) -> Tuple[int, ...]:
    pass


@overload
def canonicalize_dims(rank: int, indices: int, wrap_scalar: bool = True) -> int:
    pass


def canonicalize_dims(rank, indices, wrap_scalar=True):
    """
    Wraps a single dim (returning an int) or a sequence of dims (returning a
    tuple), mapping negative offsets to positive ones via canonicalize_dim.
    """
    if isinstance(indices, Dim):
        return canonicalize_dim(rank, indices, wrap_scalar)

    return tuple(canonicalize_dim(rank, x, wrap_scalar) for x in indices)
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
    """
    Validates that perm is a permutation of length rank.
    """
    if not isinstance(perm, Sequence):
        return False

    # A valid permutation sorts to exactly (0, 1, ..., rank - 1)
    return tuple(sorted(perm)) == tuple(range(0, rank))
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
def is_same_shape(a: Sequence, b: Sequence) -> bool:
    """
    Compares two shapes a and b, returning True if they are the same
    (their ranks and corresponding lengths match) and False otherwise.
    """
    # Normalize both to tuples so lists, tuples, and torch.Size all compare
    left = tuple(a)
    right = tuple(b)
    return left == right
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def is_cpu_scalar_tensor(a: Any) -> bool:
    # True for rank-0 tensors on the CPU device (these participate specially in
    # device and shape checks)
    return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
def check_same_device(*args, allow_cpu_scalar_tensors):
    """
    Checks that all Tensors in args have the same device.

    Raises a RuntimeError when:
    - args contains an object whose type is not Tensor or Number
    - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
    """
    # Short-circuits if all (one or fewer) arguments are trivially on the same device
    if len(args) <= 1:
        return

    # Note: cannot initialize device to the first arg's device (it may not have one)
    device = None
    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                continue

            if device is None:
                device = arg.device

            if device != arg.device:
                msg = (
                    "Tensor on device "
                    + str(arg.device)
                    + " is not on the expected device "
                    + str(device)
                    + "!"
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same device, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def canonicalize_device(device: DeviceLikeType) -> torch.device:
    # Converts a device-like value (torch.device or device string) to torch.device
    if isinstance(device, torch.device):
        return device

    assert isinstance(device, str)
    return torch.device(device)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
# Asserts if any of the following are true:
|
| 671 |
+
# - a non-scalar or non-Tensor is given
|
| 672 |
+
# - the shape of any tensors is distinct
|
| 673 |
+
def check_same_shape(*args, allow_cpu_scalar_tensors: bool):
    """
    Checks that all Tensors in args have the same shape.

    Raises a RuntimeError when:
    - args contains an object whose type is not Tensor or Number
    - two Tensor objects in args have different devices
    """
    # First tensor shape seen; None until a non-exempt tensor is encountered
    shape = None

    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            # CPU scalar tensors broadcast and are exempt from the shape check
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                continue

            if shape is None:
                shape = arg.shape

            if not is_same_shape(shape, arg.shape):
                msg = f"Shape {arg.shape} is not the expected shape {shape}!"
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same shape, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)
|
| 701 |
+
|
| 702 |
+
|
| 703 |
+
# Acquires a common shape, if it exists, from one or more tensor arguments,
|
| 704 |
+
# filtering number arguments
|
| 705 |
+
def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]:
    """
    Acquires a common shape, if it exists, from one or more tensor arguments,
    filtering number arguments. Returns None when shapes conflict or a
    non-Tensor, non-Number argument is present.
    """
    shape = None
    # Tracked separately so a lone CPU scalar tensor still yields a shape
    scalar_shape = None

    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                scalar_shape = arg.shape
                continue

            if shape is None:
                shape = arg.shape

            if not is_same_shape(shape, arg.shape):
                return None
        else:
            return None

    return shape if shape is not None else scalar_shape
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
# Extracts dimensions that might be passed either as a list/tuple or as varargs.
|
| 729 |
+
# A typical case is Tensor.permute .
|
| 730 |
+
def extract_dims_from_varargs(
    dims: Union[DimsSequenceType, Tuple[DimsSequenceType, ...]]
) -> DimsSequenceType:
    """
    Extracts dimensions that might be passed either as a list/tuple or as
    varargs (e.g. Tensor.permute), returning the flat dims sequence.
    """
    if dims and isinstance(dims[0], Sequence):
        # A single sequence argument wraps the actual dims
        assert len(dims) == 1
        dims = cast(Tuple[DimsSequenceType], dims)
        return dims[0]
    else:
        return cast(DimsSequenceType, dims)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
def extract_shape_from_varargs(
    shape: Union[ShapeType, Tuple[ShapeType]],
    validate=True,
) -> Tuple[int, ...]:
    """
    Returns a shape from varargs.

    In PyTorch, operations that accept shapes often accept them as varargs, like
    foo(*shape). However a user can pass the shape as a sequence of integers,
    like this:

      foo(1, 2, 3)

    or as a sequence of integers

      foo((1, 2, 3))

    In the first case shape will be a tuple of integers, and in the second case it's a tuple
    containing a tuple of integers. This validates those inputs and canonicalizes them
    to a tuple of integers.
    """

    # Handles tuple unwrapping
    if len(shape) == 1 and isinstance(shape[0], Sequence):
        shape = shape[0]

    if validate:
        validate_shape(shape)  # type: ignore[arg-type]
    return shape  # type: ignore[return-value]
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
def infer_size(shape: ShapeType, numel: int) -> Tuple[int, ...]:
    """
    Infers the size of a dim with size -1, if it exists.
    Also checks that new shape is compatible with the number of elements.
    """
    # Index of the (single) -1 dimension, if any
    dim = None
    # Product of the explicitly specified dimension lengths
    newsize = 1
    for i, d in enumerate(shape):
        if d == -1:
            torch._check(dim is None, lambda: "only one dimension can be inferred")
            dim = i
        elif d >= 0:
            newsize *= d
        else:
            torch._check(False, lambda: f"invalid shape dimension {d}")
    torch._check(
        numel == newsize or (dim is not None and newsize > 0 and numel % newsize == 0),
        lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
    )
    if dim is not None:
        # Convert to list to produce a compatible error message with core
        # PyTorch, which prints sequences in square brackets.
        shape = list(shape)
        torch._check(
            newsize != 0,
            lambda: (
                f"cannot reshape tensor of 0 elements into shape {shape} because the "
                f"unspecified dimension size -1 can be any value and is ambiguous"
            ),
        )
        # Fill in the inferred length
        shape[dim] = numel // newsize
    return tuple(shape)
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
# Groupings of torch dtypes used by the is_*_dtype predicates below.
# Note float16/bfloat16 appear in both the low-precision and float groups,
# and complex32 in both the low-precision and complex groups.
_integer_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
_low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
_float_dtypes = (torch.float16, torch.bfloat16, torch.float32, torch.float64)
_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
def is_boolean_dtype(dtype: torch.dtype) -> bool:
    # True only for torch.bool
    assert isinstance(dtype, torch.dtype)
    return dtype is torch.bool
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
def is_integer_dtype(dtype: torch.dtype) -> bool:
    # True for the signed/unsigned integer dtypes (excludes torch.bool)
    assert isinstance(dtype, torch.dtype)
    return dtype in _integer_dtypes
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
def is_low_precision_dtype(dtype: torch.dtype) -> bool:
    # True for float16, bfloat16, and complex32
    assert isinstance(dtype, torch.dtype)
    return dtype in _low_precision_dtypes
|
| 825 |
+
|
| 826 |
+
|
| 827 |
+
def is_float_dtype(dtype: torch.dtype) -> bool:
    # True for the floating-point dtypes (including the half-precision ones)
    assert isinstance(dtype, torch.dtype)
    return dtype in _float_dtypes
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
def is_complex_dtype(dtype: torch.dtype) -> bool:
    # True for complex32/complex64/complex128
    assert isinstance(dtype, torch.dtype)
    return dtype in _complex_dtypes
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
def is_grad_dtype(dtype: torch.dtype) -> bool:
    """
    Checks if the dtype can require a gradient.
    """
    # Only floating-point and complex dtypes are differentiable
    return is_float_dtype(dtype) or is_complex_dtype(dtype)
|
| 842 |
+
|
| 843 |
+
|
| 844 |
+
# Maps each complex dtype to the real dtype of its components, and vice versa.
# Note bfloat16 maps to complex64 (there is no bfloat16-component complex dtype),
# so the round trip bfloat16 -> complex64 -> float32 is not an identity.
_complex_to_real_dtype_map = {
    torch.complex128: torch.float64,
    torch.complex64: torch.float32,
    torch.complex32: torch.float16,
}

_real_to_complex_dtype_map = {
    torch.float16: torch.complex32,
    torch.bfloat16: torch.complex64,
    torch.float32: torch.complex64,
    torch.float64: torch.complex128,
}
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
    # Real dtype of a complex dtype's components; KeyError for non-complex dtypes
    return _complex_to_real_dtype_map[dtype]
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
    # Complex dtype with components of the given real dtype; KeyError otherwise
    return _real_to_complex_dtype_map[dtype]
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
def dtype_to_type(dtype: torch.dtype) -> type:
    """
    Computes the corresponding Python type (AKA "type kind") for the
    given dtype.
    """
    assert isinstance(dtype, torch.dtype)

    if dtype is torch.bool:
        return bool
    if dtype in _integer_dtypes:
        return int
    if dtype in _float_dtypes:
        return float
    if dtype in _complex_dtypes:
        return complex

    raise ValueError("Invalid dtype!")
|
| 883 |
+
|
| 884 |
+
|
| 885 |
+
def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]:
    """
    Computes the corresponding Python type constructor for the
    given dtype.
    """
    assert isinstance(dtype, torch.dtype)

    if dtype is torch.bool:
        return lambda x: bool(x)
    # sym_int/sym_float handle SymInt/SymFloat as well as plain numbers
    if dtype in _integer_dtypes:
        return sym_int
    if dtype in _float_dtypes:
        return sym_float
    if dtype in _complex_dtypes:
        # TODO: type error here is real, replace with sym_complex
        return lambda x: complex(x)  # type: ignore[arg-type]

    raise ValueError("Invalid dtype!")
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def type_to_dtype(typ: type) -> torch.dtype:
    """
    Computes the corresponding dtype for a Number type.

    bool -> torch.bool, int/SymInt -> torch.long, float/SymFloat -> the default
    dtype, complex -> the complex dtype corresponding to the default dtype.
    Raises ValueError for any other type.
    """
    assert isinstance(typ, type)

    if typ is bool:
        return torch.bool
    if typ is int or typ is torch.SymInt:
        return torch.long
    if typ is float or typ is torch.SymFloat:
        return torch.get_default_dtype()
    # TODO: sym_complex_float?
    if typ is complex:
        return corresponding_complex_dtype(torch.get_default_dtype())

    raise ValueError("Invalid type!")
|
| 923 |
+
|
| 924 |
+
|
| 925 |
+
def get_dtype(x: Union[torch.Tensor, NumberType]):
    # Returns the dtype of a tensor, or the dtype corresponding to a Python
    # number's type
    if isinstance(x, torch.Tensor):
        return x.dtype
    else:
        return type_to_dtype(type(x))
|
| 930 |
+
|
| 931 |
+
|
| 932 |
+
# Python number "type kinds" in promotion order: bool < int < float < complex
_ordered_types = (bool, int, float, complex)
|
| 933 |
+
|
| 934 |
+
|
| 935 |
+
def check_fp_or_complex(
    dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True
):
    """
    Checks whether the input is floating point or complex.
    If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32
    """
    torch._check(
        is_float_dtype(dtype) or is_complex_dtype(dtype),
        lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}",
    )
    torch._check(
        allow_low_precision_dtypes or not is_low_precision_dtype(dtype),
        lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}",
    )
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"):
    # Verifies A is at least 2-D (a matrix or a batch of matrices)
    torch._check(
        len(A.shape) >= 2,
        lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
    )
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
def get_higher_type(a: type, b: type) -> type:
    """
    Returns the higher of the two given Number types.

    The types are ordered bool -> int -> float -> complex.
    """
    # Type checking
    assert a in _ordered_types
    assert b in _ordered_types

    if a is b:
        return a

    # Walks the ordering low to high; whichever of a/b appears first is the
    # lower type, so the other one is returned
    for typ in _ordered_types:
        if a is typ:
            return b
        if b is typ:
            return a

    raise ValueError("Unknown Python scalar type!")
|
| 979 |
+
|
| 980 |
+
|
| 981 |
+
# Returns the higher of two torch datatypes a and b or, if the two
|
| 982 |
+
# are not ordered relative to each other, the next
|
| 983 |
+
# higher datatype
|
| 984 |
+
def get_higher_dtype(
    a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
    b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
) -> Optional[torch.dtype]:
    """
    Computes the "lowest" datatype that is weakly
    "higher" than both a and b.

    Inputs may be dtypes, tensors (their dtype is used), numbers (the dtype for
    their Python type is used), or None (ignored). Returns None only when both
    inputs are None.
    """

    # Type checking
    assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
    assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))

    def _extract_dtype(
        x: Optional[Union[torch.dtype, TensorLikeType, NumberType]]
    ) -> Optional[torch.dtype]:
        # Normalizes any accepted input to a dtype (or None)
        if x is None:
            return None
        if isinstance(x, torch.dtype):
            return x
        if isinstance(x, TensorLike):
            return x.dtype
        if isinstance(x, Number):
            return type_to_dtype(type(x))

        raise RuntimeError("Unexpected type given to _extract_dtype!")

    a, b = _extract_dtype(a), _extract_dtype(b)

    if a is b:
        return a

    if a is None:
        return b

    if b is None:
        return a

    # Dtypes grouped low to high; dtypes within a group (e.g. uint8/int8,
    # float16/bfloat16) are unordered relative to each other
    ordered_datatypes = (
        (torch.bool,),
        (torch.uint8, torch.int8),
        (torch.int16,),
        (torch.int32,),
        (torch.int64,),
        (torch.float16, torch.bfloat16),
        (torch.float32,),
        (torch.float64,),
        (torch.complex32,),
        (torch.complex64,),
        (torch.complex128,),
    )

    for idx, dtypes in enumerate(ordered_datatypes):
        # Both in the same unordered group: promote to the next group's first dtype
        if a in dtypes and b in dtypes:
            return ordered_datatypes[idx + 1][0]
        if a in dtypes:
            return b
        if b in dtypes:
            return a

    raise RuntimeError("Unexpected termination!")
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
def check_pin_memory(pin_memory: bool):
    # Pinned memory is unsupported in PrimTorch; raises NotImplementedError when requested
    torch._check_not_implemented(
        not pin_memory, lambda: "PrimTorch does not support pinned memory"
    )
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
def check_layout(layout: torch.layout):
    # Only the strided layout is supported; raises NotImplementedError otherwise
    torch._check_not_implemented(
        layout == torch.strided, lambda: f"PrimTorch doesn't support layout={layout}"
    )
|
| 1057 |
+
|
| 1058 |
+
|
| 1059 |
+
# TODO: maybe unify with can_cast_to?
|
| 1060 |
+
def is_weakly_lesser_type(a: type, b: type) -> bool:
    """
    Compares two types, a and b, returning True if a is weakly "less" than b.

    The comparison is determined by the following type ordering: bool, int, float, complex.
    """
    ordering = (
        bool,
        int,
        float,
        complex,
    )

    assert a in ordering
    assert b in ordering

    # a is weakly lesser iff it does not appear strictly after b in the ordering
    return ordering.index(a) <= ordering.index(b)
|
| 1083 |
+
|
| 1084 |
+
|
| 1085 |
+
def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
    """
    True when cast_from can be safely cast to cast_to, i.e. cast_to's type kind
    (bool < integer < float < complex) is at least as high as cast_from's.
    """
    # Walks the kinds from highest to lowest; the first kind hit decides
    for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
        if fn(cast_to):
            return True
        if fn(cast_from):
            return False

    raise ValueError(f"Received unknown dtypes {cast_to}, {cast_from}!")
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
def check_same_dtype(*args):
    """
    Checks that all Tensors in args have the same device and that all Numbers have the
    same corresponding Python type.

    Raises a RuntimeError when:
    - args contains an object whose type is not Tensor or Number
    - two Tensors objects in args have different dtypes
    - two Number objects in args have different types
    - there are Tensors and Numbers in args, and one of those Tensors corresponding
      Python types is different from the type of one of those Numbers
    """
    # dtype of the first tensor seen
    full_dtype = None
    # Python "type kind" of the first tensor seen
    scalar_type = None

    for arg in args:
        if isinstance(arg, Number):
            # Scalar type checking is disabled (and may be removed in the future)
            continue
            # if scalar_type is None:
            #     scalar_type = type(arg)

            # if scalar_type is not type(arg):
            #     msg = (
            #         "Scalar of type "
            #         + str(type(arg))
            #         + " is not the expected type of "
            #         + str(scalar_type)
            #         + "!"
            #     )
            #     raise RuntimeError(msg)
        elif isinstance(arg, TensorLike):
            if full_dtype is None:
                full_dtype = arg.dtype
            if scalar_type is None:
                scalar_type = dtype_to_type(arg.dtype)

            if full_dtype is not arg.dtype:
                msg = (
                    "Tensor with dtype "
                    + str(arg.dtype)
                    + " is not the expected dtype of "
                    + str(full_dtype)
                    + "!"
                )
                raise RuntimeError(msg)

            arg_type = dtype_to_type(arg.dtype)
            if arg_type is not scalar_type:
                msg = (
                    "Tensor with corresponding Python type "
                    + str(arg_type)
                    + " is not the expected type of "
                    + str(scalar_type)
                    + "!"
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)
|
| 1157 |
+
|
| 1158 |
+
|
| 1159 |
+
# Maps datatypes to their computation types for elementwise operations
|
| 1160 |
+
# Maps datatypes to their computation types for elementwise operations:
# half-precision inputs are computed in full precision ("opmath")
_computation_dtype_map = {
    torch.bfloat16: torch.float32,
    torch.float16: torch.float32,
    torch.complex32: torch.complex64,
}
|
| 1165 |
+
|
| 1166 |
+
|
| 1167 |
+
def get_computation_dtype(dtype: torch.dtype) -> torch.dtype:
    # Upcasts half-precision dtypes for computation; all others pass through
    return _computation_dtype_map.get(dtype, dtype)
|
| 1169 |
+
|
| 1170 |
+
|
| 1171 |
+
# CPU accumulation types: on CPU, floating/complex reductions accumulate in
# double precision (cf. at::toAccumulateType)
_cpu_acc_type_map = {
    torch.bfloat16: torch.float64,
    torch.float16: torch.float64,
    torch.float32: torch.float64,
    torch.complex32: torch.complex128,
    torch.complex64: torch.complex128,
}
|
| 1178 |
+
|
| 1179 |
+
|
| 1180 |
+
def get_acc_type(dtype: torch.dtype, device: torch.device) -> torch.dtype:
    # Equivalent to at::toAccumulateType, prefer computation_dtype where possible
    if device.type == "cpu":
        return _cpu_acc_type_map.get(dtype, dtype)
    else:
        return get_computation_dtype(dtype)
|
| 1186 |
+
|
| 1187 |
+
|
| 1188 |
+
class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum):
    """Kinds of elementwise type promotion (how input dtypes map to the result dtype)."""

    DEFAULT = (0,)
    NO_OPMATH = (1,)
    INT_TO_FLOAT = (2,)
    ALWAYS_BOOL = (3,)
    COMPLEX_TO_FLOAT = (4,)
    BOOL_TO_LONG = (5,)
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
class REDUCTION_OUTPUT_TYPE_KIND(Enum):
    """Kinds of output dtype behavior for reductions."""

    SAME = (0,)
    COMPLEX_TO_FLOAT = (1,)  # for complex types outputs corresponding real type
    KEEP_PROMOTED_TYPE = (2,)  # keep output in opmath type, needed for mean
    ALWAYS_BOOL = (3,)
|
| 1202 |
+
|
| 1203 |
+
|
| 1204 |
+
# Describes the return type of the primitive:
|
| 1205 |
+
#
|
| 1206 |
+
# - NEW, a new tensor is created
|
| 1207 |
+
# - VIEW, a view of an input tensor is returned
|
| 1208 |
+
# - INPLACE, one or more input tensors is modified
|
| 1209 |
+
#
|
| 1210 |
+
# these descriptors are mututally exclusive and exhaustive.
|
| 1211 |
+
# Describes the return type of the primitive:
#
#   - NEW, a new tensor is created
#   - VIEW, a view of an input tensor is returned
#   - INPLACE, one or more input tensors is modified
#
# these descriptors are mutually exclusive and exhaustive.
class RETURN_TYPE(Enum):
    NEW = (0,)
    VIEW = (1,)
    INPLACE = (2,)
|
| 1215 |
+
|
| 1216 |
+
|
| 1217 |
+
# TODO: when NumberType contains the sym types, can simplify this
|
| 1218 |
+
# TODO: when NumberType contains the sym types, can simplify this
def number_type(x: Union[NumberType, torch.SymInt, torch.SymFloat]) -> Type:
    """Returns the Python number type of x, mapping SymInt/SymFloat to int/float."""
    if isinstance(x, torch.SymInt):
        return int
    elif isinstance(x, torch.SymFloat):
        return float
    else:
        return type(x)
|
| 1225 |
+
|
| 1226 |
+
|
| 1227 |
+
def symbol_type(x: sympy.Symbol) -> Type:
    """Returns the Python number type a sympy Symbol ranges over (int or float)."""
    if x.is_integer:  # type: ignore[attr-defined]
        return int
    else:
        # NB: Not strictly correct, but we don't support SymPy complex or bool.
        return float
|
| 1233 |
+
|
| 1234 |
+
|
| 1235 |
+
# TODO: document type promotion kinds
|
| 1236 |
+
def elementwise_dtypes(
    *_args,
    type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
) -> Tuple[torch.dtype, torch.dtype]:
    """
    Computes the (computation dtype, result dtype) pair for elementwise type
    promotion over ``_args`` (numbers, tensors, or sympy symbols; ``None``
    entries are ignored).

    Default promotion first picks the highest participating type in the order

        bool -> integer -> floating point -> complex

    then selects a concrete dtype of that type, preferring dtypes of tensors
    with one or more dimensions over zero-dim tensor dtypes, and falling back
    to the default dtype of the selected type when no tensor has it. For
    complex results, participating float dtypes are first mapped to their
    corresponding complex dtypes before comparison.

    The computation dtype is the "op math" dtype: low-precision float/complex
    dtypes (float16, bfloat16, complex32) are upcast for computation.
    ``type_promotion_kind`` tweaks this scheme:

    - DEFAULT: op math computation dtype + promoted result dtype (e.g. add)
    - NO_OPMATH: computation dtype equals the result dtype (e.g. where, cat)
    - INT_TO_FLOAT: bool/int results become the default float dtype (e.g. sin)
    - COMPLEX_TO_FLOAT: complex results map to the corresponding real dtype,
      though computation may still be complex (e.g. abs)
    - BOOL_TO_LONG: bool computation/result dtypes become long (e.g. pow)
    - ALWAYS_BOOL: result dtype is always bool (e.g. eq)
    """

    args = tuple(a for a in _args if a is not None)

    # Find the highest participating Python type (bool < int < float < complex).
    highest_type: type = bool
    for a in args:
        if not isinstance(a, (Number, TensorLike, sympy.Symbol)):
            msg = f"Unexpected type {str(type(a))} when computing elementwise type promotion!"
            raise ValueError(msg)

        if isinstance(a, Number):
            highest_type = get_higher_type(highest_type, number_type(a))
        elif isinstance(a, sympy.Symbol):
            highest_type = get_higher_type(highest_type, symbol_type(a))
        else:
            # a is a TensorLike
            highest_type = get_higher_type(highest_type, dtype_to_type(a.dtype))

    def _highest_filtered_dtype(
        candidates, predicate, *, float_as_complex=False
    ) -> Optional[torch.dtype]:
        # Track the best dtype among zero-dim and 1+-dim tensors separately;
        # tensors with one or more dimensions take precedence.
        zero_dim = None
        multi_dim = None
        for c in candidates:
            if not (isinstance(c, TensorLike) and predicate(c.dtype)):
                continue
            _dtype = c.dtype
            if float_as_complex and is_float_dtype(_dtype):
                _dtype = corresponding_complex_dtype(_dtype)
            if c.ndim == 0:
                zero_dim = get_higher_dtype(zero_dim, _dtype)
            else:
                multi_dim = get_higher_dtype(multi_dim, _dtype)

        return multi_dim if multi_dim is not None else zero_dim

    # Select the concrete result dtype for the chosen type.
    result_dtype = None
    if highest_type is float:
        result_dtype = _highest_filtered_dtype(args, is_float_dtype)
        if result_dtype is None:
            result_dtype = torch.get_default_dtype()
    elif highest_type is complex:
        result_dtype = _highest_filtered_dtype(
            args,
            lambda d: is_float_dtype(d) or is_complex_dtype(d),
            float_as_complex=True,
        )
        if result_dtype is None:
            result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
    elif highest_type is int:
        result_dtype = _highest_filtered_dtype(args, is_integer_dtype)
        if result_dtype is None:
            result_dtype = torch.long
    else:
        # highest_type is bool
        result_dtype = torch.bool

    # Apply the per-kind tweak and derive the computation dtype.
    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
        return get_computation_dtype(result_dtype), result_dtype
    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
        return result_dtype, result_dtype
    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
        if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
            result_dtype = torch.get_default_dtype()
        return get_computation_dtype(result_dtype), result_dtype
    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
        # NOTE: computation can still occur in a complex dtype
        computation_dtype = get_computation_dtype(result_dtype)
        if is_complex_dtype(result_dtype):
            result_dtype = corresponding_real_dtype(result_dtype)
        return computation_dtype, result_dtype
    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
        if is_boolean_dtype(result_dtype):
            return torch.long, torch.long
        return get_computation_dtype(result_dtype), result_dtype
    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
        return get_computation_dtype(result_dtype), torch.bool
    raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}")
|
| 1412 |
+
|
| 1413 |
+
|
| 1414 |
+
def reduction_dtypes(
    arg,
    output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
    dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.dtype, Optional[torch.dtype]]:
    """
    Returns the (computation dtype, result dtype) pair for a reduction over
    ``arg`` (optionally overridden by ``dtype``).

    Even reductions that don't strictly need promotion (amin/amax) compute in
    the promoted "op math" dtype, so promotion is made explicit here. The
    result dtype is None for KEEP_PROMOTED_TYPE, meaning the output stays in
    the promoted computation dtype (needed for mean).
    """
    inp_dtype = dtype if dtype is not None else arg.dtype
    computation_dtype = get_computation_dtype(inp_dtype)

    if output_dtype_kind in (
        REDUCTION_OUTPUT_TYPE_KIND.SAME,
        REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT,
    ):
        result_dtype = dtype if dtype else arg.dtype
        if (
            output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
            and is_complex_dtype(result_dtype)
        ):
            result_dtype = corresponding_real_dtype(result_dtype)
    elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
        result_dtype = None
    else:  # ALWAYS_BOOL
        result_dtype = torch.bool
    return computation_dtype, result_dtype
|
| 1439 |
+
|
| 1440 |
+
|
| 1441 |
+
# This function's logic is borrowed from the following functions defined in C++:
|
| 1442 |
+
# batched_matrix_contiguous_strides and contiguous_strides
|
| 1443 |
+
def make_contiguous_strides_for(
    shape: ShapeType, row_major: bool = True
) -> Tuple[int, ...]:
    """
    Returns the strides of a contiguous tensor if row_major=True.
    If row_major=False, it returns the strides of a contiguous batch of
    Fortran-contiguous matrices. This is often used when calling external
    libraries like BLAS/LAPACK/cuSolver...

    (Fix: the docstring previously said row_major=True yields the
    Fortran-contiguous layout, contradicting the implementation.)
    """
    # contiguous_strides from c10/util/strides.h
    validate_shape(shape)
    if not shape:
        return ()

    # Accumulate strides right-to-left; size-0/1 dims contribute a factor of 1.
    multiplier = 1
    strides = []
    for l in reversed(shape):
        strides.append(multiplier)
        multiplier *= sym_max(l, 1)

    result = tuple(reversed(strides))

    # batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h
    if row_major or len(shape) < 2:
        return result
    # Column-major last two dims: stride 1 down a column, shape[-2] across.
    return result[:-2] + (1, max(shape[-2], 1))
|
| 1471 |
+
|
| 1472 |
+
|
| 1473 |
+
def make_channels_last_1d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
|
| 1474 |
+
torch._check(
|
| 1475 |
+
len(shape) == 3,
|
| 1476 |
+
lambda: "Only tensors of rank 3 can use the channels_last_1d memory format",
|
| 1477 |
+
)
|
| 1478 |
+
|
| 1479 |
+
multiplier = 1
|
| 1480 |
+
strides = [0] * 3
|
| 1481 |
+
for idx in (1, -1, 0):
|
| 1482 |
+
# NOTE: intentionally divergence from make_contiguous_strides_for
|
| 1483 |
+
# This is consistent with eager
|
| 1484 |
+
strides[idx] = multiplier
|
| 1485 |
+
multiplier *= shape[idx]
|
| 1486 |
+
|
| 1487 |
+
return tuple(strides)
|
| 1488 |
+
|
| 1489 |
+
|
| 1490 |
+
def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
|
| 1491 |
+
# TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
|
| 1492 |
+
torch._check(
|
| 1493 |
+
len(shape) == 4,
|
| 1494 |
+
lambda: "Only tensors of rank 4 can use the channels_last memory format",
|
| 1495 |
+
)
|
| 1496 |
+
|
| 1497 |
+
multiplier = 1
|
| 1498 |
+
strides = [0] * 4
|
| 1499 |
+
for idx in (1, -1, -2, 0):
|
| 1500 |
+
# NOTE: intentionally divergence from make_contiguous_strides_for
|
| 1501 |
+
# This is consistent with eager
|
| 1502 |
+
strides[idx] = multiplier
|
| 1503 |
+
multiplier *= shape[idx]
|
| 1504 |
+
|
| 1505 |
+
return tuple(strides)
|
| 1506 |
+
|
| 1507 |
+
|
| 1508 |
+
def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
|
| 1509 |
+
torch._check(
|
| 1510 |
+
len(shape) == 5,
|
| 1511 |
+
lambda: "Only tensors of rank 5 can use the channels_last_3d memory format",
|
| 1512 |
+
)
|
| 1513 |
+
|
| 1514 |
+
multiplier = 1
|
| 1515 |
+
strides = [0] * 5
|
| 1516 |
+
for idx in (1, -1, -2, -3, 0):
|
| 1517 |
+
# NOTE: intentionally divergence from make_contiguous_strides_for
|
| 1518 |
+
# This is consistent with eager
|
| 1519 |
+
strides[idx] = multiplier
|
| 1520 |
+
multiplier *= shape[idx]
|
| 1521 |
+
|
| 1522 |
+
return tuple(strides)
|
| 1523 |
+
|
| 1524 |
+
|
| 1525 |
+
def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]:
|
| 1526 |
+
ndim = len(shape) if isinstance(shape, Sequence) else 1
|
| 1527 |
+
if ndim == 3:
|
| 1528 |
+
return make_channels_last_1d_strides_for(shape)
|
| 1529 |
+
elif ndim == 4:
|
| 1530 |
+
return make_channels_last_2d_strides_for(shape)
|
| 1531 |
+
elif ndim == 5:
|
| 1532 |
+
return make_channels_last_3d_strides_for(shape)
|
| 1533 |
+
else:
|
| 1534 |
+
raise RuntimeError(
|
| 1535 |
+
f"no channels last format strides exist in {ndim} dimensions"
|
| 1536 |
+
)
|
| 1537 |
+
|
| 1538 |
+
|
| 1539 |
+
def compute_reduction_output_shape(
    shape: ShapeType, dimensions: Sequence
) -> Tuple[int, ...]:
    """Returns ``shape`` with the reduced ``dimensions`` removed (keepdim=False)."""
    # Validate indices before filtering (raises on out-of-range dims).
    for d in dimensions:
        validate_idx(len(shape), d)

    return tuple(s for i, s in enumerate(shape) if i not in dimensions)
|
| 1553 |
+
|
| 1554 |
+
|
| 1555 |
+
def validate_no_repeating_dims(dims: Sequence):
    """Raises RuntimeError when ``dims`` contains any value more than once."""
    if len(set(dims)) != len(dims):
        raise RuntimeError("duplicate value in the list of dims")
|
| 1558 |
+
|
| 1559 |
+
|
| 1560 |
+
def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]:
    """Canonicalizes reduction ``dims`` for ``shape``; ``None`` means all dims.

    Raises on out-of-range or repeated dims.
    """
    rank = len(shape)
    if dims is None:
        return tuple(range(rank))
    canonical = tuple(canonicalize_dim(rank, d) for d in dims)
    validate_no_repeating_dims(canonical)
    return canonical
|
| 1566 |
+
|
| 1567 |
+
|
| 1568 |
+
def set_correction(
    unbiased: Optional[bool] = None,
    correction: Optional[NumberType] = None,
) -> float:
    """
    Resolves the mutually-exclusive ``unbiased``/``correction`` arguments of
    variance-style reductions to a single non-negative float correction.
    Neither given defaults to 1.0 (unbiased).
    """
    if correction is not None and unbiased is not None:
        raise RuntimeError("cannot specify both correction and unbiased arguments")
    if correction is None:
        # Only an explicit unbiased=False yields 0.0 (identity check, matching
        # the original semantics); anything else — including None — yields 1.0.
        correction = 0.0 if unbiased is False else 1.0
    # NB: we don't actually support symint here, but it's harmless to accept
    if not isinstance(correction, (IntLike, FloatLike)):
        raise ValueError("correction argument should be integer or float")
    if correction < 0:
        raise ValueError("correction argument should be non-negative")
    return sym_float(correction)
|
| 1584 |
+
|
| 1585 |
+
|
| 1586 |
+
def compute_required_storage_length(
    shape: ShapeType, strides: StrideType, storage_offset: int
) -> int:
    """Computes the minimum storage size (in elements) needed to hold a tensor
    with the given shape, strides, and storage offset.

    Returns 0 for empty tensors; otherwise it is the storage offset plus the
    largest reachable element offset plus one. A valid tensor's storage may be
    larger than this, never smaller.

    >>> t = torch.empty((10, 20))
    >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
    200
    """
    # Short-circuits if the shape has no elements
    if reduce(operator.mul, shape, 1) == 0:
        return 0

    max_offset = sum((dim - 1) * stride for dim, stride in zip(shape, strides))
    # +1 to account for the first element which offsets are taken from
    return storage_offset + max_offset + 1
|
| 1623 |
+
|
| 1624 |
+
|
| 1625 |
+
def check_in_bounds_for_storage(
    a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
):
    """
    Raises ValueError when the given shape/strides/offset describe a view that
    would reach past the end of storage ``a``; otherwise returns None.
    """
    required_length = compute_required_storage_length(shape, strides, storage_offset)
    if a.size() >= required_length:
        return
    msg = (
        "Can't view a storage of size {} with an offset of {}, shape of {}, and strides of {}, "
        "which requires a storage of size {}".format(
            a.size(), storage_offset, str(shape), str(strides), required_length
        )
    )
    raise ValueError(msg)
|
| 1641 |
+
|
| 1642 |
+
|
| 1643 |
+
# NOTE: This function should ideally be removed, but some Meta internal models
|
| 1644 |
+
# packaged with `torch.package` are using it, so it will have to be removed
|
| 1645 |
+
# at some point in the future when those models no longer use this function.
|
| 1646 |
+
def check(
|
| 1647 |
+
b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
|
| 1648 |
+
) -> None:
|
| 1649 |
+
"""
|
| 1650 |
+
Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails.
|
| 1651 |
+
Error message is a callable producing a string (to avoid wasting time
|
| 1652 |
+
string formatting in non-error case, and also to make it easier for torchdynamo
|
| 1653 |
+
to trace.)
|
| 1654 |
+
|
| 1655 |
+
.. note:: This function is planned for removal in the future. Please use
|
| 1656 |
+
`torch._check*` functions instead.
|
| 1657 |
+
"""
|
| 1658 |
+
warnings.warn(
|
| 1659 |
+
DeprecationWarning(
|
| 1660 |
+
"'torch._prims_common.check' will be removed in the future. Please use "
|
| 1661 |
+
"'torch._check*' functions instead"
|
| 1662 |
+
)
|
| 1663 |
+
)
|
| 1664 |
+
torch._check_with(exc_type, b, s)
|
| 1665 |
+
|
| 1666 |
+
|
| 1667 |
+
# This combines is_channels_last_strides_2d and is_channels_last_strides_3d in
|
| 1668 |
+
# c10/core/MemoryFormat.h into one function
|
| 1669 |
+
def are_strides_like_channels_last(
    shape: Sequence[int], strides: Sequence[int]
) -> bool:
    """
    Whether (shape, strides) matches a channels-last (rank 4) or
    channels-last-3d (rank 5) layout; other ranks return False.

    Combines is_channels_last_strides_2d and is_channels_last_strides_3d from
    c10/core/MemoryFormat.h into one function.
    """
    ndim = len(shape)

    if ndim == 4:
        # Check for channels_last_2d: dims fastest-to-slowest.
        dim_order = [1, 3, 2, 0]
    elif ndim == 5:
        # Check for channels_last_3d
        dim_order = [1, 4, 3, 2, 0]
    else:
        return False

    if strides[1] == 0:
        return False

    # Walk dims fastest-to-slowest, requiring non-decreasing strides; `floor`
    # tracks the minimum stride the next (slower) dim must have.
    floor = 0
    for d in dim_order:
        if shape[d] == 0:
            return False
        if strides[d] < floor:
            return False
        # Reject degenerate case where the batch dim ties with the channel dim.
        if d == 0 and floor == strides[1]:
            return False
        floor = strides[d]
        if strides[d] > 1:
            floor *= shape[d]
    return True
|
| 1698 |
+
|
| 1699 |
+
|
| 1700 |
+
def suggest_memory_format(x: TensorLikeType) -> torch.memory_format:
    """Suggests a memory format for ``x``: channels_last / channels_last_3d
    when its strides look channels-last, otherwise contiguous_format.
    Non-strided layouts always map to contiguous_format."""
    if x.layout == torch.strided and are_strides_like_channels_last(x.shape, x.stride()):
        return torch.channels_last if x.ndim == 4 else torch.channels_last_3d
    return torch.contiguous_format
|
| 1708 |
+
|
| 1709 |
+
|
| 1710 |
+
def prod(xs: Sequence[NumberType]) -> NumberType:
    """Product of elements in the input sequence; 1 for an empty sequence."""
    result: NumberType = 1
    for x in xs:
        result = result * x
    return result
|
| 1713 |
+
|
| 1714 |
+
|
| 1715 |
+
def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool:
    """Checks if ``shape`` can be expanded (broadcast) to ``desired``.

    Python version of aten/src/ATen/ExpandUtils.h:is_expandable_to — aligns
    trailing dims; each dim must match or be 1, and rank may not exceed the
    desired rank.
    """
    if len(shape) > len(desired):
        return False
    return all(
        s == d or s == 1
        for s, d in zip(reversed(shape), reversed(desired))
    )
|
| 1727 |
+
|
| 1728 |
+
|
| 1729 |
+
def mask_tensor(mask: TensorLikeType, t: TensorLikeType):
    """
    Like ``torch.where(mask, t, 0)``, except a boolean ``t`` yields a boolean
    result instead of being promoted to int.
    """
    # torch.where(mask, t, False) would be equivalent, but relying on a bool
    # fill value feels hacky and might break in the future.
    if t.dtype is torch.bool:
        return mask.logical_and(t)
    return torch.where(mask, t, 0)
|
| 1740 |
+
|
| 1741 |
+
|
| 1742 |
+
def get_aten_op(fn: Callable, name: str):
    """
    Maps a torch._refs function (via its ``__module__``) and an op ``name`` to
    our best guess of the associated ATen op.

    In ATen, submodule ops are flattened into prefixed names, e.g.
    linalg_eigh or special_zeta.
    """
    prefix = "torch._refs"
    module = fn.__module__
    assert module.startswith(prefix)
    # Turn ".special" / ".nn.functional" into "special_" / "nn_functional_".
    suffix = module[len(prefix) :].lstrip(".").replace(".", "_")
    if suffix:
        suffix += "_"
    return getattr(torch._ops.ops.aten, f"{suffix}{name}")
|
| 1761 |
+
|
| 1762 |
+
|
| 1763 |
+
def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype:
    """Returns ``dtype`` unchanged, or the global default dtype when ``None``."""
    if dtype is None:
        return torch.get_default_dtype()
    return dtype
|
| 1765 |
+
|
| 1766 |
+
|
| 1767 |
+
def device_or_default(device: Optional[torch.device]) -> torch.device:
    """Returns ``device`` unchanged, or the CPU device when ``None``."""
    if device is None:
        return torch.device("cpu")
    return device
|
| 1769 |
+
|
| 1770 |
+
|
| 1771 |
+
def layout_or_default(layout: Optional[torch.layout]) -> torch.layout:
    """Returns ``layout`` unchanged, or the strided layout when ``None``."""
    if layout is None:
        return torch.strided
    return layout
|
| 1773 |
+
|
| 1774 |
+
|
| 1775 |
+
def clone_preserve_strides(x):
    """Clones ``x`` into fresh storage while preserving its exact size,
    strides, and storage offset (a plain ``clone()`` would compact the
    layout): the minimal backing storage region is copied as a flat buffer
    and re-viewed with the original geometry."""
    # Number of storage elements the original view can reach.
    needed_size = compute_required_storage_length(
        x.size(), x.stride(), x.storage_offset()
    )
    # Our eager implementations for *_scatter ops are all primitives w.r.t autograd,
    # so these as_strided() calls are not seen by autograd.
    # We need to mimic this behavior in our ref/prim implementations.
    # TODO: a better way to handle this would be with a new op, "_unsafe_as_strided"
    # We should revisit this when we add a compositional as_strided op,
    # and also as part of https://github.com/pytorch/pytorch/issues/90507
    try:
        # Temporarily exclude ADInplaceOrView so the as_strided() calls below
        # bypass autograd's view tracking; the prior TLS flag is saved so it
        # can be restored in the finally block.
        old = torch._C._dispatch_tls_is_dispatch_key_excluded(
            torch._C.DispatchKey.ADInplaceOrView
        )
        torch._C._dispatch_tls_set_dispatch_key_excluded(
            torch._C.DispatchKey.ADInplaceOrView, True
        )
        # Copy the reachable storage as a flat 1-D buffer, then view it with
        # the original geometry.
        buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone()
        return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
    finally:
        # Restore the previous exclusion state even if cloning raised.
        torch._C._dispatch_tls_set_dispatch_key_excluded(
            torch._C.DispatchKey.ADInplaceOrView, old
        )
|
| 1798 |
+
|
| 1799 |
+
|
| 1800 |
+
def alert_not_deterministic(caller: str):
    """Warns or errors (per torch.use_deterministic_algorithms settings) that
    ``caller`` has no deterministic implementation. No-op when deterministic
    algorithms are disabled."""
    if not torch.are_deterministic_algorithms_enabled():
        return
    if torch.is_deterministic_algorithms_warn_only_enabled():
        warnings.warn(
            f"{caller} does not have a deterministic implementation, but you set "
            f"'torch.use_deterministic_algorithms(True, warn_only=True)'. "
            f"You can file an issue at https://github.com/pytorch/pytorch/issues "
            f"to help us prioritize adding deterministic support for this operation."
        )
    else:
        torch._check(
            False,
            lambda: (
                f"{caller} does not have a deterministic implementation, but you set "
                f"'torch.use_deterministic_algorithms(True)'. You can turn off "
                f"determinism just for this operation, or you can use the "
                f"'warn_only=True' option, if that's acceptable for your application. "
                f"You can also file an issue at https://github.com/pytorch/pytorch/issues "
                f"to help us prioritize adding deterministic support for this operation."
            ),
        )
|
| 1821 |
+
|
| 1822 |
+
|
| 1823 |
+
class CUDARngStateHelper:
    # Static helpers for reading/writing the CUDA RNG state as a
    # (seed, offset) pair of tensors.

    @staticmethod
    def get_torch_state_as_tuple(fake_mode=nullcontext()):
        """Returns the current CUDA RNG state as ``(seed, offset)`` scalar tensors.

        fake_mode: a context manager entered while creating the tensors —
            presumably a FakeTensorMode so the tensors are fake under tracing
            (TODO confirm); defaults to a no-op context.

        Raises:
            RuntimeError: if CUDA is not available.
        """
        if not torch.cuda.is_available():
            raise RuntimeError("CUDA not available")

        with fake_mode:
            seed = torch.tensor(torch.cuda.initial_seed())
            offset = torch.tensor(torch.cuda._get_rng_state_offset())
            return seed, offset

    @staticmethod
    def set_torch_state_tensor(seed, offset):
        """Sets the CUDA RNG state from ``seed`` and ``offset`` scalar tensors."""
        # Rng state is [64-bit seed, 64-bit offset]
        seed_portion = seed.reshape([1]).view(torch.uint8)
        offset_portion = offset.reshape([1]).view(torch.uint8)
        new_state = torch.cat([seed_portion, offset_portion])
        torch.cuda.set_rng_state(new_state)

    @staticmethod
    def set_new_offset(relative_offset):
        """Sets the CUDA RNG offset from a scalar tensor (via ``.item()``)."""
        torch.cuda._set_rng_state_offset(relative_offset.item())
|
llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (262 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/amp/autocast_mode.py
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
from typing import Any, Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch.types import _dtype
|
| 8 |
+
|
| 9 |
+
__all__ = ["autocast_decorator", "autocast"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def autocast_decorator(autocast_instance, func):
    """Wraps ``func`` so every call runs inside ``autocast_instance`` (used to
    apply an autocast context manager as a decorator)."""

    @functools.wraps(func)
    def decorate_autocast(*args, **kwargs):
        with autocast_instance:
            return func(*args, **kwargs)

    # TorchScript cannot compile autocast-decorated functions; mark the
    # wrapper so scripting produces a clear error.
    decorate_autocast.__script_unsupported = "@autocast() decorator is not supported in script mode"  # type: ignore[attr-defined]
    return decorate_autocast
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class autocast:
    r"""
    Instances of :class:`autocast` serve as context managers or decorators that
    allow regions of your script to run in mixed precision.

    In these regions, ops run in an op-specific dtype chosen by autocast
    to improve performance while maintaining accuracy.
    See the :ref:`Autocast Op Reference<autocast-op-reference>` for details.

    When entering an autocast-enabled region, Tensors may be any type.
    You should not call ``half()`` or ``bfloat16()`` on your model(s) or inputs when using autocasting.

    :class:`autocast` should wrap only the forward pass(es) of your network, including the loss
    computation(s). Backward passes under autocast are not recommended.
    Backward ops run in the same type that autocast used for corresponding forward ops.

    Example for CUDA Devices::

        # Creates model and optimizer in default precision
        model = Net().cuda()
        optimizer = optim.SGD(model.parameters(), ...)

        for input, target in data:
            optimizer.zero_grad()

            # Enables autocasting for the forward pass (model + loss)
            with torch.autocast(device_type="cuda"):
                output = model(input)
                loss = loss_fn(output, target)

            # Exits the context manager before backward()
            loss.backward()
            optimizer.step()

    See the :ref:`CUDA Automatic Mixed Precision examples<amp-examples>` for usage (along with gradient scaling)
    in more complex scenarios (e.g., gradient penalty, multiple models/losses, custom autograd functions).

    :class:`autocast` can also be used as a decorator, e.g., on the ``forward`` method of your model::

        class AutocastModel(nn.Module):
            ...
            @torch.autocast(device_type="cuda")
            def forward(self, input):
                ...

    Floating-point Tensors produced in an autocast-enabled region may be ``float16``.
    After returning to an autocast-disabled region, using them with floating-point
    Tensors of different dtypes may cause type mismatch errors. If so, cast the Tensor(s)
    produced in the autocast region back to ``float32`` (or other dtype if desired).
    If a Tensor from the autocast region is already ``float32``, the cast is a no-op,
    and incurs no additional overhead.
    CUDA Example::

        # Creates some tensors in default dtype (here assumed to be float32)
        a_float32 = torch.rand((8, 8), device="cuda")
        b_float32 = torch.rand((8, 8), device="cuda")
        c_float32 = torch.rand((8, 8), device="cuda")
        d_float32 = torch.rand((8, 8), device="cuda")

        with torch.autocast(device_type="cuda"):
            # torch.mm is on autocast's list of ops that should run in float16.
            # Inputs are float32, but the op runs in float16 and produces float16 output.
            # No manual casts are required.
            e_float16 = torch.mm(a_float32, b_float32)
            # Also handles mixed input types
            f_float16 = torch.mm(d_float32, e_float16)

        # After exiting autocast, calls f_float16.float() to use with d_float32
        g_float32 = torch.mm(d_float32, f_float16.float())

    CPU Training Example::

        # Creates model and optimizer in default precision
        model = Net()
        optimizer = optim.SGD(model.parameters(), ...)

        for epoch in epochs:
            for input, target in data:
                optimizer.zero_grad()

                # Runs the forward pass with autocasting.
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    output = model(input)
                    loss = loss_fn(output, target)

                loss.backward()
                optimizer.step()


    CPU Inference Example::

        # Creates model in default precision
        model = Net().eval()

        with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
            for input in data:
                # Runs the forward pass with autocasting.
                output = model(input)

    CPU Inference Example with Jit Trace::

        class TestModel(nn.Module):
            def __init__(self, input_size, num_classes):
                super().__init__()
                self.fc1 = nn.Linear(input_size, num_classes)
            def forward(self, x):
                return self.fc1(x)

        input_size = 2
        num_classes = 2
        model = TestModel(input_size, num_classes).eval()

        # For now, we suggest to disable the Jit Autocast Pass,
        # As the issue: https://github.com/pytorch/pytorch/issues/75956
        torch._C._jit_set_autocast_mode(False)

        with torch.cpu.amp.autocast(cache_enabled=False):
            model = torch.jit.trace(model, torch.randn(1, input_size))
            model = torch.jit.freeze(model)
        # Models Run
        for _ in range(3):
            model(torch.randn(1, input_size))

    Type mismatch errors *in* an autocast-enabled region are a bug; if this is what you observe,
    please file an issue.

    ``autocast(enabled=False)`` subregions can be nested in autocast-enabled regions.
    Locally disabling autocast can be useful, for example, if you want to force a subregion
    to run in a particular ``dtype``. Disabling autocast gives you explicit control over
    the execution type. In the subregion, inputs from the surrounding region
    should be cast to ``dtype`` before use::

        # Creates some tensors in default dtype (here assumed to be float32)
        a_float32 = torch.rand((8, 8), device="cuda")
        b_float32 = torch.rand((8, 8), device="cuda")
        c_float32 = torch.rand((8, 8), device="cuda")
        d_float32 = torch.rand((8, 8), device="cuda")

        with torch.autocast(device_type="cuda"):
            e_float16 = torch.mm(a_float32, b_float32)
            with torch.autocast(device_type="cuda", enabled=False):
                # Calls e_float16.float() to ensure float32 execution
                # (necessary because e_float16 was created in an autocasted region)
                f_float32 = torch.mm(c_float32, e_float16.float())

            # No manual casts are required when re-entering the autocast-enabled region.
            # torch.mm again runs in float16 and produces float16 output, regardless of input types.
            g_float16 = torch.mm(d_float32, f_float32)

    The autocast state is thread-local. If you want it enabled in a new thread, the context manager or decorator
    must be invoked in that thread. This affects :class:`torch.nn.DataParallel` and
    :class:`torch.nn.parallel.DistributedDataParallel` when used with more than one GPU per process
    (see :ref:`Working with Multiple GPUs<amp-multigpu>`).

    Args:
        device_type(str, required): Device type to use. Possible values are: 'cuda', 'cpu', 'xpu' and 'hpu'.
            The type is the same as the `type` attribute of a :class:`torch.device`.
            Thus, you may obtain the device type of a tensor using `Tensor.device.type`.
        enabled(bool, optional): Whether autocasting should be enabled in the region.
            Default: ``True``
        dtype(torch_dtype, optional): Whether to use torch.float16 or torch.bfloat16.
        cache_enabled(bool, optional): Whether the weight cache inside autocast should be enabled.
            Default: ``True``
    """

    def __init__(
        self,
        device_type: str,
        dtype: Optional[_dtype] = None,
        enabled: bool = True,
        cache_enabled: Optional[bool] = None,
    ):
        # TorchScript path: store the arguments verbatim and skip all runtime
        # validation (the scripted form requires an explicit dtype).
        if torch._jit_internal.is_scripting():
            self._enabled = enabled
            self.device = device_type
            self.fast_dtype = dtype
            # TODO: support get_autocast_gpu/cpu_dtype
            assert dtype is not None
            return
        self.device = device_type
        self.custom_backend_name = torch._C._get_privateuse1_backend_name()
        # Seed `fast_dtype` with the device's current global autocast dtype;
        # an explicit `dtype` argument overrides it further below.
        if self.device == "cuda":
            self.fast_dtype = torch.get_autocast_gpu_dtype()
        elif self.device == "cpu":
            self.fast_dtype = torch.get_autocast_cpu_dtype()
        elif self.device == "xpu":
            self.fast_dtype = torch.xpu.get_autocast_xpu_dtype()  # type: ignore[attr-defined]
        elif self.device == "ipu":
            self.fast_dtype = torch.get_autocast_ipu_dtype()  # type: ignore[attr-defined]
        elif self.device == "hpu":
            self.fast_dtype = torch.hpu.get_autocast_hpu_dtype()  # type: ignore[attr-defined]
        elif self.device == "xla":
            self.fast_dtype = torch.get_autocast_xla_dtype()  # type: ignore[attr-defined]
        elif self.device == self.custom_backend_name:
            # A PrivateUse1 (out-of-tree) backend must have registered a
            # device module exposing this exact autocast surface.
            necessary_funcs = [
                "is_autocast_enabled",
                "set_autocast_enabled",
                "get_autocast_dtype",
                "set_autocast_dtype",
                "get_amp_supported_dtype",
            ]
            message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not "
            message += "registered a module or the module miss some necessary funcs. The backend should register "
            message += "a module by `torch._register_device_module`, and the module must have these funcs: \n"
            message += "`is_autocast_enabled() -> bool`, `set_autocast_enabled(bool) -> None`, "
            message += "`get_autocast_dtype() -> torch.dtype`, `set_autocast_dtype(torch.dtype) "
            message += (
                "-> None` and `get_amp_supported_dtype() -> List[torch.dtype]`. \n"
            )

            assert hasattr(torch, self.custom_backend_name), message
            self.custom_device_mod = getattr(torch, self.custom_backend_name)
            for func in necessary_funcs:
                assert hasattr(self.custom_device_mod, func), (
                    message + f"But the func `{func}` is missing. \n"
                )

            self.fast_dtype = self.custom_device_mod.get_autocast_dtype()
        else:
            raise RuntimeError(
                f"User specified an unsupported autocast device_type '{self.device}'"
            )
        self._cache_enabled = torch.is_autocast_cache_enabled()
        # Silently disable (with a warning) instead of raising when CUDA AMP
        # is requested but definitely unavailable on this machine.
        if (
            enabled
            and torch.cuda.amp.common.amp_definitely_not_available()
            and self.device == "cuda"
        ):
            warnings.warn(
                "User provided device_type of 'cuda', but CUDA is not available. Disabling"
            )
            enabled = False
        # Explicit arguments take precedence over the current global state.
        if dtype is not None:
            self.fast_dtype = dtype
        if cache_enabled is not None:
            self._cache_enabled = cache_enabled

        # Per-device dtype validation: unsupported dtypes disable autocast
        # with a warning (CUDA + unsupported bf16 is the one hard error).
        if self.device == "cpu":
            supported_dtype = [torch.bfloat16]
            if self.fast_dtype not in supported_dtype:
                error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n"
                error_message += (
                    "CPU Autocast only supports dtype of torch.bfloat16 currently."
                )
                warnings.warn(error_message)
                enabled = False
        elif self.device == "xpu":
            supported_dtype = [torch.bfloat16, torch.float16]
            if self.fast_dtype not in supported_dtype:
                error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n"
                error_message += "XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently."
                warnings.warn(error_message)
                enabled = False
        elif self.device == "ipu":
            supported_dtypes = [torch.bfloat16, torch.float16]
            if self.fast_dtype not in supported_dtypes:
                error_message = "In IPU autocast, but the target dtype is not supported. Disabling autocast.\n"
                error_message += "IPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently."
                warnings.warn(error_message)
                enabled = False
        elif self.device == "hpu":
            supported_dtype = [torch.bfloat16, torch.float16]
            if self.fast_dtype not in supported_dtype:
                error_message = "In HPU autocast, but the target dtype is not supported. Disabling autocast.\n"
                error_message += "HPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently."
                warnings.warn(error_message)
                enabled = False
        elif self.device == self.custom_backend_name:
            supported_dtype = self.custom_device_mod.get_amp_supported_dtype()
            if self.fast_dtype not in supported_dtype:
                error_message = f"In {self.custom_backend_name} autocast, but the target dtype is not supported. "
                error_message += f"Disabling autocast.\n {self.custom_backend_name} Autocast only supports dtypes of "
                error_message += (
                    ", ".join(str(dtype) for dtype in supported_dtype) + " currently."
                )
                warnings.warn(error_message)
                enabled = False
        elif self.device == "cuda":
            if (
                enabled
                and self.fast_dtype == torch.bfloat16
                and not torch.cuda.is_bf16_supported()
            ):
                raise RuntimeError(
                    "Current CUDA Device does not support bfloat16. Please switch dtype to float16."
                )
        elif self.device == "xla":
            supported_dtype = [torch.bfloat16]
            if self.fast_dtype not in supported_dtype:
                error_message = "In XLA autocast, but the target dtype is not supported. Disabling autocast.\n"
                error_message += (
                    "XLA Autocast only supports dtype of torch.bfloat16 currently."
                )
                warnings.warn(error_message)
                enabled = False
        self._enabled = enabled

    def __enter__(self):
        # Enter the region: snapshot the current thread-local autocast state
        # (enabled flag, dtype, cache flag) so __exit__ can restore it, then
        # install this instance's settings and bump the nesting counter.
        if torch._jit_internal.is_scripting():
            assert self.fast_dtype is not None
            return self

        self.prev_cache_enabled = torch.is_autocast_cache_enabled()
        if self.device == "cpu":
            self.prev = torch.is_autocast_cpu_enabled()
            self.prev_fastdtype = torch.get_autocast_cpu_dtype()
            torch.set_autocast_cpu_enabled(self._enabled)
            torch.set_autocast_cpu_dtype(self.fast_dtype)  # type: ignore[arg-type]
            torch.autocast_increment_nesting()
        elif self.device == "xpu":
            self.prev = torch.xpu.is_autocast_xpu_enabled()  # type: ignore[attr-defined]
            self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype()  # type: ignore[attr-defined]
            torch.xpu.set_autocast_xpu_enabled(self._enabled)  # type: ignore[attr-defined]
            torch.xpu.set_autocast_xpu_dtype(self.fast_dtype)  # type: ignore[attr-defined]
            torch.autocast_increment_nesting()
        elif self.device == "ipu":
            self.prev = torch.is_autocast_ipu_enabled()  # type: ignore[attr-defined]
            self.prev_fastdtype = torch.get_autocast_ipu_dtype()  # type: ignore[attr-defined]
            torch.set_autocast_ipu_enabled(self._enabled)  # type: ignore[attr-defined]
            torch.set_autocast_ipu_dtype(self.fast_dtype)  # type: ignore[attr-defined]
            torch.autocast_increment_nesting()
        elif self.device == "hpu":
            self.prev = torch.hpu.is_autocast_hpu_enabled()  # type: ignore[attr-defined]
            self.prev_fastdtype = torch.hpu.get_autocast_hpu_dtype()  # type: ignore[attr-defined]
            torch.hpu.set_autocast_hpu_enabled(self._enabled)  # type: ignore[attr-defined]
            torch.hpu.set_autocast_hpu_dtype(self.fast_dtype)  # type: ignore[attr-defined]
            torch.autocast_increment_nesting()
        elif self.device == "xla":
            self.prev = torch.is_autocast_xla_enabled()  # type: ignore[attr-defined]
            self.prev_fastdtype = torch.get_autocast_xla_dtype()  # type: ignore[attr-defined]
            torch.set_autocast_xla_enabled(self._enabled)  # type: ignore[attr-defined]
            torch.set_autocast_xla_dtype(self.fast_dtype)  # type: ignore[attr-defined]
            torch.autocast_increment_nesting()
        elif self.device == self.custom_backend_name:
            self.prev = self.custom_device_mod.is_autocast_enabled()
            self.prev_fastdtype = self.custom_device_mod.get_autocast_dtype()
            self.custom_device_mod.set_autocast_enabled(self._enabled)
            self.custom_device_mod.set_autocast_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:
            # "cuda" (and anything that fell through __init__'s gpu branch).
            self.prev = torch.is_autocast_enabled()
            self.prev_fastdtype = torch.get_autocast_gpu_dtype()
            torch.set_autocast_gpu_dtype(self.fast_dtype)  # type: ignore[arg-type]
            torch.set_autocast_enabled(self._enabled)
            torch.autocast_increment_nesting()
        torch.set_autocast_cache_enabled(self._cache_enabled)

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):  # type: ignore[override]
        # Restore the state captured in __enter__. Returning False propagates
        # any in-flight exception unchanged.
        if torch._jit_internal.is_scripting():
            return

        # Drop the cache when we exit to a nesting level that's outside any instance of autocast.
        if self.device == "cpu":
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            torch.set_autocast_cpu_enabled(self.prev)
            torch.set_autocast_cpu_dtype(self.prev_fastdtype)
        elif self.device == "xpu":
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            torch.xpu.set_autocast_xpu_enabled(self.prev)  # type: ignore[attr-defined]
            torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
        elif self.device == "ipu":
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            torch.set_autocast_ipu_enabled(self.prev)  # type: ignore[attr-defined]
            torch.set_autocast_ipu_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
        elif self.device == "hpu":
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            torch.hpu.set_autocast_hpu_enabled(self.prev)  # type: ignore[attr-defined]
            torch.hpu.set_autocast_hpu_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
        elif self.device == "xla":
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            torch.set_autocast_xla_enabled(self.prev)  # type: ignore[attr-defined]
            torch.set_autocast_xla_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
        elif self.device == self.custom_backend_name:
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            self.custom_device_mod.set_autocast_enabled(self.prev)
            self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype)
        else:
            if torch.autocast_decrement_nesting() == 0:
                torch.clear_autocast_cache()
            torch.set_autocast_enabled(self.prev)
            torch.set_autocast_gpu_dtype(self.prev_fastdtype)
        torch.set_autocast_cache_enabled(self.prev_cache_enabled)
        return False

    def __call__(self, func):
        # Decorator form: wrap `func` so it runs inside this autocast region.
        # Under scripting the wrapper is unsupported, so return `func` as-is.
        if torch._jit_internal.is_scripting():
            return func
        return autocast_decorator(self, func)
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
# These functions aren't meant for public usage.
|
| 419 |
+
# They are what we trace into a graph during pre_dispatch tracing
|
| 420 |
+
# when we encounter an autocast context manager.
|
| 421 |
+
def _enter_autocast(*vals):
|
| 422 |
+
# For pre-dispatch tracing, if a TorchFunction mode is active, we'll want to trace this into a graph.
|
| 423 |
+
if torch._C._is_torch_function_mode_enabled():
|
| 424 |
+
return torch.overrides.handle_torch_function(
|
| 425 |
+
torch.amp._enter_autocast, [], *vals
|
| 426 |
+
)
|
| 427 |
+
mode = torch.amp.autocast(*vals)
|
| 428 |
+
mode.__enter__()
|
| 429 |
+
return mode
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def _exit_autocast(mode):
|
| 433 |
+
if torch._C._is_torch_function_mode_enabled():
|
| 434 |
+
return torch.overrides.handle_torch_function(torch.amp._exit_autocast, [], mode)
|
| 435 |
+
mode.__exit__(None, None, None)
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from torch.fx.node import Node
|
| 4 |
+
from torch.fx._symbolic_trace import symbolic_trace
|
| 5 |
+
from torch.fx.passes.tools_common import legalize_graph
|
| 6 |
+
import itertools
|
| 7 |
+
import operator
|
| 8 |
+
|
| 9 |
+
from typing import Dict, List, Tuple
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def split_result_tensors(
    result: torch.Tensor, inputs: List[torch.Tensor]
) -> Tuple[torch.Tensor, ...]:
    """Split a merged-matmul result back into one chunk per original input.

    Used by the merge_matmul graph transformation: the merged result is
    partitioned along dim 0 using each input's leading dimension.

    Arguments:
        result: The merged matmul result tensor.
        inputs: The list of inputs that were merged into one for the matmul.

    Returns:
        Tuple of matmul results, one per input tensor.
    """
    # While the fx tracer runs, `result` is a Proxy and real shapes are not
    # available, so fall back to zero-sized placeholder split points.
    tracing = isinstance(result, torch.fx.Proxy)
    chunk_sizes = [0 if tracing else t.shape[0] for t in inputs]
    return torch.split(result, chunk_sizes)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def may_depend_on(a: Node, b: Node, search_depth: int = 6):
    """
    Determine if one node depends on another in a torch.fx.Graph.

    Arguments:
        a: The node that may have a dependency on b.
        b: The node that a may have a dependency on.
        search_depth: In the case of an indirect dependency, this function
                      searches upto this many nodes away in search of a
                      data dependency. If none is found, the function
                      makes the conservative assumption that there is a
                      dependency.

    Returns:
        True if a may depend on b, False if it definitely does not.
    """
    # A node trivially "depends" on itself.
    if a == b:
        return True

    # Nothing feeds into `a`, so it cannot depend on anything else.
    if not a.all_input_nodes:
        return False

    # Budget exhausted without a conclusion: conservatively assume there is
    # a dependency.
    if search_depth == 0:
        return True

    # Otherwise `a` depends on `b` iff any of its inputs (transitively) does.
    return any(
        may_depend_on(producer, b, search_depth - 1)
        for producer in a.all_input_nodes
    )
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def are_nodes_independent(nodes: List[Node]):
    """
    Check if all of the given nodes are pairwise-data independent.

    Arguments:
        nodes: The nodes to check for data dependencies.

    Returns:
        True if no pair in nodes has a data dependency (in either direction),
        False otherwise.
    """
    # A single dependency in either direction between any pair disqualifies
    # the whole set.
    for lhs, rhs in itertools.combinations(nodes, 2):
        if may_depend_on(lhs, rhs) or may_depend_on(rhs, lhs):
            return False

    return True
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def merge_matmul(in_mod: torch.nn.Module):
    """
    A graph transformation that merges matrix multiplication operations that share the same right-hand
    side operand into one large matrix multiplication.
               ____      _________        _________
      ----    |    |    |         |     M|  A * C  |
     M| A  |  T| B | * K|    C    | =    |---------|
      ----   ,|    |    |         |     T|  B * C  |
       K      ----      ---------        ---------
                K           R                R

    Arguments:
        in_mod: The module to transform. It is symbolically traced, so it
                must be traceable by ``torch.fx``.

    Returns:
        A new ``GraphModule`` with eligible matmuls merged; all edits are
        applied to the traced copy, not to ``in_mod`` itself.
    """
    gm = symbolic_trace(in_mod)

    # Maps from a matmul operand (Node, or attribute name for get_attr
    # operands) to the matmul nodes that use it on that side.
    rhs_users: Dict[Node, List[Node]] = {}
    lhs_users: Dict[Node, List[Node]] = {}

    # Populate rhs_users and lhs_users - maps from LHS/RHS matrix multiply operands to
    # the matmul of which they are the LHS/RHS.
    for node in gm.graph.nodes:
        if node.op != "call_function" or node.target is not torch.matmul:
            continue

        lhs, rhs = node.args

        # TODO: Properly handle aliasing caused by get_attr. For now,
        # use the attribute name as the operand if the node is a
        # get_attr.
        lhs = lhs.target if lhs.op == "get_attr" else lhs
        rhs = rhs.target if rhs.op == "get_attr" else rhs

        lhs_users.setdefault(lhs, []).append(node)
        rhs_users.setdefault(rhs, []).append(node)

    for rhs, mms in rhs_users.items():
        # There must be at least two matmuls for a merge to make sense.
        if len(mms) < 2:
            continue

        # All matmuls must not depend on each other directly or indirectly
        # in order for the merge to be possible.
        if not are_nodes_independent(mms):
            continue

        lhs_vals = [mm.args[0] for mm in mms]

        # Merge the matmul.
        # Collect a list of LHS operands and the single RHS operand.
        # (String operands were attribute names; materialize them back into
        # get_attr nodes.)
        lhs = [gm.graph.get_attr(l) if isinstance(l, str) else l for l in lhs_vals]
        rhs = gm.graph.get_attr(rhs) if isinstance(rhs, str) else rhs

        # Concatenate all the LHS operands.
        merge_mm_cat = gm.graph.call_function(torch.cat, (lhs,), {})

        # Multiply the concatenated LHS operands with the one RHS. This will produce
        # the same results as all the individual matmuls involving rhs in the original graph,
        # but they will all be concatenated together.
        merge_mm = gm.graph.call_function(torch.matmul, (merge_mm_cat, rhs,), {})

        # Split the result of the merged matmul using the shapes of the LHS operands
        # to ascertain how large each chunk should be.
        merge_mm_split = gm.graph.call_function(
            split_result_tensors, (merge_mm, lhs), {}
        )
        merge_mm_res = [
            gm.graph.call_function(operator.getitem, (merge_mm_split, out), {})
            for out in range(len(lhs))
        ]

        # Replace all uses of the original, unmerged matmuls with the equivalent split chunk from the merged matmul.
        for old, new in zip(mms, merge_mm_res):
            old.replace_all_uses_with(new)
            gm.graph.erase_node(old)

    # All of the new nodes created above were inserted at the end, so we need to sort
    # the nodes topologically to make sure all definitions precede uses.
    legalize_graph(gm)

    gm.recompile()
    gm.graph.lint()
    return gm
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/normalize.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from typing import Any, Callable, Dict, Tuple, Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.fx
|
| 6 |
+
import torch.fx as fx
|
| 7 |
+
from torch.fx import Transformer, Proxy
|
| 8 |
+
from torch.fx.node import Argument, Target, Node, map_aggregate
|
| 9 |
+
from torch.fx.operator_schemas import (
|
| 10 |
+
normalize_module,
|
| 11 |
+
normalize_function,
|
| 12 |
+
create_type_hint,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from .schema_type_annotation import AnnotateTypesWithSchema
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class NormalizeArgs(Transformer):
|
| 19 |
+
"""
|
| 20 |
+
Normalize arguments to Python targets. This means that
|
| 21 |
+
`args/kwargs` will be matched up to the module/functional's
|
| 22 |
+
signature and rewritten to exclusively kwargs in positional order
|
| 23 |
+
if `normalize_to_only_use_kwargs` is true. Also populates default
|
| 24 |
+
values. Does not support positional-only parameters or varargs
|
| 25 |
+
parameters (*args, **kwargs).
|
| 26 |
+
|
| 27 |
+
If the nodes have 'type' metadata, it will use it to disambiguate
|
| 28 |
+
overloads. Otherwise, it will throw an error.
|
| 29 |
+
|
| 30 |
+
Example usage:
|
| 31 |
+
m = torchvision.models.resnet18()
|
| 32 |
+
traced = torch.fx.symbolic_trace(m)
|
| 33 |
+
traced = NormalizeArgs(traced).transform()
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(
|
| 37 |
+
self, module: torch.fx.GraphModule, normalize_to_only_use_kwargs: bool = True
|
| 38 |
+
):
|
| 39 |
+
super().__init__(module)
|
| 40 |
+
self.node_map: Dict[Proxy, Node] = {}
|
| 41 |
+
self.normalize_to_only_use_kwargs = normalize_to_only_use_kwargs
|
| 42 |
+
|
| 43 |
+
def run_node(self, n: Node) -> Any:
|
| 44 |
+
args, kwargs = self.fetch_args_kwargs_from_env(n)
|
| 45 |
+
|
| 46 |
+
def get_type(arg):
|
| 47 |
+
if isinstance(arg, fx.Node):
|
| 48 |
+
return n.meta["type"] if "type" in n.meta else None
|
| 49 |
+
return type(arg)
|
| 50 |
+
|
| 51 |
+
arg_types = map_aggregate(n.args, get_type)
|
| 52 |
+
assert isinstance(arg_types, tuple)
|
| 53 |
+
arg_types = tuple([create_type_hint(i) for i in arg_types])
|
| 54 |
+
kwarg_types = {k: get_type(v) for k, v in kwargs.items()}
|
| 55 |
+
if n.op == "call_function":
|
| 56 |
+
out = self.call_function(n.target, args, kwargs, arg_types, kwarg_types)
|
| 57 |
+
else:
|
| 58 |
+
out = super().run_node(n)
|
| 59 |
+
if n.op != "output":
|
| 60 |
+
self.node_map[out] = n
|
| 61 |
+
out.node.meta = n.meta
|
| 62 |
+
out.node.type = n.type
|
| 63 |
+
return out
|
| 64 |
+
|
| 65 |
+
def call_function(
|
| 66 |
+
self,
|
| 67 |
+
target: Target,
|
| 68 |
+
args: Tuple[Argument, ...],
|
| 69 |
+
kwargs: Dict[str, Any],
|
| 70 |
+
arg_types: Optional[Tuple[Any, ...]] = None,
|
| 71 |
+
kwarg_types: Optional[Dict[str, Any]] = None,
|
| 72 |
+
):
|
| 73 |
+
assert callable(target)
|
| 74 |
+
new_args_and_kwargs = normalize_function(
|
| 75 |
+
target,
|
| 76 |
+
args, # type: ignore[arg-type]
|
| 77 |
+
kwargs,
|
| 78 |
+
arg_types, # type: ignore[arg-type]
|
| 79 |
+
kwarg_types,
|
| 80 |
+
self.normalize_to_only_use_kwargs,
|
| 81 |
+
)
|
| 82 |
+
if new_args_and_kwargs:
|
| 83 |
+
new_args, new_kwargs = new_args_and_kwargs
|
| 84 |
+
return self.tracer.create_proxy(
|
| 85 |
+
"call_function", target, new_args, new_kwargs
|
| 86 |
+
)
|
| 87 |
+
else:
|
| 88 |
+
return super().call_function(target, args, kwargs)
|
| 89 |
+
|
| 90 |
+
def call_module(
|
| 91 |
+
self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
|
| 92 |
+
):
|
| 93 |
+
assert isinstance(target, str)
|
| 94 |
+
new_args_and_kwargs = normalize_module(
|
| 95 |
+
self.module,
|
| 96 |
+
target,
|
| 97 |
+
args, # type: ignore[arg-type]
|
| 98 |
+
kwargs,
|
| 99 |
+
self.normalize_to_only_use_kwargs,
|
| 100 |
+
)
|
| 101 |
+
if new_args_and_kwargs:
|
| 102 |
+
new_args, new_kwargs = new_args_and_kwargs
|
| 103 |
+
return super().call_module(target, new_args, new_kwargs)
|
| 104 |
+
else:
|
| 105 |
+
return super().call_module(target, args, kwargs)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class NormalizeOperators(AnnotateTypesWithSchema):
|
| 109 |
+
"""
|
| 110 |
+
Normalize callsites that are different ways of "spelling" the same
|
| 111 |
+
invocation into a single, canonical call. Currently supports:
|
| 112 |
+
|
| 113 |
+
1. Normalize operators (e.g. operator.add) to the `torch` ops they
|
| 114 |
+
ultimately invoke (e.g. torch.add) when it is possible to statically
|
| 115 |
+
reason that
|
| 116 |
+
|
| 117 |
+
Example usage:
|
| 118 |
+
|
| 119 |
+
m = torchvision.models.resnet18()
|
| 120 |
+
|
| 121 |
+
traced = torch.fx.symbolic_trace(m)
|
| 122 |
+
|
| 123 |
+
traced = NormalizeOperators(traced).transform()
|
| 124 |
+
"""
|
| 125 |
+
|
| 126 |
+
binary_magic_method_remap: Dict[
|
| 127 |
+
Callable[[Any, Any], Any], Callable[[Any, Any], Any]
|
| 128 |
+
] = {
|
| 129 |
+
torch.add: operator.add,
|
| 130 |
+
torch.mul: operator.mul,
|
| 131 |
+
torch.sub: operator.sub,
|
| 132 |
+
torch.div: operator.truediv,
|
| 133 |
+
torch.floor_divide: operator.floordiv,
|
| 134 |
+
torch.remainder: operator.mod,
|
| 135 |
+
torch.eq: operator.eq,
|
| 136 |
+
torch.ne: operator.ne,
|
| 137 |
+
torch.lt: operator.lt,
|
| 138 |
+
torch.le: operator.le,
|
| 139 |
+
torch.gt: operator.gt,
|
| 140 |
+
torch.ge: operator.ge,
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
def call_function(
|
| 144 |
+
self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
|
| 145 |
+
):
|
| 146 |
+
# Normalize operators according to the magic methods implemented on tensors here:
|
| 147 |
+
# https://github.com/pytorch/pytorch/blob/28c5d90b679c6b38bf4183ec99f16d933c2f1bcd/tools/autograd/templates/python_variable_methods.cpp#L1137 # noqa: B950
|
| 148 |
+
|
| 149 |
+
assert callable(target)
|
| 150 |
+
|
| 151 |
+
if target in self.binary_magic_method_remap:
|
| 152 |
+
if len(args) != 2:
|
| 153 |
+
return super().call_function(target, args, kwargs)
|
| 154 |
+
lhs, rhs = args
|
| 155 |
+
|
| 156 |
+
return super().call_function(
|
| 157 |
+
target=self.binary_magic_method_remap[target],
|
| 158 |
+
args=(lhs, rhs),
|
| 159 |
+
kwargs={},
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
return super().call_function(target, args, kwargs)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (562 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc
ADDED
|
Binary file (1.39 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc
ADDED
|
Binary file (2.88 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc
ADDED
|
Binary file (7.55 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc
ADDED
|
Binary file (18.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc
ADDED
|
Binary file (6.36 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc
ADDED
|
Binary file (7.14 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/sparse/__init__.py
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# The Tensor classes are added to this module by python_tensor.cpp
|
| 2 |
+
from typing import Optional, Tuple, List, Union, Any
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch._C import _add_docstr, _sparse # type: ignore[attr-defined]
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
|
| 8 |
+
# Semi structured sparsity support
|
| 9 |
+
from .semi_structured import SparseSemiStructuredTensor, to_sparse_semi_structured
|
| 10 |
+
|
| 11 |
+
# A workaround to support both TorchScript and MyPy:
|
| 12 |
+
from typing import TYPE_CHECKING
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from torch.types import _dtype as DType
|
| 15 |
+
DimOrDims = Optional[Union[int, Tuple[int], List[int]]]
|
| 16 |
+
else:
|
| 17 |
+
# The JIT doesn't understand Union, nor torch.dtype here
|
| 18 |
+
DType = int
|
| 19 |
+
DimOrDims = Optional[Tuple[int]]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
__all__ = [
|
| 23 |
+
'addmm',
|
| 24 |
+
'check_sparse_tensor_invariants',
|
| 25 |
+
'mm',
|
| 26 |
+
'sum',
|
| 27 |
+
'softmax',
|
| 28 |
+
'log_softmax',
|
| 29 |
+
'SparseSemiStructuredTensor',
|
| 30 |
+
'to_sparse_semi_structured',
|
| 31 |
+
'as_sparse_gradcheck',
|
| 32 |
+
]
|
| 33 |
+
|
| 34 |
+
addmm = _add_docstr(_sparse._sparse_addmm, r"""
|
| 35 |
+
sparse.addmm(mat, mat1, mat2, *, beta=1., alpha=1.) -> Tensor
|
| 36 |
+
|
| 37 |
+
This function does exact same thing as :func:`torch.addmm` in the forward,
|
| 38 |
+
except that it supports backward for sparse COO matrix :attr:`mat1`.
|
| 39 |
+
When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
|
| 40 |
+
When inputs are COO tensors, this function also supports backward for both inputs.
|
| 41 |
+
|
| 42 |
+
Supports both CSR and COO storage formats.
|
| 43 |
+
|
| 44 |
+
.. note::
|
| 45 |
+
This function doesn't support computing derivaties with respect to CSR matrices.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
mat (Tensor): a dense matrix to be added
|
| 49 |
+
mat1 (Tensor): a sparse matrix to be multiplied
|
| 50 |
+
mat2 (Tensor): a dense matrix to be multiplied
|
| 51 |
+
beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
|
| 52 |
+
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
|
| 53 |
+
""")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
mm = _add_docstr(_sparse._sparse_mm, r"""
|
| 57 |
+
Performs a matrix multiplication of the sparse matrix :attr:`mat1`
|
| 58 |
+
and the (sparse or strided) matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
|
| 59 |
+
:math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a
|
| 60 |
+
:math:`(n \times p)` tensor.
|
| 61 |
+
When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
|
| 62 |
+
When inputs are COO tensors, this function also supports backward for both inputs.
|
| 63 |
+
|
| 64 |
+
Supports both CSR and COO storage formats.
|
| 65 |
+
|
| 66 |
+
.. note::
|
| 67 |
+
This function doesn't support computing derivaties with respect to CSR matrices.
|
| 68 |
+
|
| 69 |
+
This function also additionally accepts an optional :attr:`reduce` argument that allows
|
| 70 |
+
specification of an optional reduction operation, mathematically performs the following operation:
|
| 71 |
+
|
| 72 |
+
.. math::
|
| 73 |
+
|
| 74 |
+
z_{ij} = \bigoplus_{k = 0}^{K - 1} x_{ik} y_{kj}
|
| 75 |
+
|
| 76 |
+
where :math:`\bigoplus` defines the reduce operator. :attr:`reduce` is implemented only for
|
| 77 |
+
CSR storage format on CPU device.
|
| 78 |
+
|
| 79 |
+
Args:
|
| 80 |
+
mat1 (Tensor): the first sparse matrix to be multiplied
|
| 81 |
+
mat2 (Tensor): the second matrix to be multiplied, which could be sparse or dense
|
| 82 |
+
reduce (str, optional): the reduction operation to apply for non-unique indices
|
| 83 |
+
(:obj:`"sum"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`). Default :obj:`"sum"`.
|
| 84 |
+
|
| 85 |
+
Shape:
|
| 86 |
+
The format of the output tensor of this function follows:
|
| 87 |
+
- sparse x sparse -> sparse
|
| 88 |
+
- sparse x dense -> dense
|
| 89 |
+
|
| 90 |
+
Example::
|
| 91 |
+
|
| 92 |
+
>>> a = torch.tensor([[1., 0, 2], [0, 3, 0]]).to_sparse().requires_grad_()
|
| 93 |
+
>>> a
|
| 94 |
+
tensor(indices=tensor([[0, 0, 1],
|
| 95 |
+
[0, 2, 1]]),
|
| 96 |
+
values=tensor([1., 2., 3.]),
|
| 97 |
+
size=(2, 3), nnz=3, layout=torch.sparse_coo, requires_grad=True)
|
| 98 |
+
>>> b = torch.tensor([[0, 1.], [2, 0], [0, 0]], requires_grad=True)
|
| 99 |
+
>>> b
|
| 100 |
+
tensor([[0., 1.],
|
| 101 |
+
[2., 0.],
|
| 102 |
+
[0., 0.]], requires_grad=True)
|
| 103 |
+
>>> y = torch.sparse.mm(a, b)
|
| 104 |
+
>>> y
|
| 105 |
+
tensor([[0., 1.],
|
| 106 |
+
[6., 0.]], grad_fn=<SparseAddmmBackward0>)
|
| 107 |
+
>>> y.sum().backward()
|
| 108 |
+
>>> a.grad
|
| 109 |
+
tensor(indices=tensor([[0, 0, 1],
|
| 110 |
+
[0, 2, 1]]),
|
| 111 |
+
values=tensor([1., 0., 2.]),
|
| 112 |
+
size=(2, 3), nnz=3, layout=torch.sparse_coo)
|
| 113 |
+
>>> c = a.detach().to_sparse_csr()
|
| 114 |
+
>>> c
|
| 115 |
+
tensor(crow_indices=tensor([0, 2, 3]),
|
| 116 |
+
col_indices=tensor([0, 2, 1]),
|
| 117 |
+
values=tensor([1., 2., 3.]), size=(2, 3), nnz=3,
|
| 118 |
+
layout=torch.sparse_csr)
|
| 119 |
+
>>> y1 = torch.sparse.mm(c, b, 'sum')
|
| 120 |
+
>>> y1
|
| 121 |
+
tensor([[0., 1.],
|
| 122 |
+
[6., 0.]], grad_fn=<SparseMmReduceImplBackward0>)
|
| 123 |
+
>>> y2 = torch.sparse.mm(c, b, 'max')
|
| 124 |
+
>>> y2
|
| 125 |
+
tensor([[0., 1.],
|
| 126 |
+
[6., 0.]], grad_fn=<SparseMmReduceImplBackward0>)
|
| 127 |
+
""")
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
sampled_addmm = _add_docstr(_sparse.sparse_sampled_addmm, r"""
|
| 131 |
+
sparse.sampled_addmm(input, mat1, mat2, *, beta=1., alpha=1., out=None) -> Tensor
|
| 132 |
+
|
| 133 |
+
Performs a matrix multiplication of the dense matrices :attr:`mat1` and :attr:`mat2` at the locations
|
| 134 |
+
specified by the sparsity pattern of :attr:`input`. The matrix :attr:`input` is added to the final result.
|
| 135 |
+
|
| 136 |
+
Mathematically this performs the following operation:
|
| 137 |
+
|
| 138 |
+
.. math::
|
| 139 |
+
|
| 140 |
+
\text{out} = \alpha\ (\text{mat1} \mathbin{@} \text{mat2})*\text{spy}(\text{input}) + \beta\ \text{input}
|
| 141 |
+
|
| 142 |
+
where :math:`\text{spy}(\text{input})` is the sparsity pattern matrix of :attr:`input`, :attr:`alpha`
|
| 143 |
+
and :attr:`beta` are the scaling factors.
|
| 144 |
+
:math:`\text{spy}(\text{input})` has value 1 at the positions where :attr:`input` has non-zero values, and 0 elsewhere.
|
| 145 |
+
|
| 146 |
+
.. note::
|
| 147 |
+
:attr:`input` must be a sparse CSR tensor. :attr:`mat1` and :attr:`mat2` must be dense tensors.
|
| 148 |
+
|
| 149 |
+
Args:
|
| 150 |
+
input (Tensor): a sparse CSR matrix of shape `(m, n)` to be added and used to compute
|
| 151 |
+
the sampled matrix multiplication
|
| 152 |
+
mat1 (Tensor): a dense matrix of shape `(m, k)` to be multiplied
|
| 153 |
+
mat2 (Tensor): a dense matrix of shape `(k, n)` to be multiplied
|
| 154 |
+
|
| 155 |
+
Keyword args:
|
| 156 |
+
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
|
| 157 |
+
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
|
| 158 |
+
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
|
| 159 |
+
|
| 160 |
+
Examples::
|
| 161 |
+
|
| 162 |
+
>>> input = torch.eye(3, device='cuda').to_sparse_csr()
|
| 163 |
+
>>> mat1 = torch.randn(3, 5, device='cuda')
|
| 164 |
+
>>> mat2 = torch.randn(5, 3, device='cuda')
|
| 165 |
+
>>> torch.sparse.sampled_addmm(input, mat1, mat2)
|
| 166 |
+
tensor(crow_indices=tensor([0, 1, 2, 3]),
|
| 167 |
+
col_indices=tensor([0, 1, 2]),
|
| 168 |
+
values=tensor([ 0.2847, -0.7805, -0.1900]), device='cuda:0',
|
| 169 |
+
size=(3, 3), nnz=3, layout=torch.sparse_csr)
|
| 170 |
+
>>> torch.sparse.sampled_addmm(input, mat1, mat2).to_dense()
|
| 171 |
+
tensor([[ 0.2847, 0.0000, 0.0000],
|
| 172 |
+
[ 0.0000, -0.7805, 0.0000],
|
| 173 |
+
[ 0.0000, 0.0000, -0.1900]], device='cuda:0')
|
| 174 |
+
>>> torch.sparse.sampled_addmm(input, mat1, mat2, beta=0.5, alpha=0.5)
|
| 175 |
+
tensor(crow_indices=tensor([0, 1, 2, 3]),
|
| 176 |
+
col_indices=tensor([0, 1, 2]),
|
| 177 |
+
values=tensor([ 0.1423, -0.3903, -0.0950]), device='cuda:0',
|
| 178 |
+
size=(3, 3), nnz=3, layout=torch.sparse_csr)
|
| 179 |
+
""")
|
| 180 |
+
|
| 181 |
+
def sum(input: Tensor, dim: DimOrDims = None,
|
| 182 |
+
dtype: Optional[DType] = None) -> Tensor:
|
| 183 |
+
r"""
|
| 184 |
+
Returns the sum of each row of the sparse tensor :attr:`input` in the given
|
| 185 |
+
dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions,
|
| 186 |
+
reduce over all of them. When sum over all ``sparse_dim``, this method
|
| 187 |
+
returns a dense tensor instead of a sparse tensor.
|
| 188 |
+
|
| 189 |
+
All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting an output
|
| 190 |
+
tensor having :attr:`dim` fewer dimensions than :attr:`input`.
|
| 191 |
+
|
| 192 |
+
During backward, only gradients at ``nnz`` locations of :attr:`input`
|
| 193 |
+
will propagate back. Note that the gradients of :attr:`input` is coalesced.
|
| 194 |
+
|
| 195 |
+
Args:
|
| 196 |
+
input (Tensor): the input sparse tensor
|
| 197 |
+
dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce
|
| 198 |
+
over all dims.
|
| 199 |
+
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
|
| 200 |
+
Default: dtype of :attr:`input`.
|
| 201 |
+
|
| 202 |
+
Example::
|
| 203 |
+
|
| 204 |
+
>>> nnz = 3
|
| 205 |
+
>>> dims = [5, 5, 2, 3]
|
| 206 |
+
>>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
|
| 207 |
+
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
|
| 208 |
+
>>> V = torch.randn(nnz, dims[2], dims[3])
|
| 209 |
+
>>> size = torch.Size(dims)
|
| 210 |
+
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
|
| 211 |
+
>>> S = torch.sparse_coo_tensor(I, V, size)
|
| 212 |
+
>>> S
|
| 213 |
+
tensor(indices=tensor([[2, 0, 3],
|
| 214 |
+
[2, 4, 1]]),
|
| 215 |
+
values=tensor([[[-0.6438, -1.6467, 1.4004],
|
| 216 |
+
[ 0.3411, 0.0918, -0.2312]],
|
| 217 |
+
|
| 218 |
+
[[ 0.5348, 0.0634, -2.0494],
|
| 219 |
+
[-0.7125, -1.0646, 2.1844]],
|
| 220 |
+
|
| 221 |
+
[[ 0.1276, 0.1874, -0.6334],
|
| 222 |
+
[-1.9682, -0.5340, 0.7483]]]),
|
| 223 |
+
size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo)
|
| 224 |
+
|
| 225 |
+
# when sum over only part of sparse_dims, return a sparse tensor
|
| 226 |
+
>>> torch.sparse.sum(S, [1, 3])
|
| 227 |
+
tensor(indices=tensor([[0, 2, 3]]),
|
| 228 |
+
values=tensor([[-1.4512, 0.4073],
|
| 229 |
+
[-0.8901, 0.2017],
|
| 230 |
+
[-0.3183, -1.7539]]),
|
| 231 |
+
size=(5, 2), nnz=3, layout=torch.sparse_coo)
|
| 232 |
+
|
| 233 |
+
# when sum over all sparse dim, return a dense tensor
|
| 234 |
+
# with summed dims squeezed
|
| 235 |
+
>>> torch.sparse.sum(S, [0, 1, 3])
|
| 236 |
+
tensor([-2.6596, -1.1450])
|
| 237 |
+
"""
|
| 238 |
+
if dtype is None:
|
| 239 |
+
if dim is not None:
|
| 240 |
+
return torch._sparse_sum(input, dim)
|
| 241 |
+
else:
|
| 242 |
+
return torch._sparse_sum(input)
|
| 243 |
+
else:
|
| 244 |
+
if dim is not None:
|
| 245 |
+
return torch._sparse_sum(input, dim, dtype=dtype)
|
| 246 |
+
else:
|
| 247 |
+
return torch._sparse_sum(input, dtype=dtype)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
softmax = _add_docstr(_sparse._sparse_softmax, r"""
|
| 251 |
+
sparse.softmax(input, dim, *, dtype=None) -> Tensor
|
| 252 |
+
|
| 253 |
+
Applies a softmax function.
|
| 254 |
+
|
| 255 |
+
Softmax is defined as:
|
| 256 |
+
|
| 257 |
+
:math:`\text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)}`
|
| 258 |
+
|
| 259 |
+
where :math:`i, j` run over sparse tensor indices and unspecified
|
| 260 |
+
entries are ignores. This is equivalent to defining unspecified
|
| 261 |
+
entries as negative infinity so that :math:`exp(x_k) = 0` when the
|
| 262 |
+
entry with index :math:`k` has not specified.
|
| 263 |
+
|
| 264 |
+
It is applied to all slices along `dim`, and will re-scale them so
|
| 265 |
+
that the elements lie in the range `[0, 1]` and sum to 1.
|
| 266 |
+
|
| 267 |
+
Args:
|
| 268 |
+
input (Tensor): input
|
| 269 |
+
dim (int): A dimension along which softmax will be computed.
|
| 270 |
+
dtype (:class:`torch.dtype`, optional): the desired data type
|
| 271 |
+
of returned tensor. If specified, the input tensor is
|
| 272 |
+
casted to :attr:`dtype` before the operation is
|
| 273 |
+
performed. This is useful for preventing data type
|
| 274 |
+
overflows. Default: None
|
| 275 |
+
""")
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
log_softmax = _add_docstr(_sparse._sparse_log_softmax, r"""
|
| 279 |
+
sparse.log_softmax(input, dim, *, dtype=None) -> Tensor
|
| 280 |
+
|
| 281 |
+
Applies a softmax function followed by logarithm.
|
| 282 |
+
|
| 283 |
+
See :class:`~torch.sparse.softmax` for more details.
|
| 284 |
+
|
| 285 |
+
Args:
|
| 286 |
+
input (Tensor): input
|
| 287 |
+
dim (int): A dimension along which softmax will be computed.
|
| 288 |
+
dtype (:class:`torch.dtype`, optional): the desired data type
|
| 289 |
+
of returned tensor. If specified, the input tensor is
|
| 290 |
+
casted to :attr:`dtype` before the operation is
|
| 291 |
+
performed. This is useful for preventing data type
|
| 292 |
+
overflows. Default: None
|
| 293 |
+
""")
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
spdiags = _add_docstr(
|
| 297 |
+
_sparse._spdiags,
|
| 298 |
+
r"""
|
| 299 |
+
sparse.spdiags(diagonals, offsets, shape, layout=None) -> Tensor
|
| 300 |
+
|
| 301 |
+
Creates a sparse 2D tensor by placing the values from rows of
|
| 302 |
+
:attr:`diagonals` along specified diagonals of the output
|
| 303 |
+
|
| 304 |
+
The :attr:`offsets` tensor controls which diagonals are set.
|
| 305 |
+
|
| 306 |
+
- If :attr:`offsets[i]` = 0, it is the main diagonal
|
| 307 |
+
- If :attr:`offsets[i]` < 0, it is below the main diagonal
|
| 308 |
+
- If :attr:`offsets[i]` > 0, it is above the main diagonal
|
| 309 |
+
|
| 310 |
+
The number of rows in :attr:`diagonals` must match the length of :attr:`offsets`,
|
| 311 |
+
and an offset may not be repeated.
|
| 312 |
+
|
| 313 |
+
Args:
|
| 314 |
+
diagonals (Tensor): Matrix storing diagonals row-wise
|
| 315 |
+
offsets (Tensor): The diagonals to be set, stored as a vector
|
| 316 |
+
shape (2-tuple of ints): The desired shape of the result
|
| 317 |
+
Keyword args:
|
| 318 |
+
layout (:class:`torch.layout`, optional): The desired layout of the
|
| 319 |
+
returned tensor. ``torch.sparse_coo``, ``torch.sparse_csc`` and ``torch.sparse_csr``
|
| 320 |
+
are supported. Default: ``torch.sparse_coo``
|
| 321 |
+
|
| 322 |
+
Examples:
|
| 323 |
+
|
| 324 |
+
Set the main and first two lower diagonals of a matrix::
|
| 325 |
+
|
| 326 |
+
>>> diags = torch.arange(9).reshape(3, 3)
|
| 327 |
+
>>> diags
|
| 328 |
+
tensor([[0, 1, 2],
|
| 329 |
+
[3, 4, 5],
|
| 330 |
+
[6, 7, 8]])
|
| 331 |
+
>>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3))
|
| 332 |
+
>>> s
|
| 333 |
+
tensor(indices=tensor([[0, 1, 2, 1, 2, 2],
|
| 334 |
+
[0, 1, 2, 0, 1, 0]]),
|
| 335 |
+
values=tensor([0, 1, 2, 3, 4, 6]),
|
| 336 |
+
size=(3, 3), nnz=6, layout=torch.sparse_coo)
|
| 337 |
+
>>> s.to_dense()
|
| 338 |
+
tensor([[0, 0, 0],
|
| 339 |
+
[3, 1, 0],
|
| 340 |
+
[6, 4, 2]])
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
Change the output layout::
|
| 344 |
+
|
| 345 |
+
>>> diags = torch.arange(9).reshape(3, 3)
|
| 346 |
+
>>> diags
|
| 347 |
+
tensor([[0, 1, 2],[3, 4, 5], [6, 7, 8])
|
| 348 |
+
>>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3), layout=torch.sparse_csr)
|
| 349 |
+
>>> s
|
| 350 |
+
tensor(crow_indices=tensor([0, 1, 3, 6]),
|
| 351 |
+
col_indices=tensor([0, 0, 1, 0, 1, 2]),
|
| 352 |
+
values=tensor([0, 3, 1, 6, 4, 2]), size=(3, 3), nnz=6,
|
| 353 |
+
layout=torch.sparse_csr)
|
| 354 |
+
>>> s.to_dense()
|
| 355 |
+
tensor([[0, 0, 0],
|
| 356 |
+
[3, 1, 0],
|
| 357 |
+
[6, 4, 2]])
|
| 358 |
+
|
| 359 |
+
Set partial diagonals of a large output::
|
| 360 |
+
|
| 361 |
+
>>> diags = torch.tensor([[1, 2], [3, 4]])
|
| 362 |
+
>>> offsets = torch.tensor([0, -1])
|
| 363 |
+
>>> torch.sparse.spdiags(diags, offsets, (5, 5)).to_dense()
|
| 364 |
+
tensor([[1, 0, 0, 0, 0],
|
| 365 |
+
[3, 2, 0, 0, 0],
|
| 366 |
+
[0, 4, 0, 0, 0],
|
| 367 |
+
[0, 0, 0, 0, 0],
|
| 368 |
+
[0, 0, 0, 0, 0]])
|
| 369 |
+
|
| 370 |
+
.. note::
|
| 371 |
+
|
| 372 |
+
When setting the values along a given diagonal the index into the diagonal
|
| 373 |
+
and the index into the row of :attr:`diagonals` is taken as the
|
| 374 |
+
column index in the output. This has the effect that when setting a diagonal
|
| 375 |
+
with a positive offset `k` the first value along that diagonal will be
|
| 376 |
+
the value in position `k` of the row of :attr:`diagonals`
|
| 377 |
+
|
| 378 |
+
Specifying a positive offset::
|
| 379 |
+
|
| 380 |
+
>>> diags = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
|
| 381 |
+
>>> torch.sparse.spdiags(diags, torch.tensor([0, 1, 2]), (5, 5)).to_dense()
|
| 382 |
+
tensor([[1, 2, 3, 0, 0],
|
| 383 |
+
[0, 2, 3, 0, 0],
|
| 384 |
+
[0, 0, 3, 0, 0],
|
| 385 |
+
[0, 0, 0, 0, 0],
|
| 386 |
+
[0, 0, 0, 0, 0]])
|
| 387 |
+
""")
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
class check_sparse_tensor_invariants:
|
| 391 |
+
"""A tool to control checking sparse tensor invariants.
|
| 392 |
+
|
| 393 |
+
The following options exists to manage sparsr tensor invariants
|
| 394 |
+
checking in sparse tensor construction:
|
| 395 |
+
|
| 396 |
+
1. Using a context manager:
|
| 397 |
+
|
| 398 |
+
.. code:: python
|
| 399 |
+
|
| 400 |
+
with torch.sparse.check_sparse_tensor_invariants():
|
| 401 |
+
run_my_model()
|
| 402 |
+
|
| 403 |
+
2. Using a procedural approach:
|
| 404 |
+
|
| 405 |
+
.. code:: python
|
| 406 |
+
|
| 407 |
+
prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled()
|
| 408 |
+
torch.sparse.check_sparse_tensor_invariants.enable()
|
| 409 |
+
|
| 410 |
+
run_my_model()
|
| 411 |
+
|
| 412 |
+
if not prev_checks_enabled:
|
| 413 |
+
torch.sparse.check_sparse_tensor_invariants.disable()
|
| 414 |
+
|
| 415 |
+
3. Using function decoration:
|
| 416 |
+
|
| 417 |
+
.. code:: python
|
| 418 |
+
|
| 419 |
+
@torch.sparse.check_sparse_tensor_invariants()
|
| 420 |
+
def run_my_model():
|
| 421 |
+
...
|
| 422 |
+
|
| 423 |
+
run_my_model()
|
| 424 |
+
|
| 425 |
+
4. Using ``check_invariants`` keyword argument in sparse tensor constructor call.
|
| 426 |
+
For example:
|
| 427 |
+
|
| 428 |
+
>>> torch.sparse_csr_tensor([0, 1, 3], [0, 1], [1, 2], check_invariants=True)
|
| 429 |
+
Traceback (most recent call last):
|
| 430 |
+
File "<stdin>", line 1, in <module>
|
| 431 |
+
RuntimeError: `crow_indices[..., -1] == nnz` is not satisfied.
|
| 432 |
+
"""
|
| 433 |
+
|
| 434 |
+
@staticmethod
|
| 435 |
+
def is_enabled():
|
| 436 |
+
r"""Returns True if the sparse tensor invariants checking is enabled.
|
| 437 |
+
|
| 438 |
+
.. note::
|
| 439 |
+
|
| 440 |
+
Use :func:`torch.sparse.check_sparse_tensor_invariants.enable` or
|
| 441 |
+
:func:`torch.sparse.check_sparse_tensor_invariants.disable` to
|
| 442 |
+
manage the state of the sparse tensor invariants checks.
|
| 443 |
+
"""
|
| 444 |
+
return torch._C._check_sparse_tensor_invariants()
|
| 445 |
+
|
| 446 |
+
@staticmethod
|
| 447 |
+
def enable():
|
| 448 |
+
r"""Enable sparse tensor invariants checking in sparse tensor constructors.
|
| 449 |
+
|
| 450 |
+
.. note::
|
| 451 |
+
|
| 452 |
+
By default, the sparse tensor invariants checks are disabled. Use
|
| 453 |
+
:func:`torch.sparse.check_sparse_tensor_invariants.is_enabled` to
|
| 454 |
+
retrieve the current state of sparse tensor invariants checking.
|
| 455 |
+
|
| 456 |
+
.. note::
|
| 457 |
+
|
| 458 |
+
The sparse tensor invariants check flag is effective to all sparse
|
| 459 |
+
tensor constructors, both in Python and ATen.
|
| 460 |
+
|
| 461 |
+
The flag can be locally overridden by the ``check_invariants``
|
| 462 |
+
optional argument of the sparse tensor constructor functions.
|
| 463 |
+
"""
|
| 464 |
+
torch._C._set_check_sparse_tensor_invariants(True)
|
| 465 |
+
|
| 466 |
+
@staticmethod
|
| 467 |
+
def disable():
|
| 468 |
+
r"""Disable sparse tensor invariants checking in sparse tensor constructors.
|
| 469 |
+
|
| 470 |
+
See :func:`torch.sparse.check_sparse_tensor_invariants.enable` for more information.
|
| 471 |
+
"""
|
| 472 |
+
torch._C._set_check_sparse_tensor_invariants(False)
|
| 473 |
+
|
| 474 |
+
# context manager support
|
| 475 |
+
def __init__(self, enable=True):
|
| 476 |
+
self.state = enable
|
| 477 |
+
self.saved_state : Optional[bool] = None
|
| 478 |
+
|
| 479 |
+
def __enter__(self):
|
| 480 |
+
if self.saved_state is not None:
|
| 481 |
+
raise RuntimeError('This context manager instance is already activated.'
|
| 482 |
+
' Use a different context manager instance for context nesting.')
|
| 483 |
+
self.saved_state = self.is_enabled()
|
| 484 |
+
torch._C._set_check_sparse_tensor_invariants(self.state)
|
| 485 |
+
|
| 486 |
+
def __exit__(self, type, value, traceback):
|
| 487 |
+
assert self.saved_state is not None
|
| 488 |
+
torch._C._set_check_sparse_tensor_invariants(self.saved_state)
|
| 489 |
+
self.saved_state = None
|
| 490 |
+
|
| 491 |
+
# decorator support
|
| 492 |
+
def __call__(self, mth):
|
| 493 |
+
|
| 494 |
+
def test_mth(*args, **kwargs):
|
| 495 |
+
with type(self)(self.state):
|
| 496 |
+
return mth(*args, **kwargs)
|
| 497 |
+
|
| 498 |
+
return test_mth
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def as_sparse_gradcheck(gradcheck):
    """Decorator for torch.autograd.gradcheck or its functools.partial
    variants that extends the gradcheck function with support for input
    functions that operate on and/or return sparse tensors.

    The specified gradcheck function itself is guaranteed to operate
    on strided tensors only.

    For example:

    >>> gradcheck = torch.sparse.as_sparse_gradcheck(torch.autograd.gradcheck)
    >>> x = torch.tensor([[0, 1], [2, 3]], dtype=torch.float64).to_sparse_coo().requires_grad_(True)
    >>> gradcheck(lambda x: x.to_sparse_csr(), x)
    True
    """

    def gradcheck_with_sparse_support(func, inputs, **kwargs):
        """Same as :func:`torch.autograd.gradcheck` but with sparse tensors
        inputs and outputs support.
        """
        # 'masked' is consumed here; the wrapped gradcheck never sees it.
        masked = kwargs.pop('masked', False)
        sparse_layouts = {torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}
        sparse_compressed_layouts = {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}
        sparse_block_layouts = {torch.sparse_bsr, torch.sparse_bsc}
        # Marker token: each sparse input is flattened into the triple
        # (marker, metadata dict, values tensor) inside the args sequence.
        STRIDED_REPRESENTATION = '__STRIDED_REPRESENTATION__'

        def convert_to_strided_representation(args):
            """Convert differentiable non-strided tensors to a representation
            containing differentiable strided tensors.
            """
            if not isinstance(args, (list, tuple)):
                args = args,
            new_args: List[Any] = []
            for obj in args:
                if isinstance(obj, torch.Tensor) and obj.requires_grad and obj.layout in sparse_layouts:
                    d = dict(layout=obj.layout, shape=obj.shape)
                    if not masked:
                        # Materialize unspecified elements with zero values
                        batch_dim = obj.ndim - obj.dense_dim() - obj.sparse_dim()
                        blocksize = obj.values().shape[batch_dim + 1:batch_dim + 3] if obj.layout in sparse_block_layouts else None
                        full_mask = torch.ones(obj.shape, device=obj.device, dtype=torch.bool).to_sparse(
                            layout=obj.layout, blocksize=blocksize, dense_dim=obj.dense_dim())
                        obj = obj.to_dense().sparse_mask(full_mask)
                    # Record the index metadata needed to rebuild the tensor;
                    # only the values participate in differentiation.
                    if obj.layout is torch.sparse_coo:
                        d.update(indices=obj._indices(), is_coalesced=obj.is_coalesced())
                        values = obj._values()
                    elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}:
                        d.update(compressed_indices=obj.crow_indices(), plain_indices=obj.col_indices())
                        values = obj.values()
                    else:
                        d.update(compressed_indices=obj.ccol_indices(), plain_indices=obj.row_indices())
                        values = obj.values()
                    new_args.extend((STRIDED_REPRESENTATION, d, values.requires_grad_(True)))
                else:
                    new_args.append(obj)
            return tuple(new_args)

        def restore_from_strided_representation(args):
            """Restore non-strided differentiable tensors from their strided
            representations.
            """
            new_args = []
            args = list(args)
            while args:
                a = args.pop(0)
                if a == STRIDED_REPRESENTATION:
                    # Consume the (metadata, values) pair that follows the marker.
                    d, values = args.pop(0), args.pop(0)
                    if d['layout'] is torch.sparse_coo:
                        a = torch.sparse_coo_tensor(d['indices'], values, size=d['shape'], is_coalesced=d['is_coalesced'])
                    elif d['layout'] in sparse_compressed_layouts:
                        a = torch.sparse_compressed_tensor(d['compressed_indices'], d['plain_indices'], values,
                                                           size=d['shape'], layout=d['layout'])
                    else:
                        raise NotImplementedError(f'conversion of {d["layout"]} strided representation to tensor')
                new_args.append(a)
            return tuple(new_args)

        def func_wrapper(*args, **kwargs):
            # Rebuild sparse inputs from their strided representation before
            # invoking the user's function.
            restored_args = restore_from_strided_representation(args)

            # convert differentiable output sparse tensors to strided
            # tensors:
            outputs = func(*restored_args, **kwargs)

            strided_outputs = tuple(outputs) if isinstance(outputs, (list, tuple)) else (outputs,)
            strided_outputs = tuple((o.to_dense(masked_grad=masked)
                                     if isinstance(o, torch.Tensor) and o.requires_grad and o.layout in sparse_layouts else o)
                                    for o in strided_outputs)

            # Preserve the original sequence-vs-scalar output shape.
            return strided_outputs if isinstance(outputs, (list, tuple)) else strided_outputs[0]

        args = (func_wrapper, convert_to_strided_representation(inputs))

        return gradcheck(*args, **kwargs)

    return gradcheck_with_sparse_support
|
llava_next/lib/python3.10/site-packages/torch/sparse/__pycache__/_semi_structured_conversions.cpython-310.pyc
ADDED
|
Binary file (5.69 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/allocator.cpython-310.pyc
ADDED
|
Binary file (1.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/config_v2.cpython-310.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .ds_kernel import DSKernelBase
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from abc import ABC, abstractmethod
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class DSKernelBase(ABC):
    """Abstract base class for DeepSpeed inference kernels.

    Concrete kernels perform all one-time work (compilation, warmup,
    autotuning, configuration validation) in ``__init__`` and perform the
    actual launch in ``__call__``.
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        """
        If necessary trigger compilation and warmup
        Autotuning of the kernel would happen at this stage to
        eliminate any potential hangs that might occur mid-deployment
        Validate that the desired run configuration is compatible.

        It is not necessary to call super on this method.
        """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, *args, **kwargs):
        """
        However the kernel needs to be called, it can be called here. Auto-tuning
        should never be performed here.

        All inputs/outputs should be passed as arguments to this function. No allocations
        should be performed here.
        """
        raise NotImplementedError()
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
// NOTE(review): enumerator values are pinned explicitly, which suggests they
// form part of a cross-boundary (host/kernel) contract — confirm before
// renumbering or reordering.
enum ActivationType {
    GELU = 0,         // Gaussian Error Linear Unit
    RELU = 1,         // Rectified Linear Unit
    SILU = 2,         // Sigmoid-weighted Linear Unit (a.k.a. swish)
    GEGLU = 3,        // Gated linear unit with GELU gate
    ReGLU = 4,        // Gated linear unit with ReLU gate
    SiGLU = 5,        // Gated linear unit with SiLU gate
    IDENTITY = 6,     // Pass-through (no activation applied)
    InvalidType = -1  // Sentinel for an unrecognized/unset activation
};
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h
ADDED
|
@@ -0,0 +1,640 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include "ds_kernel_utils.h"
|
| 9 |
+
|
| 10 |
+
#include <stdint.h>
|
| 11 |
+
|
| 12 |
+
#ifdef BF16_AVAILABLE
|
| 13 |
+
#include <cuda_bf16.h>
|
| 14 |
+
#endif
|
| 15 |
+
|
| 16 |
+
namespace conversion {
|
| 17 |
+
|
| 18 |
+
// Basic primitive for constructing conversions
|
| 19 |
+
template <typename TO, typename FROM>
DS_D_INLINE TO to(FROM val)
{
    // NOTE(review): this unqualified self-call only terminates when a matching
    // full specialization `to<TO>(FROM)` exists below; for unsupported TO/FROM
    // pairs it recurses into itself. Presumably this is intended to make
    // unsupported conversions fail loudly at build time — confirm, and consider
    // a static_assert for a clearer diagnostic.
    return to(val);
}
|
| 24 |
+
|
| 25 |
+
// Specializations
|
| 26 |
+
|
| 27 |
+
/********************* Identity Conversions *********************/
|
| 28 |
+
/*
|
| 29 |
+
Identity conversions are useful in templated functions where we might have
|
| 30 |
+
a fixed destination type. For example, I might have a kernel that accepts
|
| 31 |
+
__half, __nv_bfloat16, and float but always want to do the core computation
|
| 32 |
+
at floating point:
|
| 33 |
+
|
| 34 |
+
T mem_value = input[idx];
|
| 35 |
+
float compute_value = conversion::to<float, T>(mem_value);
|
| 36 |
+
|
| 37 |
+
In practice, we should be able to elide the second template parameter:
|
| 38 |
+
float compute_val = conversion::to<float>(mem_value);
|
| 39 |
+
|
| 40 |
+
In this case, we need an implementation to handle the T = float case
|
| 41 |
+
|
| 42 |
+
NOTE: The type inferencing system appears to be unable to handle inferring the first
|
| 43 |
+
template parameter, even in the trivial case.
|
| 44 |
+
*/
|
| 45 |
+
|
| 46 |
+
// Floating point types
|
| 47 |
+
template <>
|
| 48 |
+
DS_D_INLINE double to(double val)
|
| 49 |
+
{
|
| 50 |
+
return val;
|
| 51 |
+
}
|
| 52 |
+
template <>
|
| 53 |
+
DS_D_INLINE float to(float val)
|
| 54 |
+
{
|
| 55 |
+
return val;
|
| 56 |
+
}
|
| 57 |
+
template <>
|
| 58 |
+
DS_D_INLINE __half to(__half val)
|
| 59 |
+
{
|
| 60 |
+
return val;
|
| 61 |
+
}
|
| 62 |
+
#ifdef BF16_AVAILABLE
|
| 63 |
+
template <>
|
| 64 |
+
DS_D_INLINE __nv_bfloat16 to(__nv_bfloat16 val)
|
| 65 |
+
{
|
| 66 |
+
return val;
|
| 67 |
+
}
|
| 68 |
+
#endif
|
| 69 |
+
|
| 70 |
+
// Integer types
|
| 71 |
+
template <>
|
| 72 |
+
DS_D_INLINE int8_t to(int8_t val)
|
| 73 |
+
{
|
| 74 |
+
return val;
|
| 75 |
+
}
|
| 76 |
+
template <>
|
| 77 |
+
DS_D_INLINE uint8_t to(uint8_t val)
|
| 78 |
+
{
|
| 79 |
+
return val;
|
| 80 |
+
}
|
| 81 |
+
template <>
|
| 82 |
+
DS_D_INLINE int16_t to(int16_t val)
|
| 83 |
+
{
|
| 84 |
+
return val;
|
| 85 |
+
}
|
| 86 |
+
template <>
|
| 87 |
+
DS_D_INLINE uint16_t to(uint16_t val)
|
| 88 |
+
{
|
| 89 |
+
return val;
|
| 90 |
+
}
|
| 91 |
+
template <>
|
| 92 |
+
DS_D_INLINE int32_t to(int32_t val)
|
| 93 |
+
{
|
| 94 |
+
return val;
|
| 95 |
+
}
|
| 96 |
+
template <>
|
| 97 |
+
DS_D_INLINE uint32_t to(uint32_t val)
|
| 98 |
+
{
|
| 99 |
+
return val;
|
| 100 |
+
}
|
| 101 |
+
template <>
|
| 102 |
+
DS_D_INLINE int64_t to(int64_t val)
|
| 103 |
+
{
|
| 104 |
+
return val;
|
| 105 |
+
}
|
| 106 |
+
template <>
|
| 107 |
+
DS_D_INLINE uint64_t to(uint64_t val)
|
| 108 |
+
{
|
| 109 |
+
return val;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
// TODO: evaluate if we want bools
|
| 113 |
+
|
| 114 |
+
/********************* To Double Conversions *********************/
|
| 115 |
+
|
| 116 |
+
// * to double variants
|
| 117 |
+
|
| 118 |
+
// Would normally like to not use C cast, but this is an important enough conversion
|
| 119 |
+
// to keep
|
| 120 |
+
// float -> double conversion. Uses the PTX `cvt` instruction when inline PTX
// is available; otherwise falls back to a plain C++ cast.
template <>
DS_D_INLINE double to(float val)
{
#ifdef PTX_AVAILABLE
    double ret_val;
    // Fixed mnemonic: the instruction is "cvt.rn.f64.f32" (convert,
    // round-to-nearest-even). The original "ctv.rn.f64.f32" is not a valid
    // PTX opcode and fails to assemble whenever PTX_AVAILABLE is defined.
    asm("cvt.rn.f64.f32 %0, %1;\n" : "=d"(ret_val) : "f"(val));
    return ret_val;
#else
    return double(val);
#endif
}
|
| 131 |
+
// Note: there is a CVT instruction for __half -> double, but there's no inline interface
|
| 132 |
+
// for passing a single half value
|
| 133 |
+
template <>
|
| 134 |
+
DS_D_INLINE double to(__half val)
|
| 135 |
+
{
|
| 136 |
+
return to<double>(__half2float(val));
|
| 137 |
+
}
|
| 138 |
+
template <>
|
| 139 |
+
DS_D_INLINE double to(int64_t val)
|
| 140 |
+
{
|
| 141 |
+
return __ll2double_rn(val);
|
| 142 |
+
}
|
| 143 |
+
template <>
|
| 144 |
+
DS_D_INLINE double to(int32_t val)
|
| 145 |
+
{
|
| 146 |
+
return __int2double_rn(val);
|
| 147 |
+
}
|
| 148 |
+
template <>
|
| 149 |
+
DS_D_INLINE double to(int16_t val)
|
| 150 |
+
{
|
| 151 |
+
return __int2double_rn(val);
|
| 152 |
+
}
|
| 153 |
+
template <>
|
| 154 |
+
DS_D_INLINE double to(int8_t val)
|
| 155 |
+
{
|
| 156 |
+
return __int2double_rn(val);
|
| 157 |
+
}
|
| 158 |
+
template <>
|
| 159 |
+
DS_D_INLINE double to(uint64_t val)
|
| 160 |
+
{
|
| 161 |
+
return __ull2double_rn(val);
|
| 162 |
+
}
|
| 163 |
+
template <>
|
| 164 |
+
DS_D_INLINE double to(uint32_t val)
|
| 165 |
+
{
|
| 166 |
+
return __uint2double_rn(val);
|
| 167 |
+
}
|
| 168 |
+
template <>
|
| 169 |
+
DS_D_INLINE double to(uint16_t val)
|
| 170 |
+
{
|
| 171 |
+
return __uint2double_rn(val);
|
| 172 |
+
}
|
| 173 |
+
template <>
|
| 174 |
+
DS_D_INLINE double to(uint8_t val)
|
| 175 |
+
{
|
| 176 |
+
return __uint2double_rn(val);
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
// Same applies here
|
| 180 |
+
#ifdef BF16_AVAILABLE
|
| 181 |
+
template <>
|
| 182 |
+
DS_D_INLINE double to(__nv_bfloat16 val)
|
| 183 |
+
{
|
| 184 |
+
return to<double>(__bfloat162float(val));
|
| 185 |
+
}
|
| 186 |
+
#endif
|
| 187 |
+
|
| 188 |
+
/********************* To Float Conversions *********************/
|
| 189 |
+
|
| 190 |
+
template <>
|
| 191 |
+
DS_D_INLINE float to(double val)
|
| 192 |
+
{
|
| 193 |
+
return __double2float_rn(val);
|
| 194 |
+
}
|
| 195 |
+
template <>
|
| 196 |
+
DS_D_INLINE float to(__half val)
|
| 197 |
+
{
|
| 198 |
+
return __half2float(val);
|
| 199 |
+
}
|
| 200 |
+
template <>
|
| 201 |
+
DS_D_INLINE float to(int64_t val)
|
| 202 |
+
{
|
| 203 |
+
return __ll2float_rn(val);
|
| 204 |
+
}
|
| 205 |
+
template <>
|
| 206 |
+
DS_D_INLINE float to(int32_t val)
|
| 207 |
+
{
|
| 208 |
+
return __int2float_rn(val);
|
| 209 |
+
}
|
| 210 |
+
template <>
|
| 211 |
+
DS_D_INLINE float to(int16_t val)
|
| 212 |
+
{
|
| 213 |
+
return __int2float_rn(val);
|
| 214 |
+
}
|
| 215 |
+
template <>
|
| 216 |
+
DS_D_INLINE float to(int8_t val)
|
| 217 |
+
{
|
| 218 |
+
return __int2float_rn(val);
|
| 219 |
+
}
|
| 220 |
+
template <>
|
| 221 |
+
DS_D_INLINE float to(uint64_t val)
|
| 222 |
+
{
|
| 223 |
+
return __ull2float_rn(val);
|
| 224 |
+
}
|
| 225 |
+
template <>
|
| 226 |
+
DS_D_INLINE float to(uint32_t val)
|
| 227 |
+
{
|
| 228 |
+
return __uint2float_rn(val);
|
| 229 |
+
}
|
| 230 |
+
template <>
|
| 231 |
+
DS_D_INLINE float to(uint16_t val)
|
| 232 |
+
{
|
| 233 |
+
return __uint2float_rn(val);
|
| 234 |
+
}
|
| 235 |
+
template <>
|
| 236 |
+
DS_D_INLINE float to(uint8_t val)
|
| 237 |
+
{
|
| 238 |
+
return __uint2float_rn(val);
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
#ifdef BF16_AVAILABLE
|
| 242 |
+
template <>
|
| 243 |
+
DS_D_INLINE float to(__nv_bfloat16 val)
|
| 244 |
+
{
|
| 245 |
+
return __bfloat162float(val);
|
| 246 |
+
}
|
| 247 |
+
#endif
|
| 248 |
+
|
| 249 |
+
/********************* To Float2 Conversions *********************/
|
| 250 |
+
template <>
|
| 251 |
+
DS_D_INLINE float2 to(__half2 val)
|
| 252 |
+
{
|
| 253 |
+
return __half22float2(val);
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
#ifdef BF16_AVAILABLE
|
| 257 |
+
template <>
|
| 258 |
+
DS_D_INLINE float2 to(__nv_bfloat162 val)
|
| 259 |
+
{
|
| 260 |
+
return __bfloat1622float2(val);
|
| 261 |
+
}
|
| 262 |
+
#endif
|
| 263 |
+
|
| 264 |
+
/********************* To Half Conversions *********************/
|
| 265 |
+
template <>
|
| 266 |
+
DS_D_INLINE __half to(double val)
|
| 267 |
+
{
|
| 268 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 269 |
+
float val_f = __double2float_rn(val);
|
| 270 |
+
return __float2half(val_f);
|
| 271 |
+
#else
|
| 272 |
+
return __double2half(val);
|
| 273 |
+
#endif
|
| 274 |
+
}
|
| 275 |
+
template <>
|
| 276 |
+
DS_D_INLINE __half to(float val)
|
| 277 |
+
{
|
| 278 |
+
return __float2half(val);
|
| 279 |
+
}
|
| 280 |
+
template <>
|
| 281 |
+
DS_D_INLINE __half to(int64_t val)
|
| 282 |
+
{
|
| 283 |
+
return __ll2half_rn(val);
|
| 284 |
+
}
|
| 285 |
+
template <>
|
| 286 |
+
DS_D_INLINE __half to(int32_t val)
|
| 287 |
+
{
|
| 288 |
+
return __int2half_rn(val);
|
| 289 |
+
}
|
| 290 |
+
template <>
|
| 291 |
+
DS_D_INLINE __half to(int16_t val)
|
| 292 |
+
{
|
| 293 |
+
return __short2half_rn(val);
|
| 294 |
+
}
|
| 295 |
+
template <>
|
| 296 |
+
DS_D_INLINE __half to(int8_t val)
|
| 297 |
+
{
|
| 298 |
+
return __int2half_rn(val);
|
| 299 |
+
}
|
| 300 |
+
template <>
|
| 301 |
+
DS_D_INLINE __half to(uint64_t val)
|
| 302 |
+
{
|
| 303 |
+
return __ull2half_rn(val);
|
| 304 |
+
}
|
| 305 |
+
template <>
|
| 306 |
+
DS_D_INLINE __half to(uint32_t val)
|
| 307 |
+
{
|
| 308 |
+
return __uint2half_rn(val);
|
| 309 |
+
}
|
| 310 |
+
template <>
|
| 311 |
+
DS_D_INLINE __half to(uint16_t val)
|
| 312 |
+
{
|
| 313 |
+
return __ushort2half_rn(val);
|
| 314 |
+
}
|
| 315 |
+
template <>
|
| 316 |
+
DS_D_INLINE __half to(uint8_t val)
|
| 317 |
+
{
|
| 318 |
+
return __uint2half_rn(val);
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
#ifdef BF16_AVAILABLE
|
| 322 |
+
// No direct conversion
|
| 323 |
+
template <>
|
| 324 |
+
DS_D_INLINE __half to(__nv_bfloat16 val)
|
| 325 |
+
{
|
| 326 |
+
return to<__half>(to<float>(val));
|
| 327 |
+
}
|
| 328 |
+
#endif
|
| 329 |
+
|
| 330 |
+
/********************* To Half2 Conversions *********************/
|
| 331 |
+
template <>
|
| 332 |
+
DS_D_INLINE __half2 to(float2 val)
|
| 333 |
+
{
|
| 334 |
+
return __float22half2_rn(val);
|
| 335 |
+
}
|
| 336 |
+
template <>
|
| 337 |
+
DS_D_INLINE __half2 to(float val)
|
| 338 |
+
{
|
| 339 |
+
return __float2half2_rn(val);
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
#ifdef BF16_AVAILABLE
|
| 343 |
+
// No direct conversion
|
| 344 |
+
template <>
|
| 345 |
+
DS_D_INLINE __half2 to(__nv_bfloat162 val)
|
| 346 |
+
{
|
| 347 |
+
return to<__half2>(to<float2>(val));
|
| 348 |
+
}
|
| 349 |
+
#endif
|
| 350 |
+
|
| 351 |
+
/********************* To BF16 Conversions *********************/
|
| 352 |
+
#ifdef BF16_AVAILABLE
|
| 353 |
+
template <>
|
| 354 |
+
DS_D_INLINE __nv_bfloat16 to(double val)
|
| 355 |
+
{
|
| 356 |
+
return __double2bfloat16(val);
|
| 357 |
+
}
|
| 358 |
+
template <>
|
| 359 |
+
DS_D_INLINE __nv_bfloat16 to(float val)
|
| 360 |
+
{
|
| 361 |
+
return __float2bfloat16(val);
|
| 362 |
+
}
|
| 363 |
+
template <>
|
| 364 |
+
DS_D_INLINE __nv_bfloat16 to(int64_t val)
|
| 365 |
+
{
|
| 366 |
+
return __ll2bfloat16_rn(val);
|
| 367 |
+
}
|
| 368 |
+
template <>
|
| 369 |
+
DS_D_INLINE __nv_bfloat16 to(int32_t val)
|
| 370 |
+
{
|
| 371 |
+
return __int2bfloat16_rn(val);
|
| 372 |
+
}
|
| 373 |
+
template <>
|
| 374 |
+
DS_D_INLINE __nv_bfloat16 to(int16_t val)
|
| 375 |
+
{
|
| 376 |
+
return __short2bfloat16_rn(val);
|
| 377 |
+
}
|
| 378 |
+
template <>
|
| 379 |
+
DS_D_INLINE __nv_bfloat16 to(int8_t val)
|
| 380 |
+
{
|
| 381 |
+
return __int2bfloat16_rn(val);
|
| 382 |
+
}
|
| 383 |
+
template <>
|
| 384 |
+
DS_D_INLINE __nv_bfloat16 to(uint64_t val)
|
| 385 |
+
{
|
| 386 |
+
return __ull2bfloat16_rn(val);
|
| 387 |
+
}
|
| 388 |
+
template <>
|
| 389 |
+
DS_D_INLINE __nv_bfloat16 to(uint32_t val)
|
| 390 |
+
{
|
| 391 |
+
return __uint2bfloat16_rn(val);
|
| 392 |
+
}
|
| 393 |
+
template <>
|
| 394 |
+
DS_D_INLINE __nv_bfloat16 to(uint16_t val)
|
| 395 |
+
{
|
| 396 |
+
return __ushort2bfloat16_rn(val);
|
| 397 |
+
}
|
| 398 |
+
template <>
|
| 399 |
+
DS_D_INLINE __nv_bfloat16 to(uint8_t val)
|
| 400 |
+
{
|
| 401 |
+
return __uint2bfloat16_rn(val);
|
| 402 |
+
}
|
| 403 |
+
#endif
|
| 404 |
+
|
| 405 |
+
/********************* To BF162 Conversions *********************/
|
| 406 |
+
#ifdef BF16_AVAILABLE
|
| 407 |
+
template <>
|
| 408 |
+
DS_D_INLINE __nv_bfloat162 to(float2 val)
|
| 409 |
+
{
|
| 410 |
+
return __float22bfloat162_rn(val);
|
| 411 |
+
}
|
| 412 |
+
template <>
|
| 413 |
+
DS_D_INLINE __nv_bfloat162 to(float val)
|
| 414 |
+
{
|
| 415 |
+
return __float2bfloat162_rn(val);
|
| 416 |
+
}
|
| 417 |
+
template <>
|
| 418 |
+
DS_D_INLINE __nv_bfloat162 to(__half2 val)
|
| 419 |
+
{
|
| 420 |
+
return to<__nv_bfloat162>(to<float2>(val));
|
| 421 |
+
}
|
| 422 |
+
#endif
|
| 423 |
+
|
| 424 |
+
/********************* To INT64_T Conversions *********************/
|
| 425 |
+
template <>
|
| 426 |
+
DS_D_INLINE int64_t to(double val)
|
| 427 |
+
{
|
| 428 |
+
return __double2ll_rn(val);
|
| 429 |
+
}
|
| 430 |
+
template <>
|
| 431 |
+
DS_D_INLINE int64_t to(float val)
|
| 432 |
+
{
|
| 433 |
+
return __float2ll_rn(val);
|
| 434 |
+
}
|
| 435 |
+
template <>
|
| 436 |
+
DS_D_INLINE int64_t to(__half val)
|
| 437 |
+
{
|
| 438 |
+
return __half2ll_rn(val);
|
| 439 |
+
}
|
| 440 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 441 |
+
// to demand an PTX at this time
|
| 442 |
+
|
| 443 |
+
#ifdef BF16_AVAILABLE
|
| 444 |
+
template <>
|
| 445 |
+
DS_D_INLINE int64_t to(__nv_bfloat16 val)
|
| 446 |
+
{
|
| 447 |
+
return __bfloat162ll_rn(val);
|
| 448 |
+
}
|
| 449 |
+
#endif
|
| 450 |
+
|
| 451 |
+
/********************* To INT32_T Conversions *********************/
|
| 452 |
+
template <>
|
| 453 |
+
DS_D_INLINE int32_t to(double val)
|
| 454 |
+
{
|
| 455 |
+
return __double2int_rn(val);
|
| 456 |
+
}
|
| 457 |
+
template <>
|
| 458 |
+
DS_D_INLINE int32_t to(float val)
|
| 459 |
+
{
|
| 460 |
+
return __float2int_rn(val);
|
| 461 |
+
}
|
| 462 |
+
template <>
|
| 463 |
+
DS_D_INLINE int32_t to(__half val)
|
| 464 |
+
{
|
| 465 |
+
return __half2int_rn(val);
|
| 466 |
+
}
|
| 467 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 468 |
+
// to demand an PTX at this time
|
| 469 |
+
|
| 470 |
+
#ifdef BF16_AVAILABLE
|
| 471 |
+
template <>
|
| 472 |
+
DS_D_INLINE int32_t to(__nv_bfloat16 val)
|
| 473 |
+
{
|
| 474 |
+
return __bfloat162int_rn(val);
|
| 475 |
+
}
|
| 476 |
+
#endif
|
| 477 |
+
|
| 478 |
+
/********************* To INT16_T Conversions *********************/
|
| 479 |
+
template <>
|
| 480 |
+
DS_D_INLINE int16_t to(double val)
|
| 481 |
+
{
|
| 482 |
+
return __double2int_rn(val);
|
| 483 |
+
}
|
| 484 |
+
template <>
|
| 485 |
+
DS_D_INLINE int16_t to(float val)
|
| 486 |
+
{
|
| 487 |
+
return __float2int_rn(val);
|
| 488 |
+
}
|
| 489 |
+
template <>
|
| 490 |
+
DS_D_INLINE int16_t to(__half val)
|
| 491 |
+
{
|
| 492 |
+
return __half2int_rn(val);
|
| 493 |
+
}
|
| 494 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 495 |
+
// to demand an PTX at this time
|
| 496 |
+
|
| 497 |
+
#ifdef BF16_AVAILABLE
|
| 498 |
+
template <>
|
| 499 |
+
DS_D_INLINE int16_t to(__nv_bfloat16 val)
|
| 500 |
+
{
|
| 501 |
+
return __bfloat162int_rn(val);
|
| 502 |
+
}
|
| 503 |
+
#endif
|
| 504 |
+
|
| 505 |
+
/********************* To INT8_T Conversions *********************/
|
| 506 |
+
template <>
|
| 507 |
+
DS_D_INLINE int8_t to(double val)
|
| 508 |
+
{
|
| 509 |
+
return __double2int_rn(val);
|
| 510 |
+
}
|
| 511 |
+
template <>
|
| 512 |
+
DS_D_INLINE int8_t to(float val)
|
| 513 |
+
{
|
| 514 |
+
return __float2int_rn(val);
|
| 515 |
+
}
|
| 516 |
+
template <>
|
| 517 |
+
DS_D_INLINE int8_t to(__half val)
|
| 518 |
+
{
|
| 519 |
+
return __half2int_rn(val);
|
| 520 |
+
}
|
| 521 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 522 |
+
// to demand an PTX at this time
|
| 523 |
+
|
| 524 |
+
#ifdef BF16_AVAILABLE
|
| 525 |
+
template <>
|
| 526 |
+
DS_D_INLINE int8_t to(__nv_bfloat16 val)
|
| 527 |
+
{
|
| 528 |
+
return __bfloat162int_rn(val);
|
| 529 |
+
}
|
| 530 |
+
#endif
|
| 531 |
+
|
| 532 |
+
/********************* To UINT64_T Conversions *********************/
|
| 533 |
+
template <>
|
| 534 |
+
DS_D_INLINE uint64_t to(double val)
|
| 535 |
+
{
|
| 536 |
+
return __double2ull_rn(val);
|
| 537 |
+
}
|
| 538 |
+
template <>
|
| 539 |
+
DS_D_INLINE uint64_t to(float val)
|
| 540 |
+
{
|
| 541 |
+
return __float2ull_rn(val);
|
| 542 |
+
}
|
| 543 |
+
template <>
|
| 544 |
+
DS_D_INLINE uint64_t to(__half val)
|
| 545 |
+
{
|
| 546 |
+
return __half2ull_rn(val);
|
| 547 |
+
}
|
| 548 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 549 |
+
// to demand an PTX at this time
|
| 550 |
+
|
| 551 |
+
#ifdef BF16_AVAILABLE
|
| 552 |
+
template <>
|
| 553 |
+
DS_D_INLINE uint64_t to(__nv_bfloat16 val)
|
| 554 |
+
{
|
| 555 |
+
return __bfloat162ull_rn(val);
|
| 556 |
+
}
|
| 557 |
+
#endif
|
| 558 |
+
|
| 559 |
+
/********************* To UINT32_T Conversions *********************/
|
| 560 |
+
template <>
|
| 561 |
+
DS_D_INLINE uint32_t to(double val)
|
| 562 |
+
{
|
| 563 |
+
return __double2uint_rn(val);
|
| 564 |
+
}
|
| 565 |
+
template <>
|
| 566 |
+
DS_D_INLINE uint32_t to(float val)
|
| 567 |
+
{
|
| 568 |
+
return __float2uint_rn(val);
|
| 569 |
+
}
|
| 570 |
+
template <>
|
| 571 |
+
DS_D_INLINE uint32_t to(__half val)
|
| 572 |
+
{
|
| 573 |
+
return __half2uint_rn(val);
|
| 574 |
+
}
|
| 575 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 576 |
+
// to demand an PTX at this time
|
| 577 |
+
|
| 578 |
+
#ifdef BF16_AVAILABLE
|
| 579 |
+
template <>
|
| 580 |
+
DS_D_INLINE uint32_t to(__nv_bfloat16 val)
|
| 581 |
+
{
|
| 582 |
+
return __bfloat162uint_rn(val);
|
| 583 |
+
}
|
| 584 |
+
#endif
|
| 585 |
+
|
| 586 |
+
/********************* To UINT16_T Conversions *********************/
|
| 587 |
+
template <>
|
| 588 |
+
DS_D_INLINE uint16_t to(double val)
|
| 589 |
+
{
|
| 590 |
+
return __double2uint_rn(val);
|
| 591 |
+
}
|
| 592 |
+
template <>
|
| 593 |
+
DS_D_INLINE uint16_t to(float val)
|
| 594 |
+
{
|
| 595 |
+
return __float2uint_rn(val);
|
| 596 |
+
}
|
| 597 |
+
template <>
|
| 598 |
+
DS_D_INLINE uint16_t to(__half val)
|
| 599 |
+
{
|
| 600 |
+
return __half2uint_rn(val);
|
| 601 |
+
}
|
| 602 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 603 |
+
// to demand an PTX at this time
|
| 604 |
+
|
| 605 |
+
#ifdef BF16_AVAILABLE
|
| 606 |
+
template <>
|
| 607 |
+
DS_D_INLINE uint16_t to(__nv_bfloat16 val)
|
| 608 |
+
{
|
| 609 |
+
return __bfloat162uint_rn(val);
|
| 610 |
+
}
|
| 611 |
+
#endif
|
| 612 |
+
|
| 613 |
+
/********************* To UINT8_T Conversions *********************/
|
| 614 |
+
template <>
|
| 615 |
+
DS_D_INLINE uint8_t to(double val)
|
| 616 |
+
{
|
| 617 |
+
return __double2uint_rn(val);
|
| 618 |
+
}
|
| 619 |
+
template <>
|
| 620 |
+
DS_D_INLINE uint8_t to(float val)
|
| 621 |
+
{
|
| 622 |
+
return __float2uint_rn(val);
|
| 623 |
+
}
|
| 624 |
+
template <>
|
| 625 |
+
DS_D_INLINE uint8_t to(__half val)
|
| 626 |
+
{
|
| 627 |
+
return __half2uint_rn(val);
|
| 628 |
+
}
|
| 629 |
+
// No direct support for integer casts at the C++ level and I don't feel they're so important
|
| 630 |
+
// to demand an PTX at this time
|
| 631 |
+
|
| 632 |
+
#ifdef BF16_AVAILABLE
|
| 633 |
+
template <>
|
| 634 |
+
DS_D_INLINE uint8_t to(__nv_bfloat16 val)
|
| 635 |
+
{
|
| 636 |
+
return __bfloat162uint_rn(val);
|
| 637 |
+
}
|
| 638 |
+
#endif
|
| 639 |
+
|
| 640 |
+
} // namespace conversion
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Centralized header file for preprocessor macros and constants
|
| 8 |
+
used throughout the codebase.
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#pragma once

#include <cuda.h>
#include <cuda_fp16.h>

#ifdef BF16_AVAILABLE
#include <cuda_bf16.h>
#endif

// Inline qualifiers used throughout the kernel codebase.
#define DS_HD_INLINE __host__ __device__ __forceinline__
#define DS_D_INLINE __device__ __forceinline__

#ifdef __HIP_PLATFORM_AMD__

// constexpr variant of warpSize for templating
constexpr int hw_warp_size = 64;
// Fix: the original `#define HALF_PRECISION_AVAILABLE = 1` expanded to the
// token sequence `= 1`, which breaks any `#if HALF_PRECISION_AVAILABLE` or
// expression use of the macro (only `#ifdef` checks happened to work).
// Defining it to plain `1` is backward compatible with `#ifdef` users.
#define HALF_PRECISION_AVAILABLE 1
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_fp16.h>

#else  // !__HIP_PLATFORM_AMD__

// constexpr variant of warpSize for templating
constexpr int hw_warp_size = 32;

#if __CUDA_ARCH__ >= 530
// Half-precision hardware math and inline PTX paths require SM53+.
#define HALF_PRECISION_AVAILABLE 1
#define PTX_AVAILABLE
#endif  // __CUDA_ARCH__ >= 530

#if __CUDA_ARCH__ >= 800
// cp.async (asynchronous global->shared copy) requires SM80+.
#define ASYNC_COPY_AVAILABLE
#endif  // __CUDA_ARCH__ >= 800

#include <cooperative_groups.h>
#include <cuda_fp16.h>

#endif  //__HIP_PLATFORM_AMD__
|
| 49 |
+
|
| 50 |
+
/*
Rounds a positive integer up to the next power of two; values that are already
powers of two are returned unchanged. Classic bit-smearing implementation.

Fix: the original propagated the high bit with shifts of 1/2/4/8 only (15
positions total), which is correct for inputs up to 2^17 but silently wrong
beyond that — e.g. next_pow2(131073) returned 262141, which is not a power of
two. Adding the `>> 16` step extends correctness to the full positive 32-bit
int range.

Preconditions: 1 <= val <= 2^30 (larger values overflow the int result).
*/
inline int next_pow2(const int val)
{
    int rounded_val = val - 1;
    rounded_val |= rounded_val >> 1;
    rounded_val |= rounded_val >> 2;
    rounded_val |= rounded_val >> 4;
    rounded_val |= rounded_val >> 8;
    rounded_val |= rounded_val >> 16;  // required once the top bit is above bit 16
    return rounded_val + 1;
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h
ADDED
|
@@ -0,0 +1,1115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <cuda.h>
|
| 9 |
+
#include "ds_kernel_utils.h"
|
| 10 |
+
|
| 11 |
+
/////////////////////////////// Memory Access Utils ///////////////////////////////
|
| 12 |
+
namespace mem_access {
|
| 13 |
+
|
| 14 |
+
// Cache-operator hint for global loads; selects the PTX ld.global qualifier
// (.ca / .cg / .cs) in the specializations below.
enum class LoadPolicy {
    CacheAll,       // Cache at all levels
    CacheGlobal,    // Cache at L2 only
    CacheStreaming  // Cache with evict first policy
};

// Cache-operator hint for global stores.
enum class StorePolicy {
    Writeback,      // Cache in L1, write-back on eviction
    CacheGlobal,    // Bypass L1, write-back on eviction
    CacheStreaming  // Allocate cache line with evict first policy
};

// AccessSize is in bytes; specializations exist for 16/8/4/2. `src` must be
// aligned to the access width; `dst` receives AccessSize bytes.
template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
__device__ __forceinline__ void load_global(void* dst, const void* src);

// Predicated variant: when do_access is false, `dst` is zero-filled and no
// memory transaction is issued (see the definitions below).
template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
__device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access);

// Shared accesses have no cache policy
template <int AccessSize>
__device__ __forceinline__ void load_shared(void* dst, const void* src);

// Predicated shared load: zero-fills `dst` when do_access is false.
template <int AccessSize>
__device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access);

template <int AccessSize, StorePolicy policy = StorePolicy::Writeback>
__device__ __forceinline__ void store_global(void* dst, const void* src);

// Shared accesses have no cache policy
template <int AccessSize>
__device__ __forceinline__ void store_shared(void* dst, const void* src);

// Asynchronous global->shared copies; ASYNC_COPY_AVAILABLE is defined for
// SM80+ in ds_kernel_utils.h.
#ifdef ASYNC_COPY_AVAILABLE
template <int AccessSize>
__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl);

// NOTE(review): presumably skips the copy entirely when predicate is false —
// confirm against the implementation (not visible in this chunk).
template <int AccessSize>
__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate);

// NOTE(review): presumably zero-fills the shared destination when predicate
// is false — confirm against the implementation.
template <int AccessSize>
__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate);

// Commits the outstanding async-copy group.
__device__ __forceinline__ void memcpy_async_fence();

// Waits until at most `stages` async-copy groups remain in flight.
template <int stages>
__device__ __forceinline__ void memcpy_async_wait();

// Drains the pipeline tail when fewer than `stages` iterations remain.
template <int stages>
__device__ __forceinline__ void tail_complete_wait(int remaining_stages);
#endif
|
| 64 |
+
|
| 65 |
+
// Util for tracking pipeline buffers
// TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE
//
// Round-robin counter over `max` pipeline buffer slots: each get() returns the
// current slot index and advances, wrapping back to 0 after slot max-1.
// No synchronization is performed — intended for per-thread (or warp-uniform)
// use.
template <int max>
class BufferTracker {
public:
    // Next slot index to hand out, always in [0, max).
    int current_state;

    __device__ __forceinline__ BufferTracker() : current_state(0) {}

    // Returns the current slot index and advances the tracker (wrap at max).
    __device__ __forceinline__ int get()
    {
        int return_val = current_state++;
        current_state = (current_state == max ? 0 : current_state);
        return return_val;
    }
};
|
| 81 |
+
|
| 82 |
+
// Returns the calling thread's lane index within its warp, in [0, warpSize).
__device__ __forceinline__ uint32_t lane_id()
{
#ifdef PTX_AVAILABLE
    // Read the %laneid special register directly.
    unsigned int lane_id;
    asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id));
    return lane_id;
#else
    // Fallback derives the lane from threadIdx.x alone — assumes warps are
    // laid out along x with blockDim.x a multiple of warpSize (TODO confirm
    // for multi-dimensional blocks).
    return threadIdx.x & (warpSize - 1);  // Portable
#endif
}
|
| 92 |
+
|
| 93 |
+
/////////// Load Global ///////////

// 16-byte global load with the default cache-all (.ca) policy.
// Preconditions: `src` is a 16-byte-aligned global pointer; `dst` has room
// for 16 bytes (a register-resident uint4 in practice).
template <>
__device__ __forceinline__ void load_global<16>(void* dst, const void* src)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    // Vectorized 128-bit load, cached at all levels.
    asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n"
                 : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
                 : "l"(src));
#else
    // Generic C++ fallback when inline PTX is unavailable.
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    data[0] = src_cast[0];
#endif
}
|
| 107 |
+
|
| 108 |
+
template <>
|
| 109 |
+
__device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access)
|
| 110 |
+
{
|
| 111 |
+
uint4* data = reinterpret_cast<uint4*>(dst);
|
| 112 |
+
#ifdef PTX_AVAILABLE
|
| 113 |
+
asm volatile(
|
| 114 |
+
"{\n"
|
| 115 |
+
"\t.reg .pred p;\n"
|
| 116 |
+
"\tsetp.ne.b32 p, %5, 0;\n"
|
| 117 |
+
"\tmov.b32 %0, 0;\n"
|
| 118 |
+
"\tmov.b32 %1, 0;\n"
|
| 119 |
+
"\tmov.b32 %2, 0;\n"
|
| 120 |
+
"\tmov.b32 %3, 0;\n"
|
| 121 |
+
"\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
|
| 122 |
+
"}\n"
|
| 123 |
+
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
|
| 124 |
+
: "l"(src), "r"((int)do_access));
|
| 125 |
+
#else
|
| 126 |
+
const uint4* src_cast = reinterpret_cast<const uint4*>(src);
|
| 127 |
+
if (do_access) {
|
| 128 |
+
data[0] = src_cast[0];
|
| 129 |
+
} else {
|
| 130 |
+
data[0].x = 0;
|
| 131 |
+
data[0].y = 0;
|
| 132 |
+
data[0].z = 0;
|
| 133 |
+
data[0].w = 0;
|
| 134 |
+
}
|
| 135 |
+
#endif
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
template <>
|
| 139 |
+
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src)
|
| 140 |
+
{
|
| 141 |
+
uint4* data = reinterpret_cast<uint4*>(dst);
|
| 142 |
+
#ifdef PTX_AVAILABLE
|
| 143 |
+
asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
|
| 144 |
+
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
|
| 145 |
+
: "l"(src));
|
| 146 |
+
#else
|
| 147 |
+
const uint4* src_cast = reinterpret_cast<const uint4*>(src);
|
| 148 |
+
data[0] = src_cast[0];
|
| 149 |
+
#endif
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
template <>
|
| 153 |
+
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst,
|
| 154 |
+
const void* src,
|
| 155 |
+
bool do_access)
|
| 156 |
+
{
|
| 157 |
+
uint4* data = reinterpret_cast<uint4*>(dst);
|
| 158 |
+
#ifdef PTX_AVAILABLE
|
| 159 |
+
asm volatile(
|
| 160 |
+
"{\n"
|
| 161 |
+
"\t.reg .pred p;\n"
|
| 162 |
+
"\tsetp.ne.b32 p, %5, 0;\n"
|
| 163 |
+
"\tmov.b32 %0, 0;\n"
|
| 164 |
+
"\tmov.b32 %1, 0;\n"
|
| 165 |
+
"\tmov.b32 %2, 0;\n"
|
| 166 |
+
"\tmov.b32 %3, 0;\n"
|
| 167 |
+
"\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
|
| 168 |
+
"}\n"
|
| 169 |
+
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
|
| 170 |
+
: "l"(src), "r"((int)do_access));
|
| 171 |
+
#else
|
| 172 |
+
const uint4* src_cast = reinterpret_cast<const uint4*>(src);
|
| 173 |
+
if (do_access) {
|
| 174 |
+
data[0] = src_cast[0];
|
| 175 |
+
} else {
|
| 176 |
+
data[0].x = 0;
|
| 177 |
+
data[0].y = 0;
|
| 178 |
+
data[0].z = 0;
|
| 179 |
+
data[0].w = 0;
|
| 180 |
+
}
|
| 181 |
+
#endif
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
template <>
|
| 185 |
+
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
|
| 186 |
+
const void* src)
|
| 187 |
+
{
|
| 188 |
+
uint4* data = reinterpret_cast<uint4*>(dst);
|
| 189 |
+
#ifdef PTX_AVAILABLE
|
| 190 |
+
asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
|
| 191 |
+
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
|
| 192 |
+
: "l"(src));
|
| 193 |
+
#else
|
| 194 |
+
const uint4* src_cast = reinterpret_cast<const uint4*>(src);
|
| 195 |
+
data[0] = src_cast[0];
|
| 196 |
+
#endif
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
// Predicated 16-byte global load with the streaming (.cs, evict-first)
// policy. When `do_access` is false the destination is zero-filled and no
// memory transaction is issued.
//
// Fix: the predicated PTX path previously issued `ld.global.cg.v4.u32`
// (cache-global), contradicting the CacheStreaming policy selected by the
// template arguments and diverging from the unpredicated
// <16, CacheStreaming> variant and every <8>/<4>/<2> CacheStreaming
// specialization in this file, all of which use `.cs`. The loaded value is
// identical either way; only the intended cache-eviction hint changes.
template <>
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
                                                                            const void* src,
                                                                            bool do_access)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %5, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\tmov.b32 %2, 0;\n"
        "\tmov.b32 %3, 0;\n"
        "\t@p ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
        : "l"(src), "r"((int)do_access));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
        data[0].z = 0;
        data[0].w = 0;
    }
#endif
}
|
| 230 |
+
|
| 231 |
+
template <>
|
| 232 |
+
__device__ __forceinline__ void load_global<8>(void* dst, const void* src)
|
| 233 |
+
{
|
| 234 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 235 |
+
#ifdef PTX_AVAILABLE
|
| 236 |
+
asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n"
|
| 237 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 238 |
+
: "l"(src));
|
| 239 |
+
#else
|
| 240 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 241 |
+
data[0] = src_cast[0];
|
| 242 |
+
#endif
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
template <>
|
| 246 |
+
__device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access)
|
| 247 |
+
{
|
| 248 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 249 |
+
#ifdef PTX_AVAILABLE
|
| 250 |
+
asm volatile(
|
| 251 |
+
"{\n"
|
| 252 |
+
"\t.reg .pred p;\n"
|
| 253 |
+
"\tsetp.ne.b32 p, %3, 0;\n"
|
| 254 |
+
"\tmov.b32 %0, 0;\n"
|
| 255 |
+
"\tmov.b32 %1, 0;\n"
|
| 256 |
+
"\t@p ld.global.v2.u32 {%0, %1}, [%2];\n"
|
| 257 |
+
"}\n"
|
| 258 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 259 |
+
: "l"(src), "r"((int)do_access));
|
| 260 |
+
#else
|
| 261 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 262 |
+
if (do_access) {
|
| 263 |
+
data[0] = src_cast[0];
|
| 264 |
+
} else {
|
| 265 |
+
data[0].x = 0;
|
| 266 |
+
data[0].y = 0;
|
| 267 |
+
}
|
| 268 |
+
#endif
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
template <>
|
| 272 |
+
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src)
|
| 273 |
+
{
|
| 274 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 275 |
+
#ifdef PTX_AVAILABLE
|
| 276 |
+
asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
|
| 277 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 278 |
+
: "l"(src));
|
| 279 |
+
#else
|
| 280 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 281 |
+
data[0] = src_cast[0];
|
| 282 |
+
#endif
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
template <>
|
| 286 |
+
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst,
|
| 287 |
+
const void* src,
|
| 288 |
+
bool do_access)
|
| 289 |
+
{
|
| 290 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 291 |
+
#ifdef PTX_AVAILABLE
|
| 292 |
+
asm volatile(
|
| 293 |
+
"{\n"
|
| 294 |
+
"\t.reg .pred p;\n"
|
| 295 |
+
"\tsetp.ne.b32 p, %3, 0;\n"
|
| 296 |
+
"\tmov.b32 %0, 0;\n"
|
| 297 |
+
"\tmov.b32 %1, 0;\n"
|
| 298 |
+
"\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
|
| 299 |
+
"}\n"
|
| 300 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 301 |
+
: "l"(src), "r"((int)do_access));
|
| 302 |
+
#else
|
| 303 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 304 |
+
if (do_access) {
|
| 305 |
+
data[0] = src_cast[0];
|
| 306 |
+
} else {
|
| 307 |
+
data[0].x = 0;
|
| 308 |
+
data[0].y = 0;
|
| 309 |
+
}
|
| 310 |
+
#endif
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
template <>
|
| 314 |
+
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
|
| 315 |
+
const void* src)
|
| 316 |
+
{
|
| 317 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 318 |
+
#ifdef PTX_AVAILABLE
|
| 319 |
+
asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
|
| 320 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 321 |
+
: "l"(src));
|
| 322 |
+
#else
|
| 323 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 324 |
+
data[0] = src_cast[0];
|
| 325 |
+
#endif
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
template <>
|
| 329 |
+
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
|
| 330 |
+
const void* src,
|
| 331 |
+
bool do_access)
|
| 332 |
+
{
|
| 333 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 334 |
+
#ifdef PTX_AVAILABLE
|
| 335 |
+
asm volatile(
|
| 336 |
+
"{\n"
|
| 337 |
+
"\t.reg .pred p;\n"
|
| 338 |
+
"\tsetp.ne.b32 p, %3, 0;\n"
|
| 339 |
+
"\tmov.b32 %0, 0;\n"
|
| 340 |
+
"\tmov.b32 %1, 0;\n"
|
| 341 |
+
"\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
|
| 342 |
+
"}\n"
|
| 343 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 344 |
+
: "l"(src), "r"((int)do_access));
|
| 345 |
+
#else
|
| 346 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 347 |
+
if (do_access) {
|
| 348 |
+
data[0] = src_cast[0];
|
| 349 |
+
} else {
|
| 350 |
+
data[0].x = 0;
|
| 351 |
+
data[0].y = 0;
|
| 352 |
+
}
|
| 353 |
+
#endif
|
| 354 |
+
}
|
| 355 |
+
|
| 356 |
+
template <>
|
| 357 |
+
__device__ __forceinline__ void load_global<4>(void* dst, const void* src)
|
| 358 |
+
{
|
| 359 |
+
int32_t* data = reinterpret_cast<int32_t*>(dst);
|
| 360 |
+
#ifdef PTX_AVAILABLE
|
| 361 |
+
asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
|
| 362 |
+
#else
|
| 363 |
+
const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
|
| 364 |
+
data[0] = src_cast[0];
|
| 365 |
+
#endif
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
template <>
|
| 369 |
+
__device__ __forceinline__ void load_global<4>(void* dst, const void* src, bool do_access)
|
| 370 |
+
{
|
| 371 |
+
int32_t* data = reinterpret_cast<int32_t*>(dst);
|
| 372 |
+
#ifdef PTX_AVAILABLE
|
| 373 |
+
asm volatile(
|
| 374 |
+
"{\n"
|
| 375 |
+
"\t.reg .pred p;\n"
|
| 376 |
+
"\tsetp.ne.b32 p, %2, 0;\n"
|
| 377 |
+
"\tmov.b32 %0, 0;\n"
|
| 378 |
+
"\t@p ld.global.u32 {%0}, [%1];\n"
|
| 379 |
+
"}\n"
|
| 380 |
+
: "=r"(data[0])
|
| 381 |
+
: "l"(src), "r"((int)do_access));
|
| 382 |
+
#else
|
| 383 |
+
const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
|
| 384 |
+
if (do_access) {
|
| 385 |
+
data[0] = src_cast[0];
|
| 386 |
+
} else {
|
| 387 |
+
data[0] = 0;
|
| 388 |
+
}
|
| 389 |
+
#endif
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
template <>
|
| 393 |
+
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src)
|
| 394 |
+
{
|
| 395 |
+
int32_t* data = reinterpret_cast<int32_t*>(dst);
|
| 396 |
+
#ifdef PTX_AVAILABLE
|
| 397 |
+
asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
|
| 398 |
+
#else
|
| 399 |
+
const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
|
| 400 |
+
data[0] = src_cast[0];
|
| 401 |
+
#endif
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
template <>
|
| 405 |
+
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst,
|
| 406 |
+
const void* src,
|
| 407 |
+
bool do_access)
|
| 408 |
+
{
|
| 409 |
+
int32_t* data = reinterpret_cast<int32_t*>(dst);
|
| 410 |
+
#ifdef PTX_AVAILABLE
|
| 411 |
+
asm volatile(
|
| 412 |
+
"{\n"
|
| 413 |
+
"\t.reg .pred p;\n"
|
| 414 |
+
"\tsetp.ne.b32 p, %2, 0;\n"
|
| 415 |
+
"\tmov.b32 %0, 0;\n"
|
| 416 |
+
"\t@p ld.global.cg.u32 {%0}, [%1];\n"
|
| 417 |
+
"}\n"
|
| 418 |
+
: "=r"(data[0])
|
| 419 |
+
: "l"(src), "r"((int)do_access));
|
| 420 |
+
#else
|
| 421 |
+
const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
|
| 422 |
+
if (do_access) {
|
| 423 |
+
data[0] = src_cast[0];
|
| 424 |
+
} else {
|
| 425 |
+
data[0] = 0;
|
| 426 |
+
}
|
| 427 |
+
#endif
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
template <>
|
| 431 |
+
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
|
| 432 |
+
const void* src)
|
| 433 |
+
{
|
| 434 |
+
int32_t* data = reinterpret_cast<int32_t*>(dst);
|
| 435 |
+
#ifdef PTX_AVAILABLE
|
| 436 |
+
asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
|
| 437 |
+
#else
|
| 438 |
+
const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
|
| 439 |
+
data[0] = src_cast[0];
|
| 440 |
+
#endif
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
template <>
|
| 444 |
+
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
|
| 445 |
+
const void* src,
|
| 446 |
+
bool do_access)
|
| 447 |
+
{
|
| 448 |
+
int32_t* data = reinterpret_cast<int32_t*>(dst);
|
| 449 |
+
#ifdef PTX_AVAILABLE
|
| 450 |
+
asm volatile(
|
| 451 |
+
"{\n"
|
| 452 |
+
"\t.reg .pred p;\n"
|
| 453 |
+
"\tsetp.ne.b32 p, %2, 0;\n"
|
| 454 |
+
"\tmov.b32 %0, 0;\n"
|
| 455 |
+
"\t@p ld.global.cs.u32 {%0}, [%1];\n"
|
| 456 |
+
"}\n"
|
| 457 |
+
: "=r"(data[0])
|
| 458 |
+
: "l"(src), "r"((int)do_access));
|
| 459 |
+
#else
|
| 460 |
+
const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
|
| 461 |
+
if (do_access) {
|
| 462 |
+
data[0] = src_cast[0];
|
| 463 |
+
} else {
|
| 464 |
+
data[0] = 0;
|
| 465 |
+
}
|
| 466 |
+
#endif
|
| 467 |
+
}
|
| 468 |
+
|
| 469 |
+
template <>
|
| 470 |
+
__device__ __forceinline__ void load_global<2>(void* dst, const void* src)
|
| 471 |
+
{
|
| 472 |
+
int16_t* data = reinterpret_cast<int16_t*>(dst);
|
| 473 |
+
#ifdef PTX_AVAILABLE
|
| 474 |
+
asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
|
| 475 |
+
#else
|
| 476 |
+
const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
|
| 477 |
+
data[0] = src_cast[0];
|
| 478 |
+
#endif
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
template <>
|
| 482 |
+
__device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access)
|
| 483 |
+
{
|
| 484 |
+
int16_t* data = reinterpret_cast<int16_t*>(dst);
|
| 485 |
+
#ifdef PTX_AVAILABLE
|
| 486 |
+
asm volatile(
|
| 487 |
+
"{\n"
|
| 488 |
+
"\t.reg .pred p;\n"
|
| 489 |
+
"\tsetp.ne.b32 p, %2, 0;\n"
|
| 490 |
+
"\tmov.u16 %0, 0;\n"
|
| 491 |
+
"\t@p ld.global.u16 {%0}, [%1];\n"
|
| 492 |
+
"}\n"
|
| 493 |
+
: "=h"(*data)
|
| 494 |
+
: "l"(src), "r"((int)do_access));
|
| 495 |
+
#else
|
| 496 |
+
const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
|
| 497 |
+
if (do_access) {
|
| 498 |
+
data[0] = src_cast[0];
|
| 499 |
+
} else {
|
| 500 |
+
data[0] = 0;
|
| 501 |
+
}
|
| 502 |
+
#endif
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
template <>
|
| 506 |
+
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src)
|
| 507 |
+
{
|
| 508 |
+
int16_t* data = reinterpret_cast<int16_t*>(dst);
|
| 509 |
+
#ifdef PTX_AVAILABLE
|
| 510 |
+
asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
|
| 511 |
+
#else
|
| 512 |
+
const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
|
| 513 |
+
data[0] = src_cast[0];
|
| 514 |
+
#endif
|
| 515 |
+
}
|
| 516 |
+
|
| 517 |
+
template <>
|
| 518 |
+
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst,
|
| 519 |
+
const void* src,
|
| 520 |
+
bool do_access)
|
| 521 |
+
{
|
| 522 |
+
int16_t* data = reinterpret_cast<int16_t*>(dst);
|
| 523 |
+
#ifdef PTX_AVAILABLE
|
| 524 |
+
asm volatile(
|
| 525 |
+
"{\n"
|
| 526 |
+
"\t.reg .pred p;\n"
|
| 527 |
+
"\tsetp.ne.b32 p, %2, 0;\n"
|
| 528 |
+
"\tmov.u16 %0, 0;\n"
|
| 529 |
+
"\t@p ld.global.cg.u16 {%0}, [%1];\n"
|
| 530 |
+
"}\n"
|
| 531 |
+
: "=h"(*data)
|
| 532 |
+
: "l"(src), "r"((int)do_access));
|
| 533 |
+
#else
|
| 534 |
+
const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
|
| 535 |
+
if (do_access) {
|
| 536 |
+
data[0] = src_cast[0];
|
| 537 |
+
} else {
|
| 538 |
+
data[0] = 0;
|
| 539 |
+
}
|
| 540 |
+
#endif
|
| 541 |
+
}
|
| 542 |
+
|
| 543 |
+
template <>
|
| 544 |
+
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
|
| 545 |
+
const void* src)
|
| 546 |
+
{
|
| 547 |
+
int16_t* data = reinterpret_cast<int16_t*>(dst);
|
| 548 |
+
#ifdef PTX_AVAILABLE
|
| 549 |
+
asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
|
| 550 |
+
#else
|
| 551 |
+
const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
|
| 552 |
+
data[0] = src_cast[0];
|
| 553 |
+
#endif
|
| 554 |
+
}
|
| 555 |
+
|
| 556 |
+
template <>
|
| 557 |
+
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
|
| 558 |
+
const void* src,
|
| 559 |
+
bool do_access)
|
| 560 |
+
{
|
| 561 |
+
int16_t* data = reinterpret_cast<int16_t*>(dst);
|
| 562 |
+
#ifdef PTX_AVAILABLE
|
| 563 |
+
asm volatile(
|
| 564 |
+
"{\n"
|
| 565 |
+
"\t.reg .pred p;\n"
|
| 566 |
+
"\tsetp.ne.b32 p, %2, 0;\n"
|
| 567 |
+
"\tmov.u16 %0, 0;\n"
|
| 568 |
+
"\t@p ld.global.cs.u16 {%0}, [%1];\n"
|
| 569 |
+
"}\n"
|
| 570 |
+
: "=h"(*data)
|
| 571 |
+
: "l"(src), "r"((int)do_access));
|
| 572 |
+
#else
|
| 573 |
+
const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
|
| 574 |
+
if (do_access) {
|
| 575 |
+
data[0] = src_cast[0];
|
| 576 |
+
} else {
|
| 577 |
+
data[0] = 0;
|
| 578 |
+
}
|
| 579 |
+
#endif
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
/////////// Load Shared ///////////
|
| 583 |
+
namespace internal {

#ifdef PTX_AVAILABLE
// Converts a generic-address-space pointer into the 32-bit shared-memory
// window address required by `ld.shared`/`st.shared` operands.
// Precondition: `ptr` must actually point into shared memory.
__device__ __forceinline__ unsigned convert_to_shared(const void* ptr)
{
#if __CUDACC_VER_MAJOR__ >= 11
    // In CUDA 11 we have a builtin intrinsic
    return __cvta_generic_to_shared(ptr);
#else
    unsigned ret_val;
    // Fix: the `cvta.to.shared.u64` instruction was missing its terminating
    // semicolon in the original, which is invalid PTX and would fail to
    // assemble on this pre-CUDA-11 path (latent, since CUDA >= 11 takes the
    // intrinsic branch above).
    asm volatile(
        "{\n"
        "\t.reg .u64 p1;\n"
        "\tcvta.to.shared.u64 p1, %1;\n"
        "\tcvt.u32.u64 %0, p1;\n"
        "}\n"
        : "=r"(ret_val)
        : "l"(ptr));
    return ret_val;
#endif
}
#endif

}  // namespace internal
|
| 607 |
+
|
| 608 |
+
// 16-byte load from shared memory (shared accesses take no cache policy).
// Preconditions: `src` points into shared memory and is 16-byte aligned.
template <>
__device__ __forceinline__ void load_shared<16>(void* dst, const void* src)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    // ld.shared takes a shared-window address, not a generic pointer.
    unsigned src_shr = internal::convert_to_shared(src);

    asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
                 : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
                 : "r"(src_shr));
#else
    // Generic C++ fallback when inline PTX is unavailable.
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    data[0] = src_cast[0];
#endif
}
|
| 623 |
+
|
| 624 |
+
template <>
|
| 625 |
+
__device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access)
|
| 626 |
+
{
|
| 627 |
+
uint4* data = reinterpret_cast<uint4*>(dst);
|
| 628 |
+
#ifdef PTX_AVAILABLE
|
| 629 |
+
unsigned src_shr = internal::convert_to_shared(src);
|
| 630 |
+
|
| 631 |
+
asm volatile(
|
| 632 |
+
"{\n"
|
| 633 |
+
"\t.reg .pred p;\n"
|
| 634 |
+
"\tsetp.ne.b32 p, %5, 0;\n"
|
| 635 |
+
"\tmov.b32 %0, 0;\n"
|
| 636 |
+
"\tmov.b32 %1, 0;\n"
|
| 637 |
+
"\tmov.b32 %2, 0;\n"
|
| 638 |
+
"\tmov.b32 %3, 0;\n"
|
| 639 |
+
"\t@p ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
|
| 640 |
+
"}\n"
|
| 641 |
+
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
|
| 642 |
+
: "r"(src_shr), "r"((int)do_access));
|
| 643 |
+
#else
|
| 644 |
+
const uint4* src_cast = reinterpret_cast<const uint4*>(src);
|
| 645 |
+
if (do_access) {
|
| 646 |
+
data[0] = src_cast[0];
|
| 647 |
+
} else {
|
| 648 |
+
data[0].x = 0;
|
| 649 |
+
data[0].y = 0;
|
| 650 |
+
data[0].z = 0;
|
| 651 |
+
data[0].w = 0;
|
| 652 |
+
}
|
| 653 |
+
#endif
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
template <>
|
| 657 |
+
__device__ __forceinline__ void load_shared<8>(void* dst, const void* src)
|
| 658 |
+
{
|
| 659 |
+
uint2* data = reinterpret_cast<uint2*>(dst);
|
| 660 |
+
#ifdef PTX_AVAILABLE
|
| 661 |
+
unsigned src_shr = internal::convert_to_shared(src);
|
| 662 |
+
|
| 663 |
+
asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
|
| 664 |
+
: "=r"(data[0].x), "=r"(data[0].y)
|
| 665 |
+
: "r"(src_shr));
|
| 666 |
+
#else
|
| 667 |
+
const uint2* src_cast = reinterpret_cast<const uint2*>(src);
|
| 668 |
+
data[0] = src_cast[0];
|
| 669 |
+
#endif
|
| 670 |
+
}
|
| 671 |
+
|
| 672 |
+
// Predicated 8-byte shared-memory load: reads a uint2 from `src` (shared
// memory) into `dst` when `do_access` is true; otherwise zero-fills `dst`.
template <>
__device__ __forceinline__ void load_shared<8>(void* dst, const void* src, bool do_access)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);

    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %3, 0;\n"  // p = (do_access != 0)
        "\tmov.b32 %0, 0;\n"         // defined result when predicated off
        "\tmov.b32 %1, 0;\n"
        "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y)
        : "r"(src_shr), "r"((int)do_access));
#else
    // Portable fallback: plain predicated copy / zero-fill.
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
    }
#endif
}
|
| 699 |
+
|
| 700 |
+
// Unconditional 4-byte shared-memory load: reads an int32_t from `src`
// (shared memory) into `dst`.
template <>
__device__ __forceinline__ void load_shared<4>(void* dst, const void* src)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);

    asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr));
#else
    // Portable fallback: plain scalar copy.
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    data[0] = src_cast[0];
#endif
}
|
| 713 |
+
|
| 714 |
+
// Predicated 4-byte shared-memory load: reads an int32_t from `src` (shared
// memory) into `dst` when `do_access` is true; otherwise writes 0.
template <>
__device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);

    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"  // p = (do_access != 0)
        "\tmov.b32 %0, 0;\n"         // defined result when predicated off
        "\t@p ld.shared.u32 %0, [%1];\n"
        "}\n"
        : "=r"(data[0])
        : "r"(src_shr), "r"((int)do_access));
#else
    // Portable fallback: plain predicated copy / zero-fill.
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}
|
| 739 |
+
|
| 740 |
+
/////////// Store Global ///////////
|
| 741 |
+
|
| 742 |
+
// 16-byte global-memory store with default (write-back, st.global.wb) cache
// policy: writes the uint4 at `src` to global pointer `dst`.
template <>
__device__ __forceinline__ void store_global<16>(void* dst, const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
                 : "memory");
#else
    // Portable fallback: plain vector store.
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 756 |
+
|
| 757 |
+
// 16-byte global-memory store with cache-global (st.global.cg) policy:
// caches at L2, bypassing L1.
template <>
__device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst,
                                                                           const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
                 : "memory");
#else
    // Portable fallback: plain vector store (no cache-policy control).
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 772 |
+
|
| 773 |
+
// 16-byte global-memory store with cache-streaming (st.global.cs) policy:
// hints that the data is streamed and unlikely to be reused.
template <>
__device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst,
                                                                              const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
                 : "memory");
#else
    // Portable fallback: plain vector store (no cache-policy control).
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 788 |
+
|
| 789 |
+
// 8-byte global-memory store with default write-back policy.
// NOTE(review): unlike the 16-byte variants, no "memory" clobber is declared
// on this asm statement — confirm this difference is intentional.
template <>
__device__ __forceinline__ void store_global<8>(void* dst, const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y));
#else
    // Portable fallback: plain vector store.
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 802 |
+
|
| 803 |
+
// 8-byte global-memory store with cache-global (st.global.cg) policy.
// NOTE(review): no "memory" clobber here, unlike the 16-byte variants —
// confirm intentional.
template <>
__device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst,
                                                                          const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y));
#else
    // Portable fallback: plain vector store (no cache-policy control).
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 817 |
+
|
| 818 |
+
// 8-byte global-memory store with cache-streaming (st.global.cs) policy.
// NOTE(review): no "memory" clobber here, unlike the 16-byte variants —
// confirm intentional.
template <>
__device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst,
                                                                             const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y));
#else
    // Portable fallback: plain vector store (no cache-policy control).
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 832 |
+
|
| 833 |
+
// 4-byte global-memory store with default write-back (st.global.wb) policy.
template <>
__device__ __forceinline__ void store_global<4>(void* dst, const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
#else
    // Portable fallback: plain scalar store.
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 844 |
+
|
| 845 |
+
// 4-byte global-memory store with cache-global (st.global.cg) policy.
template <>
__device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst,
                                                                          const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
#else
    // Portable fallback: plain scalar store (no cache-policy control).
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 857 |
+
|
| 858 |
+
// 4-byte global-memory store with cache-streaming (st.global.cs) policy.
template <>
__device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst,
                                                                             const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
#else
    // Portable fallback: plain scalar store (no cache-policy control).
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 870 |
+
|
| 871 |
+
/////////// Store Shared ///////////
|
| 872 |
+
|
| 873 |
+
// 16-byte shared-memory store: writes the uint4 at `src` to shared pointer
// `dst`.
template <>
__device__ __forceinline__ void store_shared<16>(void* dst, const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    // Generic pointer -> shared-state-space address for st.shared.
    unsigned dst_int = internal::convert_to_shared(dst);

    asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w));
#else
    // Portable fallback: plain vector store.
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 888 |
+
|
| 889 |
+
// 8-byte shared-memory store: writes the uint2 at `src` to shared pointer
// `dst`.
template <>
__device__ __forceinline__ void store_shared<8>(void* dst, const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    unsigned dst_int = internal::convert_to_shared(dst);

    asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "r"(dst_int), "r"(data[0].x), "r"(data[0].y));
#else
    // Portable fallback: plain vector store.
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 904 |
+
|
| 905 |
+
// 4-byte shared-memory store: writes the int32_t at `src` to shared pointer
// `dst`.
template <>
__device__ __forceinline__ void store_shared<4>(void* dst, const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    unsigned dst_int = internal::convert_to_shared(dst);

    asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data));
#else
    // Portable fallback: plain scalar store.
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}
|
| 918 |
+
|
| 919 |
+
/////////// Asynchronous Memory Copy ///////////
|
| 920 |
+
|
| 921 |
+
#ifdef ASYNC_COPY_AVAILABLE
|
| 922 |
+
// Issues a cp.async.ca (cache-all) asynchronous global->shared copy of
// AccessSize bytes. Must be followed by memcpy_async_fence()/memcpy_async_wait
// before the shared data is consumed.
template <int AccessSize>
__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl)
{
    // cp.async only supports 4/8/16-byte transfers.
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);

    asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n"
                 :
                 : "r"(shr_int), "l"(gbl), "n"(AccessSize));
}
|
| 932 |
+
|
| 933 |
+
// Predicated asynchronous global->shared copy: when `predicate` is false the
// copy is skipped entirely (shared destination is left untouched).
template <int AccessSize>
__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);

    asm volatile(
        "{\n"
        "    .reg .pred p;\n"
        "    setp.ne.b32 p, %0, 0;\n"  // p = (predicate != 0)
        "    @p cp.async.ca.shared.global [%1], [%2], %3;\n"
        "}\n"
        :
        : "r"((int)predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize));
}
|
| 948 |
+
|
| 949 |
+
// Asynchronous global->shared copy with zero-fill: when `predicate` is false,
// 0 bytes are sourced from global memory (src-size operand), so the
// destination is written with zeros instead of being skipped.
template <int AccessSize>
__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);
    // Number of bytes actually read from `gbl`; the remainder is zero-filled.
    int bytes_to_copy = (predicate ? AccessSize : 0);

    asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n"
                 :
                 : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
}
|
| 960 |
+
|
| 961 |
+
// Combined predication: `nop_predicate` gates whether the copy is issued at
// all; `zero_predicate` selects between a real copy and a zero-fill when it
// is issued.
template <int AccessSize>
__device__ __forceinline__ void memcpy_async_zero_nop(void* shr,
                                                      const void* gbl,
                                                      bool zero_predicate,
                                                      bool nop_predicate)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);
    // Bytes sourced from `gbl` when the instruction executes; rest zero-filled.
    int bytes_to_copy = (zero_predicate ? AccessSize : 0);

    asm volatile(
        "{\n"
        "    .reg .pred p;\n"
        "    setp.ne.b32 p, %0, 0;\n"  // p = (nop_predicate != 0)
        "    @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n"
        "}\n"
        :
        : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
}
|
| 980 |
+
|
| 981 |
+
// Cache global variants. Separate interface to require deliberate use of them.
|
| 982 |
+
// 16-byte asynchronous global->shared copy with cache-global (.cg) policy,
// bypassing L1. Fixed at 16 bytes (only size .cg supports).
__device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl)
{
    unsigned shr_int = internal::convert_to_shared(shr);

    asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl));
}
|
| 988 |
+
|
| 989 |
+
// Predicated 16-byte cache-global async copy: skipped entirely when
// `predicate` is false.
__device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate)
{
    unsigned shr_int = internal::convert_to_shared(shr);

    asm volatile(
        "{\n"
        "    .reg .pred p;\n"
        "    setp.ne.b32 p, %0, 0;\n"  // p = (predicate != 0)
        "    @p cp.async.cg.shared.global [%1], [%2], 16;\n"
        "}\n"
        :
        : "r"((int)predicate), "r"(shr_int), "l"(gbl));
}
|
| 1002 |
+
|
| 1003 |
+
// 16-byte cache-global async copy with zero-fill: when `predicate` is false,
// 0 bytes are read from global memory and the destination is zero-filled.
__device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate)
{
    unsigned shr_int = internal::convert_to_shared(shr);
    // Bytes actually sourced from `gbl`; the remainder is zero-filled.
    int bytes_to_copy = (predicate ? 16 : 0);

    asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n"
                 :
                 : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
}
|
| 1012 |
+
|
| 1013 |
+
// Combined predication for the cache-global variant: `nop_predicate` gates
// issuing the copy at all; `zero_predicate` selects copy vs zero-fill.
__device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr,
                                                         const void* gbl,
                                                         bool zero_predicate,
                                                         bool nop_predicate)
{
    unsigned shr_int = internal::convert_to_shared(shr);
    int bytes_to_copy = (zero_predicate ? 16 : 0);

    asm volatile(
        "{\n"
        "    .reg .pred p;\n"
        "    setp.ne.b32 p, %0, 0;\n"  // p = (nop_predicate != 0)
        "    @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n"
        "}\n"
        :
        : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
}
|
| 1030 |
+
|
| 1031 |
+
// Commits all previously issued cp.async operations into a group
// (cp.async.commit_group), to be awaited by memcpy_async_wait<>.
__device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); }
|
| 1032 |
+
|
| 1033 |
+
// Waits until at most `stages` committed cp.async groups remain in flight
// (cp.async.wait_group). `stages == 0` drains the pipeline completely.
template <int stages>
__device__ __forceinline__ void memcpy_async_wait()
{
    // PTX limits the wait_group immediate.
    static_assert(stages <= 8);

    asm volatile("cp.async.wait_group %0;\n" : : "n"(stages));
}
|
| 1040 |
+
|
| 1041 |
+
// TODO: The tail complete should be a known compile time artifact, should try and induce this
|
| 1042 |
+
// without all of the branches from the call-site. This is a hacky solution.
|
| 1043 |
+
// Tail drain for a 1-stage pipeline: maps the runtime stage count to the
// compile-time wait needed to finish outstanding async copies.
template <>
__device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages)
{
    switch (remaining_stages) {
        case 0: memcpy_async_wait<0>(); break;
        default: break;
    }
}
|
| 1048 |
+
|
| 1049 |
+
// Tail drain for a 2-stage pipeline: dispatch the runtime stage count to the
// matching compile-time wait.
template <>
__device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages)
{
    switch (remaining_stages) {
        case 1: memcpy_async_wait<1>(); break;
        case 0: memcpy_async_wait<0>(); break;
        default: break;
    }
}
|
| 1057 |
+
|
| 1058 |
+
// Tail drain for a 3-stage pipeline: dispatch the runtime stage count to the
// matching compile-time wait.
template <>
__device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages)
{
    switch (remaining_stages) {
        case 2: memcpy_async_wait<2>(); break;
        case 1: memcpy_async_wait<1>(); break;
        case 0: memcpy_async_wait<0>(); break;
        default: break;
    }
}
|
| 1068 |
+
|
| 1069 |
+
// Tail drain for a 4-stage pipeline: dispatch the runtime stage count to the
// matching compile-time wait.
template <>
__device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages)
{
    switch (remaining_stages) {
        case 3: memcpy_async_wait<3>(); break;
        case 2: memcpy_async_wait<2>(); break;
        case 1: memcpy_async_wait<1>(); break;
        case 0: memcpy_async_wait<0>(); break;
        default: break;
    }
}
|
| 1081 |
+
|
| 1082 |
+
// Tail drain for a 5-stage pipeline: dispatch the runtime stage count to the
// matching compile-time wait.
template <>
__device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages)
{
    switch (remaining_stages) {
        case 4: memcpy_async_wait<4>(); break;
        case 3: memcpy_async_wait<3>(); break;
        case 2: memcpy_async_wait<2>(); break;
        case 1: memcpy_async_wait<1>(); break;
        case 0: memcpy_async_wait<0>(); break;
        default: break;
    }
}
|
| 1096 |
+
|
| 1097 |
+
// Tail drain for a 6-stage pipeline: dispatch the runtime stage count to the
// matching compile-time wait.
template <>
__device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages)
{
    switch (remaining_stages) {
        case 5: memcpy_async_wait<5>(); break;
        case 4: memcpy_async_wait<4>(); break;
        case 3: memcpy_async_wait<3>(); break;
        case 2: memcpy_async_wait<2>(); break;
        case 1: memcpy_async_wait<1>(); break;
        case 0: memcpy_async_wait<0>(); break;
        default: break;
    }
}
|
| 1113 |
+
#endif
|
| 1114 |
+
|
| 1115 |
+
} // namespace mem_access
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h
ADDED
|
@@ -0,0 +1,778 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include "conversion_utils.h"
|
| 9 |
+
#include "ds_kernel_utils.h"
|
| 10 |
+
#include "memory_access_utils.h"
|
| 11 |
+
|
| 12 |
+
namespace cg = cooperative_groups;
|
| 13 |
+
|
| 14 |
+
namespace reduce {
|
| 15 |
+
|
| 16 |
+
// Reduction operator selector used by all reduce:: entry points.
enum class ROpType {
    // Addition
    Add,

    // Maximum reduction
    Max,

    // Minimum reduction
    Min,
};

// CUDA hardware ceiling on threads per block.
constexpr int max_threads = 1024;
// Maximum number of warps per block at the thread ceiling.
constexpr int max_warps = max_threads / hw_warp_size;
|
| 29 |
+
|
| 30 |
+
/*
|
| 31 |
+
High level API. The API takes in a set of operations and variables
|
| 32 |
+
and performs that reduction operation on that variable. The reductions
|
| 33 |
+
of each of the arguments are completely independent of each other (
|
| 34 |
+
i.e., the val1-op1 combination has no impact on val2-op2).
|
| 35 |
+
|
| 36 |
+
Example usage:
|
| 37 |
+
``` cpp
|
| 38 |
+
float max_val;
|
| 39 |
+
float min_val;
|
| 40 |
+
reduce::block<rop::Max, rop::Min>(tb, warp, max_val, min_val);
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
TODO(cmikeh2): In theory, we might be able to do this sequentially with
|
| 44 |
+
device functions and rely on the assembler correctly behaving. My initial
|
| 45 |
+
instinct is this won't work, but if it does it would reduce implementation
|
| 46 |
+
cost significantly.
|
| 47 |
+
|
| 48 |
+
TODO(cmikeh2): We need to support sub-block reductions. The warp intrinsic
|
| 49 |
+
currently supports this (more incidentally than anything else). It is not
|
| 50 |
+
uncommon in something like softmax or a fused attention kernel to map multiple
|
| 51 |
+
reductions to a thread block, but each reduction itself is only scoped
|
| 52 |
+
to part of the threads (i.e block size = 512, 128 threads per reduction).
|
| 53 |
+
*/
|
| 54 |
+
// Block-scoped reduction of one float value with operator `Op`.
template <ROpType Op, int warp_bound = max_warps>
DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val);

// Two independent reductions (val1 with Op1, val2 with Op2) in one pass.
template <ROpType Op1, ROpType Op2, int warp_bound = max_warps>
DS_D_INLINE void block(cg::thread_block& tb,
                       cg::thread_block_tile<hw_warp_size>& warp,
                       float& val1,
                       float& val2);

// Three independent reductions in one pass.
template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound = max_warps>
DS_D_INLINE void block(cg::thread_block& tb,
                       cg::thread_block_tile<hw_warp_size>& warp,
                       float& val1,
                       float& val2,
                       float& val3);

// Four independent reductions in one pass.
template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound = max_warps>
DS_D_INLINE void block(cg::thread_block& tb,
                       cg::thread_block_tile<hw_warp_size>& warp,
                       float& val1,
                       float& val2,
                       float& val3,
                       float& val4);
|
| 77 |
+
|
| 78 |
+
/*
|
| 79 |
+
The partitioned block is a special case of the above where in the warps of a threadblock are
|
| 80 |
+
partitioned into separate independent reductions. For example, I might have an 8 warp thread block
|
| 81 |
+
in which each pair of warps is processing an independent piece of data. I would then reduce that
|
| 82 |
+
data with something like the following:
|
| 83 |
+
``` cpp
|
| 84 |
+
float max_val;
|
| 85 |
+
reduce::partitioned_block<rop::Max, 2>(tb, warp, max_val);
|
| 86 |
+
```
|
| 87 |
+
After which, each pair of warps would have coherent data with each other. Note, this API will not
|
| 88 |
+
provide correct results if the number of warps per partition is not a power of 2.
|
| 89 |
+
*/
|
| 90 |
+
// Reduction over a `num_threads`-wide partition of the block (see comment
// above); one float value with operator `Op`.
template <ROpType Op, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val);

// Two independent partitioned reductions in one pass.
template <ROpType Op1, ROpType Op2, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val1,
                                   float& val2);

// Three independent partitioned reductions in one pass.
template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val1,
                                   float& val2,
                                   float& val3);

// Four independent partitioned reductions in one pass.
template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val1,
                                   float& val2,
                                   float& val3,
                                   float& val4);
|
| 115 |
+
|
| 116 |
+
/*
|
| 117 |
+
Single element reduction primitives. Used inside serial collection
|
| 118 |
+
loops.
|
| 119 |
+
|
| 120 |
+
Example usage:
|
| 121 |
+
    using rop = reduce::ROpType;
|
| 122 |
+
float min = init<rop::Min>();
|
| 123 |
+
for (int i = 0; i < 4; i++) {
|
| 124 |
+
min = reduce::element<rop::Min>(min, data[i]);
|
| 125 |
+
}
|
| 126 |
+
*/
|
| 127 |
+
|
| 128 |
+
// Combines two scalars with reduction operator `Op`; specialized per type
// below.
template <ROpType Op, typename T>
DS_D_INLINE T element(const T lhs, const T rhs);

// Identity value for reduction operator `OType` (0 for Add, +/-inf for
// Min/Max); specialized per type below.
template <ROpType OType, typename T = float>
DS_D_INLINE T init();
|
| 133 |
+
|
| 134 |
+
/********************** Internal reduction APIs **********************/
|
| 135 |
+
|
| 136 |
+
/*
|
| 137 |
+
Single element "reductions". TODO(cmikeh2): this sort of "op" concept
|
| 138 |
+
should be refactored into its own implementation at some point. This interface
|
| 139 |
+
may be easily expanded for new types/operations, but the typical reductions
|
| 140 |
+
we need are covered with min/max/add on float.
|
| 141 |
+
|
| 142 |
+
NOTE: there is no mean reduction because that relies on knowledge of how
|
| 143 |
+
many values were already reduced into each scalar. Implementing this on top
|
| 144 |
+
of reduce should be straightforward (can just wrap the sum reduction) and
|
| 145 |
+
would be a good extension of the header.
|
| 146 |
+
*/
|
| 147 |
+
|
| 148 |
+
// Index of the calling thread's warp within its block, computed from the
// linearized 3-D thread rank.
DS_D_INLINE int _warp_rank()
{
    const int linear_tid =
        threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return linear_tid / hw_warp_size;
}
|
| 154 |
+
|
| 155 |
+
/* Float element reduce implementations */
// Sum of two floats.
template <>
DS_D_INLINE float element<ROpType::Add>(const float lhs, const float rhs)
{
    return lhs + rhs;
}

// Larger of two floats (fmaxf semantics for NaN handling).
template <>
DS_D_INLINE float element<ROpType::Max>(const float lhs, const float rhs)
{
    return fmaxf(lhs, rhs);
}

// Smaller of two floats (fminf semantics for NaN handling).
template <>
DS_D_INLINE float element<ROpType::Min>(const float lhs, const float rhs)
{
    return fminf(lhs, rhs);
}
|
| 173 |
+
|
| 174 |
+
/* __half element reduce implementation */
// Sum of two half-precision values.
template <>
DS_D_INLINE __half element<ROpType::Add>(const __half lhs, const __half rhs)
{
    return lhs + rhs;
}

// Larger of two half-precision values.
template <>
DS_D_INLINE __half element<ROpType::Max>(const __half lhs, const __half rhs)
{
#if __CUDA_ARCH__ >= 800
    // Intrinsic limited to Ampere + newer
    return __hmax(lhs, rhs);
#else
    // Pre-Ampere fallback via comparison.
    return (lhs > rhs) ? lhs : rhs;
#endif
}

// Smaller of two half-precision values.
template <>
DS_D_INLINE __half element<ROpType::Min>(const __half lhs, const __half rhs)
{
#if __CUDA_ARCH__ >= 800
    // Intrinsic limited to Ampere + newer
    return __hmin(lhs, rhs);
#else
    // Pre-Ampere fallback via comparison.
    return (lhs < rhs) ? lhs : rhs;
#endif
}
|
| 202 |
+
|
| 203 |
+
/* __half2 element reduce implementation */
// Lane-wise sum of two packed half pairs.
template <>
DS_D_INLINE __half2 element<ROpType::Add>(const __half2 lhs, const __half2 rhs)
{
    return lhs + rhs;
}

// Lane-wise maximum of two packed half pairs.
template <>
DS_D_INLINE __half2 element<ROpType::Max>(const __half2 lhs, const __half2 rhs)
{
#if __CUDA_ARCH__ >= 800
    // Packed intrinsic limited to Ampere + newer.
    return __hmax2(lhs, rhs);
#else
    // Pre-Ampere fallback: compare each lane separately.
    __half2 ret_val;
    ret_val.x = (lhs.x > rhs.x) ? lhs.x : rhs.x;
    ret_val.y = (lhs.y > rhs.y) ? lhs.y : rhs.y;
    return ret_val;
#endif
}

// Lane-wise minimum of two packed half pairs.
template <>
DS_D_INLINE __half2 element<ROpType::Min>(const __half2 lhs, const __half2 rhs)
{
#if __CUDA_ARCH__ >= 800
    // Packed intrinsic limited to Ampere + newer.
    return __hmin2(lhs, rhs);
#else
    // Pre-Ampere fallback: compare each lane separately.
    __half2 ret_val;
    ret_val.x = (lhs.x < rhs.x) ? lhs.x : rhs.x;
    ret_val.y = (lhs.y < rhs.y) ? lhs.y : rhs.y;
    return ret_val;
#endif
}
|
| 235 |
+
|
| 236 |
+
/* int32_t element reduce implementations */
// Sum of two 32-bit signed integers.
template <>
DS_D_INLINE int32_t element<ROpType::Add>(const int32_t lhs, const int32_t rhs)
{
    return lhs + rhs;
}

// Larger of two 32-bit signed integers.
template <>
DS_D_INLINE int32_t element<ROpType::Max>(const int32_t lhs, const int32_t rhs)
{
    return (lhs > rhs) ? lhs : rhs;
}

// Smaller of two 32-bit signed integers.
template <>
DS_D_INLINE int32_t element<ROpType::Min>(const int32_t lhs, const int32_t rhs)
{
    return (lhs < rhs) ? lhs : rhs;
}
|
| 253 |
+
|
| 254 |
+
/* uint32_t element reduce implementations */
// Sum of two 32-bit unsigned integers.
template <>
DS_D_INLINE uint32_t element<ROpType::Add>(const uint32_t lhs, const uint32_t rhs)
{
    return lhs + rhs;
}

// Larger of two 32-bit unsigned integers.
template <>
DS_D_INLINE uint32_t element<ROpType::Max>(const uint32_t lhs, const uint32_t rhs)
{
    return (lhs > rhs) ? lhs : rhs;
}

// Smaller of two 32-bit unsigned integers.
template <>
DS_D_INLINE uint32_t element<ROpType::Min>(const uint32_t lhs, const uint32_t rhs)
{
    return (lhs < rhs) ? lhs : rhs;
}
|
| 271 |
+
|
| 272 |
+
template <>
|
| 273 |
+
DS_D_INLINE int64_t element<ROpType::Add>(const int64_t lhs, const int64_t rhs)
|
| 274 |
+
{
|
| 275 |
+
return lhs + rhs;
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
template <>
|
| 279 |
+
DS_D_INLINE int64_t element<ROpType::Max>(const int64_t lhs, const int64_t rhs)
|
| 280 |
+
{
|
| 281 |
+
return (lhs > rhs) ? lhs : rhs;
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
template <>
|
| 285 |
+
DS_D_INLINE int64_t element<ROpType::Min>(const int64_t lhs, const int64_t rhs)
|
| 286 |
+
{
|
| 287 |
+
return (lhs < rhs) ? lhs : rhs;
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
/*
|
| 291 |
+
Reduction initialization primitives
|
| 292 |
+
*/
|
| 293 |
+
template <>
|
| 294 |
+
DS_D_INLINE float init<ROpType::Add>()
|
| 295 |
+
{
|
| 296 |
+
return 0.0f;
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
template <>
|
| 300 |
+
DS_D_INLINE float init<ROpType::Min>()
|
| 301 |
+
{
|
| 302 |
+
// Positive infinity
|
| 303 |
+
return INFINITY;
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
template <>
|
| 307 |
+
DS_D_INLINE float init<ROpType::Max>()
|
| 308 |
+
{
|
| 309 |
+
// Negative infinity
|
| 310 |
+
return -INFINITY;
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
template <>
|
| 314 |
+
DS_D_INLINE __half init<ROpType::Add>()
|
| 315 |
+
{
|
| 316 |
+
constexpr __half_raw zero = {0x0000};
|
| 317 |
+
return __half(zero);
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
template <>
|
| 321 |
+
DS_D_INLINE __half init<ROpType::Min>()
|
| 322 |
+
{
|
| 323 |
+
constexpr __half_raw inf = {0x7C00};
|
| 324 |
+
return __half(inf);
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
template <>
|
| 328 |
+
DS_D_INLINE __half init<ROpType::Max>()
|
| 329 |
+
{
|
| 330 |
+
constexpr __half_raw neg_inf = {0xFC00};
|
| 331 |
+
return __half(neg_inf);
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
template <>
|
| 335 |
+
DS_D_INLINE __half2 init<ROpType::Add>()
|
| 336 |
+
{
|
| 337 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 338 |
+
return __half2{_Float16_2{0x0000, 0x0000}};
|
| 339 |
+
#else
|
| 340 |
+
constexpr __half2_raw zero = {0x0000, 0x0000};
|
| 341 |
+
return __half2(zero);
|
| 342 |
+
#endif
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
template <>
|
| 346 |
+
DS_D_INLINE __half2 init<ROpType::Min>()
|
| 347 |
+
{
|
| 348 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 349 |
+
return __half2{_Float16_2{0x7C00, 0x7C00}};
|
| 350 |
+
#else
|
| 351 |
+
constexpr __half2_raw inf = {0x7C00, 0x7C00};
|
| 352 |
+
return __half2(inf);
|
| 353 |
+
#endif
|
| 354 |
+
}
|
| 355 |
+
|
| 356 |
+
template <>
|
| 357 |
+
DS_D_INLINE __half2 init<ROpType::Max>()
|
| 358 |
+
{
|
| 359 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 360 |
+
return __half2{_Float16_2{0xFC00, 0xFC00}};
|
| 361 |
+
#else
|
| 362 |
+
constexpr __half2_raw neg_inf = {0xFC00, 0xFC00};
|
| 363 |
+
return __half2(neg_inf);
|
| 364 |
+
#endif
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
template <>
|
| 368 |
+
DS_D_INLINE int32_t init<ROpType::Add>()
|
| 369 |
+
{
|
| 370 |
+
return 0;
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
template <>
|
| 374 |
+
DS_D_INLINE int32_t init<ROpType::Min>()
|
| 375 |
+
{
|
| 376 |
+
return 0x7FFFFFFF;
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
template <>
|
| 380 |
+
DS_D_INLINE int32_t init<ROpType::Max>()
|
| 381 |
+
{
|
| 382 |
+
return 0x80000000;
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
template <>
|
| 386 |
+
DS_D_INLINE uint32_t init<ROpType::Add>()
|
| 387 |
+
{
|
| 388 |
+
return 0;
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
template <>
|
| 392 |
+
DS_D_INLINE uint32_t init<ROpType::Min>()
|
| 393 |
+
{
|
| 394 |
+
return 0xFFFFFFFF;
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
template <>
|
| 398 |
+
DS_D_INLINE uint32_t init<ROpType::Max>()
|
| 399 |
+
{
|
| 400 |
+
return 0;
|
| 401 |
+
}
|
| 402 |
+
|
| 403 |
+
template <>
|
| 404 |
+
DS_D_INLINE int64_t init<ROpType::Add>()
|
| 405 |
+
{
|
| 406 |
+
return 0;
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
template <>
|
| 410 |
+
DS_D_INLINE int64_t init<ROpType::Min>()
|
| 411 |
+
{
|
| 412 |
+
return 0x7FFFFFFFFFFFFFFF;
|
| 413 |
+
}
|
| 414 |
+
|
| 415 |
+
template <>
|
| 416 |
+
DS_D_INLINE int64_t init<ROpType::Max>()
|
| 417 |
+
{
|
| 418 |
+
return 0x8000000000000000;
|
| 419 |
+
}
|
| 420 |
+
|
| 421 |
+
template <>
|
| 422 |
+
DS_D_INLINE uint64_t init<ROpType::Add>()
|
| 423 |
+
{
|
| 424 |
+
return 0;
|
| 425 |
+
}
|
| 426 |
+
|
| 427 |
+
template <>
|
| 428 |
+
DS_D_INLINE uint64_t init<ROpType::Min>()
|
| 429 |
+
{
|
| 430 |
+
return 0xFFFFFFFFFFFFFFFF;
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
template <>
|
| 434 |
+
DS_D_INLINE uint64_t init<ROpType::Max>()
|
| 435 |
+
{
|
| 436 |
+
return 0;
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
template <ROpType Op, typename T>
|
| 440 |
+
DS_D_INLINE void init(T* data)
|
| 441 |
+
{
|
| 442 |
+
data[0] = init<Op, T>();
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
template <ROpType Op1, ROpType Op2, typename T>
|
| 446 |
+
DS_D_INLINE void init(T* data)
|
| 447 |
+
{
|
| 448 |
+
data[0] = init<Op1, T>();
|
| 449 |
+
data[1] = init<Op2, T>();
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
template <ROpType Op1, ROpType Op2, ROpType Op3, typename T>
|
| 453 |
+
DS_D_INLINE void init(T* data)
|
| 454 |
+
{
|
| 455 |
+
data[0] = init<Op1, T>();
|
| 456 |
+
data[1] = init<Op2, T>();
|
| 457 |
+
data[2] = init<Op3, T>();
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, typename T>
|
| 461 |
+
DS_D_INLINE void init(T* data)
|
| 462 |
+
{
|
| 463 |
+
data[0] = init<Op1, T>();
|
| 464 |
+
data[1] = init<Op2, T>();
|
| 465 |
+
data[2] = init<Op3, T>();
|
| 466 |
+
data[3] = init<Op4, T>();
|
| 467 |
+
}
|
| 468 |
+
|
| 469 |
+
/*
|
| 470 |
+
Warp reduction primitives
|
| 471 |
+
|
| 472 |
+
`reduction_width` is an unsafe template parameter, that is that
|
| 473 |
+
when using `reduction_width` < hw_warp_size the warp is partitioned
|
| 474 |
+
into `hw_warp_size` / `reduction_width` groups of partial sums.
|
| 475 |
+
|
| 476 |
+
If someone can figure out how to use variadic templates in a reasonable way
|
| 477 |
+
here (fold is C++17 only and I don't think helps and recursion feels like
|
| 478 |
+
huge overkill that harms readability) that would be wonderful.
|
| 479 |
+
*/
|
| 480 |
+
|
| 481 |
+
template <typename T, ROpType Op, int reduce_width = hw_warp_size>
|
| 482 |
+
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
|
| 483 |
+
{
|
| 484 |
+
#pragma unroll
|
| 485 |
+
for (int i = 1; i < reduce_width; i *= 2) {
|
| 486 |
+
data[0] = element<Op>(data[0], warp.shfl_xor(data[0], i));
|
| 487 |
+
}
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
template <typename T, ROpType Op1, ROpType Op2, int reduce_width = hw_warp_size>
|
| 491 |
+
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
|
| 492 |
+
{
|
| 493 |
+
#pragma unroll
|
| 494 |
+
for (int i = 1; i < reduce_width; i *= 2) {
|
| 495 |
+
data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
|
| 496 |
+
data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
|
| 497 |
+
}
|
| 498 |
+
}
|
| 499 |
+
|
| 500 |
+
template <typename T, ROpType Op1, ROpType Op2, ROpType Op3, int reduce_width = hw_warp_size>
|
| 501 |
+
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
|
| 502 |
+
{
|
| 503 |
+
#pragma unroll
|
| 504 |
+
for (int i = 1; i < reduce_width; i *= 2) {
|
| 505 |
+
data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
|
| 506 |
+
data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
|
| 507 |
+
data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
|
| 508 |
+
}
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
template <typename T,
|
| 512 |
+
ROpType Op1,
|
| 513 |
+
ROpType Op2,
|
| 514 |
+
ROpType Op3,
|
| 515 |
+
ROpType Op4,
|
| 516 |
+
int reduce_width = hw_warp_size>
|
| 517 |
+
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
|
| 518 |
+
{
|
| 519 |
+
#pragma unroll
|
| 520 |
+
for (int i = 1; i < reduce_width; i *= 2) {
|
| 521 |
+
data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
|
| 522 |
+
data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
|
| 523 |
+
data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
|
| 524 |
+
data[3] = element<Op4>(data[3], warp.shfl_xor(data[3], i));
|
| 525 |
+
}
|
| 526 |
+
}
|
| 527 |
+
|
| 528 |
+
/*
|
| 529 |
+
Implementation for primary block reduction that serves both `block` and
|
| 530 |
+
`partitioned_block`.
|
| 531 |
+
|
| 532 |
+
Total warps refers to the reduction width of the reduction, not
|
| 533 |
+
the number of warps in the block (which may exceed that
|
| 534 |
+
if the block is partitioned or if we do a conservative bound at
|
| 535 |
+
compile time).
|
| 536 |
+
*/
|
| 537 |
+
template <typename T, int total_warps, ROpType... Ops>
|
| 538 |
+
DS_D_INLINE void _block(cg::thread_block& tb,
|
| 539 |
+
cg::thread_block_tile<hw_warp_size>& warp_arg,
|
| 540 |
+
T* data)
|
| 541 |
+
{
|
| 542 |
+
constexpr int elems = sizeof...(Ops);
|
| 543 |
+
constexpr int bytes = sizeof(T);
|
| 544 |
+
// Unused when `partition_size == 1` or total_warps == 1
|
| 545 |
+
__shared__ T reduce_buffer[max_warps * elems];
|
| 546 |
+
|
| 547 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 548 |
+
const int total_threads = blockDim.x * blockDim.y * blockDim.z;
|
| 549 |
+
const int running_warps = total_threads / hw_warp_size;
|
| 550 |
+
#else
|
| 551 |
+
const int running_warps = warp_arg.meta_group_size();
|
| 552 |
+
#endif
|
| 553 |
+
|
| 554 |
+
// Always perform warp-scope reduction
|
| 555 |
+
_warp<T, Ops...>(warp_arg, data);
|
| 556 |
+
|
| 557 |
+
// If max_warps == 1 let's skip the runtime check
|
| 558 |
+
if (total_warps != 1) {
|
| 559 |
+
if (warp_arg.thread_rank() == 0) {
|
| 560 |
+
#pragma unroll
|
| 561 |
+
for (int i = 0; i < elems; i++) {
|
| 562 |
+
mem_access::store_shared<bytes>(reduce_buffer + elems * _warp_rank() + i, data + i);
|
| 563 |
+
}
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
// Synchronization inside block-uniform conditional is safe
|
| 567 |
+
tb.sync();
|
| 568 |
+
|
| 569 |
+
if (_warp_rank() == 0) {
|
| 570 |
+
if (warp_arg.thread_rank() < running_warps) {
|
| 571 |
+
#pragma unroll
|
| 572 |
+
for (int i = 0; i < elems; i++) {
|
| 573 |
+
mem_access::load_shared<bytes>(
|
| 574 |
+
data + i, reduce_buffer + elems * warp_arg.thread_rank() + i);
|
| 575 |
+
}
|
| 576 |
+
} else {
|
| 577 |
+
init<Ops...>(data);
|
| 578 |
+
}
|
| 579 |
+
|
| 580 |
+
_warp<T, Ops..., total_warps>(warp_arg, data);
|
| 581 |
+
|
| 582 |
+
#pragma unroll
|
| 583 |
+
for (int i = 0; i < elems; i++) {
|
| 584 |
+
mem_access::store_shared<bytes>(reduce_buffer + elems * warp_arg.thread_rank() + i,
|
| 585 |
+
data + i);
|
| 586 |
+
}
|
| 587 |
+
}
|
| 588 |
+
|
| 589 |
+
// Synchronization inside block-uniform conditional is safe
|
| 590 |
+
tb.sync();
|
| 591 |
+
|
| 592 |
+
#pragma unroll
|
| 593 |
+
for (int i = 0; i < elems; i++) {
|
| 594 |
+
mem_access::load_shared<bytes>(data + i, reduce_buffer + _warp_rank() * elems + i);
|
| 595 |
+
}
|
| 596 |
+
}
|
| 597 |
+
}
|
| 598 |
+
|
| 599 |
+
/*
|
| 600 |
+
Main API implementations. For the most part, they just convert the individual
|
| 601 |
+
variables into arrays, which makes working with them easier with a single
|
| 602 |
+
implementation. In theory, we could use the `_block` implementation as another
|
| 603 |
+
option, but the nature of using a pointer is a little less safe and this allows
|
| 604 |
+
us to obfuscate the details of the partitioned implementation.
|
| 605 |
+
*/
|
| 606 |
+
template <ROpType Op, int warp_bound>
|
| 607 |
+
DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val)
|
| 608 |
+
{
|
| 609 |
+
_block<float, warp_bound, Op>(tb, warp, &val);
|
| 610 |
+
}
|
| 611 |
+
|
| 612 |
+
template <ROpType Op1, ROpType Op2, int warp_bound>
|
| 613 |
+
DS_D_INLINE void block(cg::thread_block& tb,
|
| 614 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 615 |
+
float& val1,
|
| 616 |
+
float& val2)
|
| 617 |
+
{
|
| 618 |
+
float data[2] = {val1, val2};
|
| 619 |
+
_block<float, warp_bound, Op1, Op2>(tb, warp, data);
|
| 620 |
+
val1 = data[0];
|
| 621 |
+
val2 = data[1];
|
| 622 |
+
}
|
| 623 |
+
|
| 624 |
+
template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound>
|
| 625 |
+
DS_D_INLINE void block(cg::thread_block& tb,
|
| 626 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 627 |
+
float& val1,
|
| 628 |
+
float& val2,
|
| 629 |
+
float& val3)
|
| 630 |
+
{
|
| 631 |
+
float data[3] = {val1, val2, val3};
|
| 632 |
+
_block<float, warp_bound, Op1, Op2, Op3>(tb, warp, data);
|
| 633 |
+
val1 = data[0];
|
| 634 |
+
val2 = data[1];
|
| 635 |
+
val3 = data[2];
|
| 636 |
+
}
|
| 637 |
+
|
| 638 |
+
template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound>
|
| 639 |
+
DS_D_INLINE void block(cg::thread_block& tb,
|
| 640 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 641 |
+
float& val1,
|
| 642 |
+
float& val2,
|
| 643 |
+
float& val3,
|
| 644 |
+
float& val4)
|
| 645 |
+
{
|
| 646 |
+
float data[4] = {val1, val2, val3, val4};
|
| 647 |
+
_block<float, warp_bound, Op1, Op2, Op3, Op4>(tb, warp, data);
|
| 648 |
+
val1 = data[0];
|
| 649 |
+
val2 = data[1];
|
| 650 |
+
val3 = data[2];
|
| 651 |
+
val4 = data[3];
|
| 652 |
+
}
|
| 653 |
+
|
| 654 |
+
/*
|
| 655 |
+
Note: for the partitioned blocks, the implementation does not support non-power of 2 blocks in order
|
| 656 |
+
to shorten block scale reduction length.
|
| 657 |
+
*/
|
| 658 |
+
template <ROpType Op, int num_threads>
|
| 659 |
+
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
|
| 660 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 661 |
+
float& val)
|
| 662 |
+
{
|
| 663 |
+
if (num_threads <= hw_warp_size) {
|
| 664 |
+
_warp<float, Op, num_threads>(warp, &val);
|
| 665 |
+
} else {
|
| 666 |
+
constexpr int num_warps = num_threads / hw_warp_size;
|
| 667 |
+
_block<float, num_warps, Op>(tb, warp, &val);
|
| 668 |
+
}
|
| 669 |
+
}
|
| 670 |
+
|
| 671 |
+
template <ROpType Op1, ROpType Op2, int num_threads>
|
| 672 |
+
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
|
| 673 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 674 |
+
float& val1,
|
| 675 |
+
float& val2)
|
| 676 |
+
{
|
| 677 |
+
float data[2] = {val1, val2};
|
| 678 |
+
|
| 679 |
+
if (num_threads <= hw_warp_size) {
|
| 680 |
+
_warp<float, Op1, Op2, num_threads>(warp, data);
|
| 681 |
+
} else {
|
| 682 |
+
constexpr int num_warps = num_threads / hw_warp_size;
|
| 683 |
+
_block<float, num_warps, Op1, Op2>(tb, warp, data);
|
| 684 |
+
}
|
| 685 |
+
|
| 686 |
+
val1 = data[0];
|
| 687 |
+
val2 = data[1];
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
|
| 691 |
+
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
|
| 692 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 693 |
+
float& val1,
|
| 694 |
+
float& val2,
|
| 695 |
+
float& val3)
|
| 696 |
+
{
|
| 697 |
+
float data[3] = {val1, val2, val3};
|
| 698 |
+
|
| 699 |
+
if (num_threads <= hw_warp_size) {
|
| 700 |
+
_warp<float, Op1, Op2, Op3, num_threads>(warp, data);
|
| 701 |
+
} else {
|
| 702 |
+
constexpr int num_warps = num_threads / hw_warp_size;
|
| 703 |
+
_block<float, num_warps, Op1, Op2, Op3>(tb, warp, data);
|
| 704 |
+
}
|
| 705 |
+
|
| 706 |
+
val1 = data[0];
|
| 707 |
+
val2 = data[1];
|
| 708 |
+
val3 = data[2];
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
|
| 712 |
+
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
|
| 713 |
+
cg::thread_block_tile<hw_warp_size>& warp,
|
| 714 |
+
float& val1,
|
| 715 |
+
float& val2,
|
| 716 |
+
float& val3,
|
| 717 |
+
float& val4)
|
| 718 |
+
{
|
| 719 |
+
float data[4] = {val1, val2, val3, val4};
|
| 720 |
+
|
| 721 |
+
if (num_threads <= hw_warp_size) {
|
| 722 |
+
_warp<float, Op1, Op2, Op3, Op4, num_threads>(warp, data);
|
| 723 |
+
} else {
|
| 724 |
+
constexpr int num_warps = num_threads / hw_warp_size;
|
| 725 |
+
_block<float, num_warps, Op1, Op2, Op3, Op4>(tb, warp, data);
|
| 726 |
+
}
|
| 727 |
+
|
| 728 |
+
val1 = data[0];
|
| 729 |
+
val2 = data[1];
|
| 730 |
+
val3 = data[2];
|
| 731 |
+
val4 = data[3];
|
| 732 |
+
}
|
| 733 |
+
|
| 734 |
+
/*
|
| 735 |
+
Arg-reduce is a specialization of the above. We only support this with a single reduction
|
| 736 |
+
parameter. This only works for max/min reductions.
|
| 737 |
+
*/
|
| 738 |
+
|
| 739 |
+
__align__(8) struct IdxReduceResult {
|
| 740 |
+
/*
|
| 741 |
+
NOTE: ORDERING MATTERS HERE! The idx is the least significant set of bits
|
| 742 |
+
and the val is the most significant. Changing the order of this declaration
|
| 743 |
+
will break the code.
|
| 744 |
+
*/
|
| 745 |
+
int idx;
|
| 746 |
+
float val;
|
| 747 |
+
};
|
| 748 |
+
|
| 749 |
+
template <ROpType Op, int warpBound>
|
| 750 |
+
DS_D_INLINE IdxReduceResult
|
| 751 |
+
idx_reduce(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float val, int idx)
|
| 752 |
+
{
|
| 753 |
+
IdxReduceResult res = {idx, val};
|
| 754 |
+
|
| 755 |
+
// Clear out the nan. This shouldn't be an issue for our initial applications
|
| 756 |
+
if (isnan(val)) res.val = init<Op>();
|
| 757 |
+
|
| 758 |
+
// Can do float compares as integers. By packing the index into the lower bits
|
| 759 |
+
// we can just do a single int64 rather than a branch, compare, and select.
|
| 760 |
+
// One side benefit of this is that it is by nature a stable algorithm and
|
| 761 |
+
// will always bias ties to the higher index.
|
| 762 |
+
int64_t* res_as_int = reinterpret_cast<int64_t*>(&res);
|
| 763 |
+
|
| 764 |
+
// The way floating point compare works is normally to perform a sign comparison
|
| 765 |
+
// and if they match, then do a comparison of the rest of the bits as unsigned
|
| 766 |
+
// integers. Since we are bundling these, that means for negative values we need
|
| 767 |
+
// to reverse the sort order, which we can do with an XOR.
|
| 768 |
+
if (val < 0) { *res_as_int ^= 0x7fffffff00000000; }
|
| 769 |
+
|
| 770 |
+
_block<int64_t, warpBound, Op>(tb, warp, res_as_int);
|
| 771 |
+
|
| 772 |
+
// Sign bit is preserved, so we can check if we need to invert the mantissa back
|
| 773 |
+
if (res.val < 0) { *res_as_int ^= 0x7fffffff00000000; }
|
| 774 |
+
|
| 775 |
+
return res;
|
| 776 |
+
}
|
| 777 |
+
|
| 778 |
+
} // namespace reduce
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .atom_builder import *
|
| 7 |
+
from .blocked_flash import *
|
| 8 |
+
from .embed import *
|
| 9 |
+
from .linear_blocked_kv_rotary import *
|
| 10 |
+
from .logits_gather import *
|
| 11 |
+
from .moe_gather import *
|
| 12 |
+
from .moe_scatter import *
|
| 13 |
+
from .top_k_gating import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .atom_builder import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (235 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc
ADDED
|
Binary file (2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "atom_builder.h"
|
| 7 |
+
#include "attention_atom.h"
|
| 8 |
+
#include "ragged_dtypes.h"
|
| 9 |
+
|
| 10 |
+
int32_t build_atoms(torch::Tensor& atoms_ten,
|
| 11 |
+
torch::Tensor& batch_metadata,
|
| 12 |
+
torch::Tensor& seq_metadata,
|
| 13 |
+
torch::Tensor& kv_ptrs,
|
| 14 |
+
const int32_t q_block_size,
|
| 15 |
+
const int32_t kv_block_size)
|
| 16 |
+
{
|
| 17 |
+
const RaggedBatchDescriptor* batch_desc =
|
| 18 |
+
reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr());
|
| 19 |
+
|
| 20 |
+
const InflightSeqDescriptor* seq_desc =
|
| 21 |
+
reinterpret_cast<const InflightSeqDescriptor*>(seq_metadata.data_ptr());
|
| 22 |
+
|
| 23 |
+
int32_t** kv_ptr_list = reinterpret_cast<int32_t**>(kv_ptrs.data_ptr());
|
| 24 |
+
|
| 25 |
+
AttentionAtom* atoms = reinterpret_cast<AttentionAtom*>(atoms_ten.data_ptr());
|
| 26 |
+
|
| 27 |
+
int32_t n_atoms = 0;
|
| 28 |
+
for (int i = 0; i < batch_desc->n_sequences; i++) {
|
| 29 |
+
const int seq_atoms = (seq_desc[i].n_tokens + q_block_size - 1) / q_block_size;
|
| 30 |
+
int32_t cur_start_idx = seq_desc[i].start_idx;
|
| 31 |
+
int32_t global_start_idx = seq_desc[i].seen_tokens;
|
| 32 |
+
int32_t remaining_toks = seq_desc[i].n_tokens;
|
| 33 |
+
|
| 34 |
+
for (int j = 0; j < seq_atoms; j++) {
|
| 35 |
+
atoms[n_atoms].block_idx_list = kv_ptr_list[i];
|
| 36 |
+
atoms[n_atoms].q_start_idx = cur_start_idx;
|
| 37 |
+
atoms[n_atoms].q_len = std::min(remaining_toks, q_block_size);
|
| 38 |
+
atoms[n_atoms].global_q_idx = global_start_idx;
|
| 39 |
+
|
| 40 |
+
const int32_t end_toks = global_start_idx + atoms[n_atoms].q_len;
|
| 41 |
+
// TODO(cmikeh2): This logic needs to be changed for sparse implementations
|
| 42 |
+
atoms[n_atoms].kv_blocks = (end_toks + kv_block_size - 1) / kv_block_size;
|
| 43 |
+
atoms[n_atoms].total_extent = end_toks;
|
| 44 |
+
|
| 45 |
+
cur_start_idx += atoms[n_atoms].q_len;
|
| 46 |
+
global_start_idx += atoms[n_atoms].q_len;
|
| 47 |
+
remaining_toks -= atoms[n_atoms].q_len;
|
| 48 |
+
n_atoms++;
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
return n_atoms;
|
| 53 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <torch/extension.h>
|
| 9 |
+
|
| 10 |
+
/*
|
| 11 |
+
Construct the attention atoms given the ragged metadata for the current batch.
|
| 12 |
+
This could largely be done at the Python level, but since we pack the KV ptr
|
| 13 |
+
alongside the int32_t metadata, it gets very ugly to handle the mixed-width
|
| 14 |
+
data structures (since we're packing them in a single tensor).
|
| 15 |
+
*/
|
| 16 |
+
int32_t build_atoms(torch::Tensor& atoms_ten,
|
| 17 |
+
torch::Tensor& batch_metadata,
|
| 18 |
+
torch::Tensor& seq_metadata,
|
| 19 |
+
torch::Tensor& kv_ptrs,
|
| 20 |
+
const int32_t q_block_size,
|
| 21 |
+
const int32_t kv_block_size);
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from typing import Tuple
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from ... import DSKernelBase
|
| 11 |
+
from deepspeed.ops.op_builder import RaggedOpsBuilder
|
| 12 |
+
from ....ragged import RaggedBatchWrapper
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class AtomBuilder(DSKernelBase):
|
| 16 |
+
"""
|
| 17 |
+
C++ implementation to populate the attention atoms for the blocked attention
|
| 18 |
+
kernel.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
def __init__(self) -> None:
|
| 22 |
+
"""
|
| 23 |
+
Triggers compilation of the C++ implementation.
|
| 24 |
+
"""
|
| 25 |
+
inf_module = RaggedOpsBuilder().load()
|
| 26 |
+
self.kernel = inf_module.build_atoms
|
| 27 |
+
|
| 28 |
+
def __call__(self, atoms: torch.Tensor, ragged_batch: RaggedBatchWrapper, q_block_size: int,
|
| 29 |
+
kv_block_size: int) -> Tuple[torch.Tensor, int]:
|
| 30 |
+
"""
|
| 31 |
+
Populates the attention atoms for the blocked attention kernel.
|
| 32 |
+
|
| 33 |
+
Args:
|
| 34 |
+
atoms (torch.Tensor): Pre-allocated int32 tensor of shape [max_atoms, 8]
|
| 35 |
+
ragged_batch (torch.Tensor): Wrapper for the ragged batch.
|
| 36 |
+
q_block_size (int): The block size for the queries (as determined by the
|
| 37 |
+
attention implementation)
|
| 38 |
+
kv_block_size (int): The block size for the keys/values (as determined by the
|
| 39 |
+
attention implementation)
|
| 40 |
+
|
| 41 |
+
Returns:
|
| 42 |
+
|
| 43 |
+
"""
|
| 44 |
+
if atoms.device != torch.device("cpu"):
|
| 45 |
+
raise RuntimeError("AtomBuilder must be called on tensors")
|
| 46 |
+
|
| 47 |
+
n_atoms = self.kernel(atoms, ragged_batch.batch_metadata_buffer(on_device=False),
|
| 48 |
+
ragged_batch.inflight_seq_descriptors(on_device=False),
|
| 49 |
+
ragged_batch.kv_ptrs(on_device=False), q_block_size, kv_block_size)
|
| 50 |
+
return atoms, n_atoms
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .logits_gather import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (237 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc
ADDED
|
Binary file (2.29 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include "ds_kernel_utils.h"
|
| 9 |
+
#include "ragged_dtypes.h"
|
| 10 |
+
|
| 11 |
+
#ifdef BF16_AVAILABLE
|
| 12 |
+
#include <cuda_bf16.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
template <typename T>
|
| 16 |
+
void launch_logits_gather(T* final_token_acts,
|
| 17 |
+
const T* all_acts,
|
| 18 |
+
const RaggedBatchDescriptor* batch_metadata,
|
| 19 |
+
const InflightSeqDescriptor* seq_metadata,
|
| 20 |
+
const int32_t n_seqs,
|
| 21 |
+
const int32_t embed_dim,
|
| 22 |
+
cudaStream_t stream);
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/cuda/CUDAStream.h>
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
#include "logits_gather.cuh"
|
| 11 |
+
#include "ragged_dtypes.h"
|
| 12 |
+
|
| 13 |
+
/*
|
| 14 |
+
Logits gather will parse the ragged batch data structure and gather only the logits that
|
| 15 |
+
will be used for token sampling.
|
| 16 |
+
*/
|
| 17 |
+
void gather_for_logits(torch::Tensor& final_token_acts,
|
| 18 |
+
torch::Tensor& all_acts,
|
| 19 |
+
torch::Tensor& batch_metadata,
|
| 20 |
+
torch::Tensor& seq_metadata);
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ... import DSKernelBase
|
| 9 |
+
from deepspeed.ops.op_builder import RaggedOpsBuilder
|
| 10 |
+
from ....inference_utils import elem_size
|
| 11 |
+
from ....ragged import RaggedBatchWrapper
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class RaggedLogitsGather(DSKernelBase):
|
| 15 |
+
"""
|
| 16 |
+
CUDA Kernel implementation for gather the hidden states of the final token
|
| 17 |
+
of each sequence. This is used to reduce the cost of the performing the unembedding.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
supported_dtypes = [torch.float16, torch.bfloat16, torch.float32]
|
| 21 |
+
|
| 22 |
+
def __init__(self, model_dim: int, fp_dtype: torch.dtype):
|
| 23 |
+
"""
|
| 24 |
+
Parameters:
|
| 25 |
+
fp_dtype (torch.dtype): Data type for the input/output. Supported values
|
| 26 |
+
are torch.float16, torch.bfloat16, and torch.float32.
|
| 27 |
+
"""
|
| 28 |
+
if fp_dtype not in RaggedLogitsGather.supported_dtypes:
|
| 29 |
+
raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
|
| 30 |
+
fp_dtype, RaggedLogitsGather.supported_dtypes))
|
| 31 |
+
|
| 32 |
+
if elem_size(fp_dtype) * model_dim % 16 != 0:
|
| 33 |
+
raise ValueError("Embedding dimension must be aligned to 16 bytes, got {}".format(model_dim))
|
| 34 |
+
|
| 35 |
+
inf_module = RaggedOpsBuilder().load()
|
| 36 |
+
self.kernel = inf_module.gather_for_logits
|
| 37 |
+
|
| 38 |
+
def __call__(self, final_token_activations: torch.Tensor, all_activations: torch.Tensor,
|
| 39 |
+
ragged_wrapper: RaggedBatchWrapper) -> torch.Tensor:
|
| 40 |
+
"""
|
| 41 |
+
Gather the hidden states of the final token of each sequence from `all_activations` into
|
| 42 |
+
`final_token_activations`.
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
final_token_activations (torch.Tensor): Output tensor of shape [num_seqs, model_dim]
|
| 46 |
+
all_activations (torch.Tensor): Input tensor of shape [num_tokens, model_dim]
|
| 47 |
+
ragged_wrapper (RaggedBatchWrapper): Wrapper for the ragged batch.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
self.kernel(final_token_activations, all_activations, ragged_wrapper.batch_metadata_buffer(),
|
| 51 |
+
ragged_wrapper.inflight_seq_descriptors())
|
| 52 |
+
return final_token_activations
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "ds_kernel_utils.h"
|
| 7 |
+
#include "logits_gather.cuh"
|
| 8 |
+
#include "memory_access_utils.h"
|
| 9 |
+
#include "ragged_dtypes.h"
|
| 10 |
+
|
| 11 |
+
namespace logits_gather {

// Bytes moved per vectorized load/store, and threads per gather block.
constexpr int granularity = 16;
constexpr int threads = 512;

}  // namespace logits_gather
|
| 17 |
+
|
| 18 |
+
template <typename T>
__global__ void logits_gather_kernel(T* final_token_acts,
                                     const T* token_acts,
                                     const RaggedBatchDescriptor* ragged_batch,
                                     const InflightSeqDescriptor* inflight_batch,
                                     const int32_t embed_dim)
{
    // Each thread moves one 16-byte vector; blockIdx.y indexes the sequence,
    // blockIdx.x tiles the embedding dimension.
    constexpr int vec_width = logits_gather::granularity / sizeof(T);

    const int32_t seq_idx = blockIdx.y;

    // It's possible we've padded the output Tensor (under CG conditions), so
    // excess blocks on the sequence axis simply exit.
    if (seq_idx >= ragged_batch->n_sequences) return;

    const InflightSeqDescriptor seq_desc = inflight_batch[seq_idx];
    // Flat index of this sequence's last token in the token buffer.
    // NOTE(review): assumes seq_desc.n_tokens >= 1 — confirm the host never
    // schedules an empty sequence.
    const int last_token_idx = seq_desc.start_idx + seq_desc.n_tokens - 1;

    const int src_base = last_token_idx * embed_dim;
    const int dst_base = seq_idx * embed_dim;
    const int elem_offset = (blockIdx.x * logits_gather::threads + threadIdx.x) * vec_width;

    T staging[vec_width];

    // The host wrapper checks that embed_dim * sizeof(T) is a multiple of 16,
    // so a full vector access never runs past the row when this guard holds.
    if (elem_offset < embed_dim) {
        mem_access::load_global<logits_gather::granularity>(
            staging, token_acts + src_base + elem_offset);

        mem_access::store_global<logits_gather::granularity>(
            final_token_acts + dst_base + elem_offset, staging);
    }
}
|
| 51 |
+
|
| 52 |
+
template <typename T>
void launch_logits_gather(T* final_token_acts,
                          const T* all_acts,
                          const RaggedBatchDescriptor* ragged_batch,
                          const InflightSeqDescriptor* inflight_batch,
                          const int32_t n_seqs,
                          const int32_t embed_dim,
                          cudaStream_t stream)
{
    // grid.x tiles the embedding dimension in chunks of threads * vec_width
    // elements; grid.y walks the (possibly padded) sequence axis.
    constexpr int vec_width = logits_gather::granularity / sizeof(T);
    constexpr int block_elems = logits_gather::threads * vec_width;
    const int embed_blocks = (embed_dim + block_elems - 1) / block_elems;  // ceil-div

    const dim3 grid(embed_blocks, n_seqs, 1);
    const dim3 block(logits_gather::threads, 1, 1);

    logits_gather_kernel<T><<<grid, block, 0, stream>>>(
        final_token_acts, all_acts, ragged_batch, inflight_batch, embed_dim);
}

// Explicit instantiations so the templated launcher is visible to the
// host-side bindings for every supported activation dtype.
#define INSTANTIATE_FOR_TYPE(T)                                                      \
    template void launch_logits_gather<T>(T * final_token_acts,                      \
                                          const T* all_acts,                         \
                                          const RaggedBatchDescriptor* ragged_batch, \
                                          const InflightSeqDescriptor* inflight_batch, \
                                          const int32_t n_seqs,                      \
                                          const int32_t embed_dim,                   \
                                          cudaStream_t stream);

INSTANTIATE_FOR_TYPE(float)
INSTANTIATE_FOR_TYPE(__half)

#ifdef BF16_AVAILABLE
INSTANTIATE_FOR_TYPE(__nv_bfloat16)
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .moe_gather import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "moe_gather.h"
|
| 7 |
+
#include <c10/cuda/CUDAStream.h>
|
| 8 |
+
|
| 9 |
+
#define DISPATCH_MOE_GATHER(T_TYPE, C_TYPE) \
|
| 10 |
+
if (layer_output.options().dtype() == torch::T_TYPE) { \
|
| 11 |
+
launch_moe_gather((C_TYPE*)layer_output.data_ptr(), \
|
| 12 |
+
(const C_TYPE*)moe_output.data_ptr(), \
|
| 13 |
+
(const float*)scores.data_ptr(), \
|
| 14 |
+
(const int32_t*)mapped_slots.data_ptr(), \
|
| 15 |
+
(int32_t*)expert_count.data_ptr(), \
|
| 16 |
+
n_channels, \
|
| 17 |
+
n_experts, \
|
| 18 |
+
n_tokens, \
|
| 19 |
+
n_top_k, \
|
| 20 |
+
normalize_scales, \
|
| 21 |
+
at::cuda::getCurrentCUDAStream()); \
|
| 22 |
+
return; \
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
/*
|
| 26 |
+
Re-gather the outputs of MoE and scale them by the gating score.
|
| 27 |
+
*/
|
| 28 |
+
void moe_gather(torch::Tensor& layer_output,
|
| 29 |
+
const torch::Tensor& moe_output,
|
| 30 |
+
const torch::Tensor& scores,
|
| 31 |
+
const torch::Tensor& mapped_slots,
|
| 32 |
+
const torch::Tensor& expert_count,
|
| 33 |
+
const bool normalize_scales)
|
| 34 |
+
{
|
| 35 |
+
const int32_t n_channels = layer_output.size(1);
|
| 36 |
+
const int32_t n_experts = expert_count.size(0);
|
| 37 |
+
const int32_t n_tokens = layer_output.size(0);
|
| 38 |
+
const int32_t n_top_k = mapped_slots.size(1);
|
| 39 |
+
|
| 40 |
+
TORCH_CHECK(moe_output.size(0) == n_tokens * n_top_k);
|
| 41 |
+
TORCH_CHECK(moe_output.size(1) == n_channels);
|
| 42 |
+
TORCH_CHECK(scores.size(0) == n_tokens);
|
| 43 |
+
TORCH_CHECK(mapped_slots.size(0) == n_tokens);
|
| 44 |
+
|
| 45 |
+
TORCH_CHECK(scores.size(1) == n_top_k);
|
| 46 |
+
|
| 47 |
+
TORCH_CHECK(layer_output.scalar_type() == moe_output.scalar_type());
|
| 48 |
+
TORCH_CHECK(scores.scalar_type() == torch::kFloat32);
|
| 49 |
+
TORCH_CHECK(mapped_slots.scalar_type() == torch::kInt32);
|
| 50 |
+
TORCH_CHECK(expert_count.scalar_type() == torch::kInt32);
|
| 51 |
+
|
| 52 |
+
DISPATCH_MOE_GATHER(kHalf, __half);
|
| 53 |
+
|
| 54 |
+
#ifdef BF16_AVAILABLE
|
| 55 |
+
DISPATCH_MOE_GATHER(kBFloat16, __nv_bfloat16);
|
| 56 |
+
#endif
|
| 57 |
+
|
| 58 |
+
TORCH_CHECK(false, "Unsupported data type for MoE gather");
|
| 59 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/cuda/CUDAStream.h>
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
#include "moe_gather.cuh"
|
| 11 |
+
|
| 12 |
+
/*
|
| 13 |
+
Re-gather the outputs of MoE and scale them by the gating score.
|
| 14 |
+
*/
|
| 15 |
+
void moe_gather(torch::Tensor& layer_output,
|
| 16 |
+
const torch::Tensor& moe_output,
|
| 17 |
+
const torch::Tensor& scores,
|
| 18 |
+
const torch::Tensor& mapped_slots,
|
| 19 |
+
const torch::Tensor& expert_counts,
|
| 20 |
+
const bool normalize_scales);
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#include "ds_kernel_utils.h"
|
| 8 |
+
#include "moe_gather.cuh"
|
| 9 |
+
#include "reduction_utils.h"
|
| 10 |
+
#include "top_k_gating.cuh"
|
| 11 |
+
#include "top_k_utils.h"
|
| 12 |
+
|
| 13 |
+
namespace gather {

// Bytes moved per vectorized access, and threads per per-token block.
constexpr int access_granularity = 16;
constexpr int threads = 256;

}  // namespace gather
|
| 19 |
+
|
| 20 |
+
template <typename T, int copyUnroll, int N_TOP_K>
__global__ void moe_gather_kernel(T* layer_output,
                                  const T* moe_output,
                                  const float* scores,
                                  const int32_t* mapped_slots,
                                  int32_t* expert_counts,
                                  const int32_t n_channels,
                                  const int32_t n_experts,
                                  const bool normalize_scales)
{
    // One block per token; each thread owns a strided set of 16B vectors.
    constexpr int32_t vec_width = gather::access_granularity / sizeof(T);
    constexpr int32_t chan_stride = vec_width * gather::threads;

    const int32_t token_idx = blockIdx.x;

    // Load this token's expert slots and detect the fully-unassigned case.
    int32_t slots[N_TOP_K];
    bool no_valid_slot = true;
    for (int i = 0; i < N_TOP_K; i++) {
        slots[i] = mapped_slots[token_idx * N_TOP_K + i];
        no_valid_slot &= (slots[i] == gating::unassigned);
    }

    if (token_idx == 0) {
        // Block 0 zeroes expert_counts so the buffer is ready for its next use.
        // NOTE(review): only the first blockDim.x entries are cleared — confirm
        // n_experts never exceeds gather::threads.
        if (threadIdx.x < n_experts) { expert_counts[threadIdx.x] = 0; }
    }

    if (no_valid_slot) {
        // This token was not assigned to anything.
        // TODO(cmikeh2): It's possible we want different behavior here moving forward.
        return;
    }

    // Gating scores for each of the token's K experts.
    float gate[N_TOP_K];
    for (int i = 0; i < N_TOP_K; i++) { gate[i] = scores[token_idx * N_TOP_K + i]; }

    if (normalize_scales) {
        // Renormalize so the K scores sum to 1, guarding against a zero sum.
        float score_sum = 0.0f;
        for (int i = 0; i < N_TOP_K; i++) { score_sum += gate[i]; }

        if (score_sum > 0.0f) {
            for (int i = 0; i < N_TOP_K; i++) { gate[i] /= score_sum; }
        }
    }

    const int32_t chan_base = threadIdx.x * vec_width;

    // Per-expert source rows for this token's channel slice.
    const T* src_rows[N_TOP_K];
#pragma unroll
    for (int i = 0; i < N_TOP_K; i++) {
        src_rows[i] = moe_output + slots[i] * n_channels + chan_base;
    }

    T* dst_row = layer_output + token_idx * n_channels + chan_base;

#pragma unroll
    for (int i = 0; i < copyUnroll; i++) {
        if (i * chan_stride + chan_base < n_channels) {
            // Accumulate the score-weighted expert outputs in fp32.
            float acc[vec_width];
            for (int j = 0; j < vec_width; j++) {
                acc[j] = reduce::init<reduce::ROpType::Add>();
            }

#pragma unroll
            for (int j = 0; j < N_TOP_K; j++) {
                T vec_buf[vec_width];
                mem_access::load_global<gather::access_granularity>(
                    vec_buf, src_rows[j] + i * chan_stride);

#pragma unroll
                for (int k = 0; k < vec_width; k++) {
                    acc[k] += conversion::to<float>(vec_buf[k]) * gate[j];
                }
            }

            // Down-cast and write the blended result back to the token row.
            T out_buf[vec_width];
#pragma unroll
            for (int j = 0; j < vec_width; j++) { out_buf[j] = conversion::to<T>(acc[j]); }

            mem_access::store_global<gather::access_granularity>(dst_row + i * chan_stride,
                                                                 out_buf);
        }
    }
}
|
| 108 |
+
|
| 109 |
+
// Expand one case of the copy-unroll switch into a typed kernel launch.
#define LAUNCH_FOR_UNROLL(COUNT)                                              \
    case COUNT:                                                               \
        moe_gather_kernel<T, COUNT, CONST_TOP_K><<<grid, block, 0, stream>>>( \
            layer_output, moe_output, scores, mapped_slots, expert_counts,    \
            n_channels, n_experts, normalize_scales);                         \
        break;

template <typename T>
void launch_moe_gather(T* layer_output,
                       const T* moe_output,
                       const float* scores,
                       const int32_t* mapped_slots,
                       int32_t* expert_counts,
                       const int32_t n_channels,
                       const int32_t n_experts,
                       const int32_t n_tokens,
                       const int32_t n_top_k,
                       const bool normalize_scales,
                       cudaStream_t stream)
{
    // One block per token; the compile-time unroll factor must cover
    // n_channels in chunks of threads * vec elements.
    constexpr int vals_per_unroll = gather::threads * gather::access_granularity / sizeof(T);
    const int copy_unroll = (n_channels + vals_per_unroll - 1) / vals_per_unroll;  // ceil-div

    const dim3 block(gather::threads);
    const dim3 grid(n_tokens);

    TOP_K_SWITCH(n_top_k, [&] {
        switch (copy_unroll) {
            LAUNCH_FOR_UNROLL(1)
            LAUNCH_FOR_UNROLL(2)
            LAUNCH_FOR_UNROLL(3)
            LAUNCH_FOR_UNROLL(4)
            LAUNCH_FOR_UNROLL(5)
            LAUNCH_FOR_UNROLL(6)
            // NOTE(review): no default case — if n_channels requires an unroll
            // factor > 6, no kernel is launched and the call silently no-ops.
            // Confirm callers bound n_channels accordingly.
        }
    });
}

// Explicit instantiations for the supported activation dtypes.
#define INSTANTIATE_GATHER_FOR_TYPE(TYPE)                           \
    template void launch_moe_gather<TYPE>(TYPE * layer_output,      \
                                          const TYPE* moe_output,   \
                                          const float* scores,      \
                                          const int32_t* mapped_slots, \
                                          int32_t* expert_counts,   \
                                          const int32_t n_channels, \
                                          const int32_t n_experts,  \
                                          const int32_t n_tokens,   \
                                          const int32_t n_top_k,    \
                                          const bool normalize_scales, \
                                          cudaStream_t stream);

INSTANTIATE_GATHER_FOR_TYPE(__half)

#ifdef BF16_AVAILABLE
INSTANTIATE_GATHER_FOR_TYPE(__nv_bfloat16)
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .moe_scatter import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (233 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/moe_scatter.cpython-310.pyc
ADDED
|
Binary file (2.66 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "moe_scatter.h"
|
| 7 |
+
#include <c10/cuda/CUDAStream.h>
|
| 8 |
+
|
| 9 |
+
#define DISPATCH_MOE_SCATTER(T_TYPE, C_TYPE) \
|
| 10 |
+
if (activations.options().dtype() == torch::T_TYPE) { \
|
| 11 |
+
launch_moe_scatter((C_TYPE*)moe_input.data_ptr(), \
|
| 12 |
+
(int64_t*)expert_count_cumsums.data_ptr(), \
|
| 13 |
+
(int32_t*)mapped_slots.data_ptr(), \
|
| 14 |
+
(const C_TYPE*)activations.data_ptr(), \
|
| 15 |
+
(const int32_t*)expert_counts.data_ptr(), \
|
| 16 |
+
(const int32_t*)assignments.data_ptr(), \
|
| 17 |
+
(const int32_t*)offsets.data_ptr(), \
|
| 18 |
+
n_channels, \
|
| 19 |
+
n_tokens, \
|
| 20 |
+
n_experts, \
|
| 21 |
+
n_top_k, \
|
| 22 |
+
at::cuda::getCurrentCUDAStream()); \
|
| 23 |
+
return; \
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
/*
|
| 27 |
+
Performs a cumsum on the expert counts and copies the hidden states to the
|
| 28 |
+
appropriate spot to ensure that each experts inputs are contiguous.
|
| 29 |
+
*/
|
| 30 |
+
void moe_scatter(torch::Tensor& moe_input,
|
| 31 |
+
torch::Tensor& expert_count_cumsums,
|
| 32 |
+
torch::Tensor& mapped_slots,
|
| 33 |
+
torch::Tensor& activations,
|
| 34 |
+
torch::Tensor& expert_counts,
|
| 35 |
+
torch::Tensor& assignments,
|
| 36 |
+
torch::Tensor& offsets)
|
| 37 |
+
{
|
| 38 |
+
const int32_t n_tokens = activations.size(0);
|
| 39 |
+
const int32_t n_channels = activations.size(1);
|
| 40 |
+
const int32_t n_top_k = assignments.size(1);
|
| 41 |
+
|
| 42 |
+
// Should have a lot of matching buffer sizes here.
|
| 43 |
+
TORCH_CHECK(n_tokens == assignments.size(0));
|
| 44 |
+
TORCH_CHECK(n_tokens == offsets.size(0));
|
| 45 |
+
TORCH_CHECK(n_channels == moe_input.size(1));
|
| 46 |
+
|
| 47 |
+
TORCH_CHECK(n_top_k == offsets.size(1));
|
| 48 |
+
TORCH_CHECK(n_top_k * n_tokens == moe_input.size(0));
|
| 49 |
+
TORCH_CHECK(n_top_k == mapped_slots.size(1));
|
| 50 |
+
|
| 51 |
+
const int32_t n_experts = expert_count_cumsums.size(0);
|
| 52 |
+
|
| 53 |
+
TORCH_CHECK(moe_input.scalar_type() == activations.scalar_type());
|
| 54 |
+
TORCH_CHECK(expert_count_cumsums.scalar_type() == torch::kInt64);
|
| 55 |
+
TORCH_CHECK(mapped_slots.scalar_type() == torch::kInt32);
|
| 56 |
+
TORCH_CHECK(expert_counts.scalar_type() == torch::kInt32);
|
| 57 |
+
TORCH_CHECK(assignments.scalar_type() == torch::kInt32);
|
| 58 |
+
TORCH_CHECK(offsets.scalar_type() == torch::kInt32);
|
| 59 |
+
|
| 60 |
+
DISPATCH_MOE_SCATTER(kHalf, __half);
|
| 61 |
+
|
| 62 |
+
#ifdef BF16_AVAILABLE
|
| 63 |
+
DISPATCH_MOE_SCATTER(kBFloat16, __nv_bfloat16);
|
| 64 |
+
#endif
|
| 65 |
+
|
| 66 |
+
TORCH_CHECK(false, "Unsupported dtype for moe_scatter")
|
| 67 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cuh
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include "ds_kernel_utils.h"
|
| 9 |
+
#include "ragged_dtypes.h"
|
| 10 |
+
|
| 11 |
+
/*
Device launcher: cumsum the per-expert counts and scatter each token's
activations into expert-contiguous rows of `moe_input` (see moe_scatter.cpp
for the host-side shape/dtype contract).
*/
template <typename T>
void launch_moe_scatter(T* moe_input,
                        int64_t* expert_count_cumsums,
                        int32_t* mapped_slots,
                        const T* activations,
                        const int32_t* expert_counts,
                        const int32_t* assignments,
                        const int32_t* offsets,
                        const int32_t n_channels,
                        const int32_t n_tokens,
                        const int32_t n_experts,
                        const int32_t n_top_k,
                        cudaStream_t stream);
|